diff --git a/.github/.gitignore b/.github/.gitignore index b4ddc884c6b9..1ed369f03020 100644 --- a/.github/.gitignore +++ b/.github/.gitignore @@ -1 +1,2 @@ -.secrets \ No newline at end of file +.secrets +.act-tool-cache diff --git a/.github/local_workflow.sh b/.github/local_workflow.sh new file mode 100755 index 000000000000..ac32b623a4fe --- /dev/null +++ b/.github/local_workflow.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +# Runs a github workflow locally. +# +# Needs `act`. See https://nektosact.com/installation/index.html +# +# Bind-mounts the local directory into the container, which executes as the current user. +# Attempts to use a GCP service account, which you can download from +# https://console.cloud.google.com/iam-admin/serviceaccounts + +# Your workflow may not need a GCP service account, nor a kubeconfig, etc. +# Feel free to send a PR to tweak the script ;) + +# example usage: +# export GOOGLE_APPLICATION_CREDENTIALS=/your/path/to/testnet-helm-sa.json +# alias lwfl=/your/path/to/aztec-clones/alpha/.github/local_workflow.sh +# lwfl deploy_eth_devnet --input cluster=kind --input resource_profile=dev --input namespace=mitch-eth-devnet --input create_static_ips=false +# lwfl deploy_eth_devnet --input cluster=aztec-gke-private --input resource_profile=prod --input namespace=mitch-eth-devnet --input create_static_ips=false + +workflow_name=$1 + +REPO_ROOT=$(git rev-parse --show-toplevel) + +if [ -z "$workflow_name" ]; then + echo "Usage: $0 <workflow_name> [args ...]" + exit 1 +fi + +# get the rest of the args (skip the first one which is the workflow name) +shift +args=("$@") + +# Only needed when running against GKE +SA_KEY_JSON=$(cat "$GOOGLE_APPLICATION_CREDENTIALS") + +mkdir -p $REPO_ROOT/.github/.act-tool-cache + +act -j $workflow_name \ + --env RUNNER_TOOL_CACHE=/work/toolcache \ -s GITHUB_TOKEN="$(gh auth token)" \ -s GCP_SA_KEY="$SA_KEY_JSON" \ -s KUBECONFIG_B64="$(cat $HOME/.kube/config | base64 -w0)" \ --container-options "-v 
$REPO_ROOT/.github/.act-tool-cache:/work/toolcache --user $(id -u):$(id -g)" \ + --bind \ + --directory $REPO_ROOT "${args[@]}" diff --git a/.github/release-please-v2.json b/.github/release-please-v2.json new file mode 100644 index 000000000000..a7f1ff2fc063 --- /dev/null +++ b/.github/release-please-v2.json @@ -0,0 +1,23 @@ +{ + "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json", + "release-type": "simple", + "bump-minor-pre-major": true, + "bump-patch-for-minor-pre-major": true, + "group-pull-request-title-pattern": "chore(v2): Release ${version}", + "pull-request-header": "Pending Aztec Packages v2 release", + "versioning": "always-bump-patch", + "include-component-in-tag": false, + "changelog-sections": [ + { "type": "feat", "section": "Features", "hidden": false }, + { "type": "fix", "section": "Bug Fixes", "hidden": false }, + { "type": "chore", "section": "Miscellaneous", "hidden": false }, + { "type": "test", "section": "Miscellaneous", "hidden": false }, + { "type": "refactor", "section": "Miscellaneous", "hidden": false }, + { "type": "docs", "section": "Documentation", "hidden": false } + ], + "packages": { + ".": { + "release-type": "simple" + } + } +} diff --git a/.github/workflows/ci3-external.yml b/.github/workflows/ci3-external.yml index ea54a112b84b..fddfd9af34b6 100644 --- a/.github/workflows/ci3-external.yml +++ b/.github/workflows/ci3-external.yml @@ -31,6 +31,7 @@ jobs: with: # The commit to checkout. We want our actual commit, and not the result of merging the PR to the target. ref: ${{ github.event.pull_request.head.sha || github.sha }} + persist-credentials: false # NOTE: in ci3.yml we just rely on draft mode not being mergable. # Here we are a little more careful than just skipping the worklfow, in case of an edge case allowing merge. 
@@ -43,7 +44,7 @@ jobs: run: | set -o pipefail git fetch origin ${{ github.event.pull_request.base.ref }} --depth=1 &>/dev/null - forbidden_changes=$(git diff --name-only origin/${{ github.event.pull_request.base.ref }} HEAD -- ci3 .github ci.sh) + forbidden_changes=$(git diff --name-only origin/${{ github.event.pull_request.base.ref }} HEAD -- ci3 .github ci.sh scripts) if echo "$forbidden_changes" | grep -q .; then echo "Error: External PRs can't contain CI changes (forbidden files: $forbidden_changes)." exit 1 @@ -63,6 +64,10 @@ jobs: # Remove any ci-external-once labels. GITHUB_TOKEN=${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} gh pr edit ${{ github.event.pull_request.number }} --remove-label "ci-external-once" + - name: CI Merge Queue Override (grind on PR) + if: contains(github.event.pull_request.labels.*.name, 'ci-merge-queue') + run: echo "CI_MERGE_QUEUE=1" >> $GITHUB_ENV + - name: CI Full Override if: contains(github.event.pull_request.labels.*.name, 'ci-full') run: echo "CI_FULL=1" >> $GITHUB_ENV @@ -82,10 +87,21 @@ jobs: echo ${{ secrets.BUILD_INSTANCE_SSH_KEY }} | base64 --decode > ~/.ssh/build_instance_key chmod 600 ~/.ssh/build_instance_key + - name: Get Tree Hash + run: echo "TREE_HASH=$(git rev-parse HEAD^{tree})" >> $GITHUB_ENV + + - name: Check CI Cache + id: ci_cache + uses: actions/cache@v3 + with: + path: ci-success.txt + key: ci-external-${{ env.TREE_HASH }} + ############# # Run ############# - name: Run + if: steps.ci_cache.outputs.cache-hit != 'true' env: # We need to pass these creds to start the AWS ec2 instance. # They are not injected into that instance. 
Instead, it has minimal @@ -97,8 +113,32 @@ jobs: ARCH: amd64 RUN_ID: ${{ github.run_id }} run: | - if [ "${CI_FULL:-0}" -eq 1 ]; then + if [ "${CI_MERGE_QUEUE:-0}" -eq 1 ]; then exec ./ci.sh merge-queue + elif [ "${CI_FULL:-0}" -eq 1 ]; then + exec ./ci.sh full else exec ./ci.sh fast fi + + - name: Save CI Success + if: steps.ci_cache.outputs.cache-hit != 'true' + run: echo "success" > ci-success.txt + + # If we have passed CI and labelled with ci-squash-and-merge, squash the PR. + # This will rerun CI on the squash commit - but is intended to be a no-op due to caching. + - name: CI Squash and Merge + if: contains(github.event.pull_request.labels.*.name, 'ci-squash-and-merge') + env: + GITHUB_TOKEN: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + run: | + # Reauth the git repo with our GITHUB_TOKEN + git remote set-url origin https://x-access-token:${GITHUB_TOKEN}@github.com/${{ github.repository }} + # Get the base commit (merge-base) for the PR + ./scripts/merge-train/squash-pr.sh \ + "${{ github.event.pull_request.number }}" \ + "${{ github.event.pull_request.head.ref }}" \ + "${{ github.event.pull_request.base.ref }}" \ + "${{ github.event.pull_request.base.sha }}" + gh pr edit "${{ github.event.pull_request.number }}" --remove-label "ci-squash-and-merge" + gh pr merge "${{ github.event.pull_request.number }}" --auto -m || true diff --git a/.github/workflows/ci3.yml b/.github/workflows/ci3.yml index f1508dd10080..64058919ec76 100644 --- a/.github/workflows/ci3.yml +++ b/.github/workflows/ci3.yml @@ -25,6 +25,8 @@ jobs: # (github.event.pull_request.head.repo.fork resolves to nil if not a pull request) if: github.event.pull_request.head.repo.fork != true && github.event.pull_request.draft == false environment: ${{ startsWith(github.ref, 'refs/tags/v') && 'master' || '' }} + env: + GOOGLE_APPLICATION_CREDENTIALS: /tmp/gcp-key.json steps: ############# # Prepare Env @@ -36,7 +38,7 @@ jobs: ref: ${{ github.event.pull_request.head.sha || github.sha }} # Fetch PR commits 
depth (we'll deepen by 1 in squash script if needed) fetch-depth: ${{ github.event.pull_request.commits || 0 }} - token: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + persist-credentials: false - name: CI Merge Queue Override (grind on PR) if: contains(github.event.pull_request.labels.*.name, 'ci-merge-queue') @@ -54,10 +56,6 @@ jobs: if: contains(github.event.pull_request.labels.*.name, 'ci-no-fail-fast') run: echo "NO_FAIL_FAST=1" >> $GITHUB_ENV - - name: Barretenberg CI Override - if: contains(github.event.pull_request.labels.*.name, 'ci-barretenberg') - run: echo "CI_BARRETENBERG=1" >> $GITHUB_ENV - - name: Compute Target Branch id: target_branch run: | @@ -91,15 +89,16 @@ jobs: mkdir -p ~/.ssh echo ${{ secrets.BUILD_INSTANCE_SSH_KEY }} | base64 --decode > ~/.ssh/build_instance_key chmod 600 ~/.ssh/build_instance_key - # Install required packages. - sudo apt update && sudo apt install -y redis-tools parallel + sudo apt install -y --no-install-recommends redis-tools parallel - - name: Prepare GCP key + - name: Store the GCP key in a file env: GCP_SA_KEY: ${{ secrets.GCP_SA_KEY }} run: | - echo "$GCP_SA_KEY" | base64 -w 0 > gcp_sa_key.b64 - echo "GCP_SA_KEY_B64=$(cat gcp_sa_key.b64)" >> $GITHUB_ENV + set +x + umask 077 + printf '%s' "$GCP_SA_KEY" > "$GOOGLE_APPLICATION_CREDENTIALS" + jq -e . "$GOOGLE_APPLICATION_CREDENTIALS" >/dev/null - name: Get Tree Hash run: echo "TREE_HASH=$(git rev-parse HEAD^{tree})" >> $GITHUB_ENV @@ -127,7 +126,7 @@ jobs: NPM_TOKEN: ${{ secrets.NPM_TOKEN }} SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} # Nightly test env vars. 
- GCP_SA_KEY_B64: ${{ env.GCP_SA_KEY_B64 }} + GOOGLE_APPLICATION_CREDENTIALS: ${{ env.GOOGLE_APPLICATION_CREDENTIALS }} EXTERNAL_ETHEREUM_HOSTS: "https://json-rpc.${{ secrets.GCP_SEPOLIA_URL }}?key=${{ secrets.GCP_SEPOLIA_API_KEY }},${{ secrets.INFURA_SEPOLIA_URL }}" EXTERNAL_ETHEREUM_CONSENSUS_HOST: "https://beacon.${{ secrets.GCP_SEPOLIA_URL }}" EXTERNAL_ETHEREUM_CONSENSUS_HOST_API_KEY: ${{ secrets.GCP_SEPOLIA_API_KEY }} @@ -142,7 +141,7 @@ jobs: exec ./ci.sh docs elif [ "${CI_BARRETENBERG:-0}" -eq 1 ]; then exec ./ci.sh barretenberg - elif [ "${{ contains(github.ref, '-nightly.') }}" == "true" ]; then + elif [ "${{ contains(github.ref, '-nightly.') }}" == "true" ] || [ "${{ contains(github.ref, '-rc.') }}" == "true" ]; then exec ./ci.sh nightly elif [ "${{ startsWith(github.ref, 'refs/tags/v') }}" == "true" ]; then exec ./ci.sh release @@ -161,6 +160,8 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} run: | + # Reauth the git repo with our GITHUB_TOKEN + git remote set-url origin https://x-access-token:${GITHUB_TOKEN}@github.com/${{ github.repository }} # Get the base commit (merge-base) for the PR ./scripts/merge-train/squash-pr.sh \ "${{ github.event.pull_request.number }}" \ @@ -168,14 +169,14 @@ jobs: "${{ github.event.pull_request.base.ref }}" \ "${{ github.event.pull_request.base.sha }}" gh pr edit "${{ github.event.pull_request.number }}" --remove-label "ci-squash-and-merge" - gh pr ready "${{ github.event.pull_request.number }}" || true + gh pr merge "${{ github.event.pull_request.number }}" --auto -m || true - name: Download benchmarks - if: github.event_name == 'merge_group' + if: github.event_name == 'merge_group' || env.CI_FULL == '1' run: ./ci.sh gh-bench - name: Upload benchmarks - if: github.event_name == 'merge_group' + if: github.event_name == 'merge_group' || env.CI_FULL == '1' uses: benchmark-action/github-action-benchmark@4de1bed97a47495fc4c5404952da0499e31f5c29 with: name: Aztec Benchmarks diff --git 
a/.github/workflows/create-release-branch.yml b/.github/workflows/create-release-branch.yml new file mode 100644 index 000000000000..725e124ce57b --- /dev/null +++ b/.github/workflows/create-release-branch.yml @@ -0,0 +1,66 @@ +name: Create Release Branch + +# Take the current version from the release-please-manifest.json file on `next`, +# and create a release branch for it. Then update the release-please-manifest.json file on `next` to the next version. + +on: + workflow_dispatch: + inputs: + source_commit: + description: "Source commit SHA from next branch" + required: true + type: string + +env: + GITHUB_TOKEN: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + +jobs: + create-release-branch: + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + ref: ${{ github.event.inputs.source_commit }} + token: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + fetch-depth: 0 + + - name: Configure Git + run: | + git config --global user.name "AztecBot" + git config --global user.email "tech@aztecprotocol.com" + + - name: Create release branch + run: | + # Get the version from the release-please-manifest.json file + CURRENT_VERSION=$(jq -r '."."' .release-please-manifest.json) + + # grab major version + CURRENT_MAJOR_VERSION="${CURRENT_VERSION%%.*}" + + # create branch name + BRANCH_NAME="v${CURRENT_MAJOR_VERSION}" + + # Check if branch already exists + if git ls-remote --heads origin "$BRANCH_NAME" | grep -q "$BRANCH_NAME"; then + echo "Error: Branch $BRANCH_NAME already exists" + exit 1 + fi + + # Create and push the branch + git checkout -b "$BRANCH_NAME" + git push origin "$BRANCH_NAME" + + # increment major version + NEXT_MAJOR_VERSION=$((CURRENT_MAJOR_VERSION + 1)) + NEXT_VERSION="${NEXT_MAJOR_VERSION}.0.0" + + # update release-please-manifest.json on `next` branch + git fetch origin next + git checkout -B next origin/next + jq --arg version "$NEXT_VERSION" '.["."] = 
$version' .release-please-manifest.json > temp.json && mv temp.json .release-please-manifest.json + git add .release-please-manifest.json + git commit -m "chore(release): update release-please-manifest.json to $NEXT_VERSION" + git push origin next diff --git a/.github/workflows/deploy-network.yml b/.github/workflows/deploy-network.yml new file mode 100644 index 000000000000..b596a3a4dcd9 --- /dev/null +++ b/.github/workflows/deploy-network.yml @@ -0,0 +1,164 @@ +# Low-level workflow to deploy a single network +# This is called by other deployment workflows +name: Deploy Network + +on: + workflow_call: + inputs: + network: + description: 'Network to deploy (e.g., staging-public, staging-ignition, testnet, next-net)' + required: true + type: string + semver: + description: 'Semver version (e.g., 2.3.4)' + required: true + type: string + docker_image_tag: + description: 'Full docker image tag (optional, defaults to semver)' + required: false + type: string + ref: + description: 'Git ref to checkout' + required: false + type: string + workflow_dispatch: + inputs: + network: + description: 'Network to deploy (e.g., staging-public, staging-ignition, testnet, next-net)' + required: true + type: choice + options: + - staging-public + - staging-ignition + - testnet + - next-net + semver: + description: 'Semver version (e.g., 2.3.4)' + required: true + type: string + docker_image_tag: + description: 'Full docker image tag (optional, defaults to semver)' + required: false + type: string + +concurrency: + group: deploy-network-${{ inputs.network }}-${{ inputs.semver }}-${{ github.ref || github.ref_name }} + cancel-in-progress: true + +jobs: + deploy-network: + runs-on: ubuntu-latest + env: + GOOGLE_APPLICATION_CREDENTIALS: /tmp/gcp-key.json + steps: + - name: Determine checkout ref + id: checkout-ref + run: | + # Use inputs.ref if provided (workflow_call), otherwise use github.ref + if [[ -n "${{ inputs.ref }}" ]]; then + echo "ref=${{ inputs.ref }}" >> $GITHUB_OUTPUT + else + 
echo "ref=${{ github.ref }}" >> $GITHUB_OUTPUT + fi + + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + ref: ${{ steps.checkout-ref.outputs.ref }} + fetch-depth: 0 + persist-credentials: false + submodules: recursive # Initialize git submodules for l1-contracts dependencies + + - name: Validate inputs + run: | + # Validate network + if [[ ! -f "spartan/environments/${{ inputs.network }}.env" ]]; then + echo "Error: Environment file not found for network '${{ inputs.network }}'" + echo "Available networks:" + ls -1 spartan/environments/ | grep -v '\.local\.env$' || echo "No environment files found" + exit 1 + fi + + # Validate semver format + if ! echo "${{ inputs.semver }}" | grep -Eq '^[0-9]+\.[0-9]+\.[0-9]+(-.*)?$'; then + echo "Error: Invalid semver format '${{ inputs.semver }}'. Expected format: X.Y.Z or X.Y.Z-suffix" + exit 1 + fi + + # Extract major version for v2 check + major_version="${{ inputs.semver }}" + major_version="${major_version%%.*}" + echo "MAJOR_VERSION=$major_version" >> $GITHUB_ENV + + - name: Store the GCP key in a file + env: + GCP_SA_KEY: ${{ secrets.GCP_SA_KEY }} + run: | + set +x + umask 077 + printf '%s' "$GCP_SA_KEY" > "$GOOGLE_APPLICATION_CREDENTIALS" + jq -e . 
"$GOOGLE_APPLICATION_CREDENTIALS" >/dev/null + + - name: Setup GCP authentication + run: | + gcloud auth activate-service-account --key-file="$GOOGLE_APPLICATION_CREDENTIALS" + + - name: Setup gcloud and install GKE auth plugin + uses: google-github-actions/setup-gcloud@v2 + with: + install_components: 'gke-gcloud-auth-plugin' + + - name: Setup Terraform + uses: hashicorp/setup-terraform@633666f66e0061ca3b725c73b2ec20cd13a8fdd1 + with: + terraform_version: "1.7.5" + terraform_wrapper: false # Disable the wrapper that adds debug output, this messes with reading terraform output + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + + - name: Deploy network + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + GITHUB_TOKEN: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + RUN_ID: ${{ github.run_id }} + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + GOOGLE_APPLICATION_CREDENTIALS: ${{ env.GOOGLE_APPLICATION_CREDENTIALS }} + REF_NAME: "v${{ inputs.semver }}" + GCP_PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }} + AZTEC_DOCKER_IMAGE: "aztecprotocol/aztec:${{ inputs.docker_image_tag || inputs.semver }}" + run: | + echo "Deploying network: ${{ inputs.network }}" + echo "Using image: $AZTEC_DOCKER_IMAGE" + echo "Using branch/ref: ${{ steps.checkout-ref.outputs.ref }}" + + cd spartan + ./scripts/install_deps.sh + ./scripts/network_deploy.sh "${{ inputs.network }}" + + - name: Update testnet monitoring (testnet only) + if: inputs.network == 'testnet' && !contains(inputs.semver, '-') + env: + MONITORING_NAMESPACE: testnet-block-height-monitor + run: | + echo "Updating monitoring app for testnet deployment..." 
+ ./spartan/metrics/testnet-monitor/scripts/update-monitoring.sh testnet ${{ env.MONITORING_NAMESPACE }} + + - name: Notify Slack on failure + if: failure() + env: + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + run: | + if [ -n "${SLACK_BOT_TOKEN}" ]; then + read -r -d '' data <" + } + EOF + curl -X POST https://slack.com/api/chat.postMessage \ + -H "Authorization: Bearer $SLACK_BOT_TOKEN" \ + -H "Content-type: application/json" \ + --data "$data" + fi diff --git a/.github/workflows/deploy-next-net.yml b/.github/workflows/deploy-next-net.yml new file mode 100644 index 000000000000..60a7329b4ade --- /dev/null +++ b/.github/workflows/deploy-next-net.yml @@ -0,0 +1,72 @@ +# Deploy next-net environment +# This workflow deploys the next-net environment with a specified version +# Runs nightly with the latest nightly tag, or can be manually triggered with any image +name: Deploy Next Net + +on: + schedule: + # Run every night at 4:00 AM UTC, after the nightly tag has been created + - cron: "0 4 * * *" + workflow_dispatch: + inputs: + image_tag: + description: 'Docker image tag (e.g., 2.3.4, 3.0.0-nightly.20251004-amd64, or leave empty for latest nightly)' + required: false + type: string + +concurrency: + group: deploy-next-net-${{ inputs.image_tag || 'nightly' }} + cancel-in-progress: true + +jobs: + get-image-tag: + runs-on: ubuntu-latest + outputs: + tag: ${{ steps.determine_tag.outputs.TAG }} + semver: ${{ steps.determine_tag.outputs.SEMVER }} + + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + + - name: Determine image tag + id: determine_tag + run: | + if [[ -n "${{ inputs.image_tag }}" ]]; then + # Manual trigger with specified tag + TAG="${{ inputs.image_tag }}" + echo "Using manually specified tag: $TAG" + + # Extract semver (remove -amd64 suffix if present, remove -nightly.YYYYMMDD if present) + SEMVER=$(echo "$TAG" | sed 's/-amd64$//' | sed 's/-nightly\.[0-9]\{8\}//') + else + # Scheduled nightly run - get latest nightly tag + 
current_version=$(jq -r '."."' .release-please-manifest.json) + echo "Current version: $current_version" + + # Format the tag as: <version>-nightly.<YYYYMMDD>-amd64 + nightly_tag="${current_version}-nightly.$(date -u +%Y%m%d)-amd64" + + # Check if the tag exists on docker hub + TAGS=$(curl -s https://registry.hub.docker.com/v2/repositories/aztecprotocol/aztec/tags/$nightly_tag) + if [[ "$TAGS" != *"not found"* ]]; then + TAG="$nightly_tag" + SEMVER="$current_version" + echo "Using nightly tag: $TAG" + else + echo "Error: Tag $nightly_tag not published to docker hub" + exit 1 + fi + fi + + echo "TAG=$TAG" >> "$GITHUB_OUTPUT" + echo "SEMVER=$SEMVER" >> "$GITHUB_OUTPUT" + + deploy-next-net: + needs: get-image-tag + uses: ./.github/workflows/deploy-network.yml + with: + network: next-net + semver: ${{ needs.get-image-tag.outputs.semver }} + docker_image_tag: ${{ needs.get-image-tag.outputs.tag }} + ref: ${{ github.ref }} + secrets: inherit diff --git a/.github/workflows/deploy-staging-network.yml b/.github/workflows/deploy-staging-network.yml new file mode 100644 index 000000000000..683578bd4914 --- /dev/null +++ b/.github/workflows/deploy-staging-network.yml @@ -0,0 +1,47 @@ +# Deploy a single staging network +# This workflow can be called directly or from other workflows +# Now delegates to the lower-level deploy-network workflow +name: Deploy Staging Network + +on: + workflow_call: + inputs: + network: + description: 'Network to deploy (e.g., staging-public, staging-ignition, testnet)' + required: true + type: string + semver: + description: 'Semver version (e.g., 2.3.4)' + required: true + type: string + ref: + description: 'Git ref to checkout' + required: false + type: string + workflow_dispatch: + inputs: + network: + description: 'Network to deploy (e.g., staging-public, staging-ignition, testnet)' + required: true + type: choice + options: + - staging-public + - staging-ignition + - testnet + semver: + description: 'Semver version (e.g., 2.3.4)' + required: true + type: string + 
+concurrency: + group: deploy-staging-network-${{ inputs.network }}-${{ inputs.semver }}-${{ github.ref || github.ref_name }} + cancel-in-progress: true + +jobs: + deploy-network: + uses: ./.github/workflows/deploy-network.yml + with: + network: ${{ inputs.network }} + semver: ${{ inputs.semver }} + ref: ${{ inputs.ref }} + secrets: inherit diff --git a/.github/workflows/deploy-staging-networks.yml b/.github/workflows/deploy-staging-networks.yml new file mode 100644 index 000000000000..cfe7efd279af --- /dev/null +++ b/.github/workflows/deploy-staging-networks.yml @@ -0,0 +1,103 @@ +# Deploy staging networks when CI3 completes for a tagged release, or manually with a semver input. +# Only runs on v2 releases. +name: Deploy Staging Networks + +on: + workflow_run: + workflows: ["CI3"] + types: + - completed + workflow_dispatch: + inputs: + semver: + description: Semver version (e.g., 2.3.4) + required: true + type: string + +concurrency: + group: deploy-staging-networks-${{ (github.event_name == 'workflow_run' && github.event.workflow_run.head_sha) || (github.event_name == 'workflow_dispatch' && inputs.semver) || github.sha }} + cancel-in-progress: true + +jobs: + determine-semver: + if: | + (github.event_name == 'workflow_run' && github.event.workflow_run.conclusion == 'success') || + (github.event_name == 'workflow_dispatch') + runs-on: ubuntu-latest + outputs: + semver: ${{ steps.semver.outputs.value }} + major_version: ${{ steps.semver.outputs.major_version }} + should_deploy: ${{ steps.semver.outputs.should_deploy }} + branch: ${{ steps.branch.outputs.value }} + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + ref: ${{ github.event_name == 'workflow_run' && github.event.workflow_run.head_sha || github.ref }} + fetch-depth: 0 + persist-credentials: false + + - name: Determine branch + id: branch + run: | + if [[ "${{ github.event_name }}" == "workflow_dispatch" && -n "${{ github.ref }}" ]]; then + echo 
"value=${{ github.ref }}" >> $GITHUB_OUTPUT + elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then + echo "value=${{ github.ref_name }}" >> $GITHUB_OUTPUT + else + echo "value=${{ github.event.workflow_run.head_branch || github.ref_name }}" >> $GITHUB_OUTPUT + fi + + - name: Determine semver + id: semver + run: | + if [[ "${{ github.event_name }}" == "workflow_run" ]]; then + git fetch --tags --force + tag=$(git tag --points-at "${{ github.event.workflow_run.head_sha }}" | head -n1) + if ! echo "$tag" | grep -Eq '^v[0-9]+\.[0-9]+\.[0-9]+'; then + echo "No semver tag found for head_sha: ${{ github.event.workflow_run.head_sha }}. Skipping." + echo "should_deploy=false" >> $GITHUB_OUTPUT + exit 0 + fi + semver="${tag#v}" + else + semver="${{ inputs.semver }}" + fi + + major_version="${semver%%.*}" + echo "value=$semver" >> $GITHUB_OUTPUT + echo "major_version=$major_version" >> $GITHUB_OUTPUT + echo "should_deploy=true" >> $GITHUB_OUTPUT + + deploy-staging-public: + needs: determine-semver + if: needs.determine-semver.outputs.should_deploy == 'true' && needs.determine-semver.outputs.major_version == '2' + uses: ./.github/workflows/deploy-staging-network.yml + with: + network: staging-public + semver: ${{ needs.determine-semver.outputs.semver }} + ref: ${{ needs.determine-semver.outputs.branch }} + secrets: inherit + + deploy-staging-ignition: + # Depends on staging-public until we are confident in concurrent deployments + needs: [determine-semver, deploy-staging-public] + if: needs.determine-semver.outputs.should_deploy == 'true' && needs.determine-semver.outputs.major_version == '2' + uses: ./.github/workflows/deploy-staging-network.yml + with: + network: staging-ignition + semver: ${{ needs.determine-semver.outputs.semver }} + ref: ${{ needs.determine-semver.outputs.branch }} + secrets: inherit + + deploy-testnet: + # Depends on staging-ignition until we are confident in concurrent deployments + needs: [determine-semver, deploy-staging-ignition] + # 
Only deploy testnet if we are not a pre-release (i.e. semver does not contain a hyphen) + if: needs.determine-semver.outputs.should_deploy == 'true' && needs.determine-semver.outputs.major_version == '2' && !contains(needs.determine-semver.outputs.semver, '-') + uses: ./.github/workflows/deploy-staging-network.yml + with: + network: testnet + semver: ${{ needs.determine-semver.outputs.semver }} + ref: ${{ needs.determine-semver.outputs.branch }} + secrets: inherit diff --git a/.github/workflows/devnet-deploy.yml b/.github/workflows/devnet-deploy.yml deleted file mode 100644 index 16c726c59c4a..000000000000 --- a/.github/workflows/devnet-deploy.yml +++ /dev/null @@ -1,166 +0,0 @@ -name: Deploy devnet - -on: - workflow_dispatch: - inputs: - cluster: - description: The cluster to deploy to, e.g. aztec-gke-public - required: true - default: "aztec-gke-public" - namespace: - description: The namespace to deploy to, e.g. smoke - required: true - default: "devnet-canary" - aztec_docker_image: - description: The Aztec Docker image to use - required: true - default: "aztecprotocol/aztec" - deployment_mnemonic_secret_name: - description: The name of the secret which holds the boot node's contract deployment mnemonic - required: true - default: junk-mnemonic - deployment_salt: - description: The salt to use for this deployment. 
Defaults to random - required: false - type: string - default: "" - respect_tf_lock: - description: Whether to respect the Terraform lock - required: false - default: "true" - sepolia_deployment: - description: Whether to deploy to Sepolia - required: false - default: "false" - ref: - description: The branch name to deploy from - required: false - type: string - default: "master" - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: false - -env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - CONTRACT_S3_BUCKET: s3://static.aztec.network - CLUSTER_NAME: ${{ inputs.cluster }} - REGION: us-west1-a - NAMESPACE: ${{ inputs.namespace }} - AZTEC_DOCKER_IMAGE: ${{ inputs.aztec_docker_image }} - -jobs: - deploy-network: - uses: ./.github/workflows/network-deploy.yml - with: - namespace: ${{ github.event.inputs.namespace }} - cluster: ${{ github.event.inputs.cluster }} - # This represents the name of the deployment as well. 
- values_file: release-devnet.yaml - aztec_docker_image: ${{ github.event.inputs.aztec_docker_image }} - deployment_mnemonic_secret_name: ${{ github.event.inputs.deployment_mnemonic_secret_name }} - deployment_salt: ${{ github.event.inputs.deployment_salt }} - respect_tf_lock: ${{ github.event.inputs.respect_tf_lock }} - run_terraform_destroy: "true" - sepolia_deployment: ${{ github.event.inputs.sepolia_deployment }} - ref: ${{ github.event.inputs.ref }} - - secrets: - GCP_SA_KEY: ${{ secrets.GCP_SA_KEY }} - - bootstrap-network: - runs-on: ubuntu-latest - needs: deploy-network - steps: - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@6a64f289c4a4b67a1e2c44cc4bd9d6f7bc59b156 - with: - aws-access-key-id: ${{ env.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ env.AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - - - name: Authenticate to Google Cloud - uses: google-github-actions/auth@6fc4af4b145ae7821d527454aa9bd537d1f2dc5f - with: - credentials_json: ${{ secrets.GCP_SA_KEY }} - - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@6189d56e4096ee891640bb02ac264be376592d6a - - - name: Setup kubectl access - run: | - gcloud components install kubectl gke-gcloud-auth-plugin --quiet - gcloud container clusters get-credentials ${{ env.CLUSTER_NAME }} --region ${{ env.REGION }} - - - name: Setup helm - run: | - curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 - chmod +x get_helm.sh - sudo ./get_helm.sh - rm get_helm.sh - - - name: Bootstrap network - run: | - set -eu -o pipefail - - port_forward_pids=() - - cleanup() { - echo "Cleaning up port-forwards..." - for pid in "${port_forward_pids[@]}"; do - kill $pid 2>/dev/null - done - } - - trap cleanup EXIT - - echo "Waiting for PXE pods to be ready..." - if ! 
kubectl wait -n $NAMESPACE --for=condition=ready pod -l app=pxe --timeout=10m; then - echo "Error: PXE pods did not become ready within timeout" - exit 1 - fi - - helm get values $NAMESPACE -n $NAMESPACE -o json --all > helm_values.json - - PXE_PORT="$(jq -r .pxe.service.nodePort helm_values.json)" - FAUCET_PORT="$(jq -r .faucet.apiServerPort helm_values.json)" - ETHEREUM_PORT="$(jq -r .ethereum.execution.service.port helm_values.json)" - L1_CHAIN_ID="$(jq -r .ethereum.chainId helm_values.json)" - - MNEMONIC="$(jq -r .aztec.l1DeploymentMnemonic helm_values.json)" - echo "::add-mask::$MNEMONIC" - - rm helm_values.json - - ADDRESS_INDEX=69 - - kubectl port-forward -n $NAMESPACE svc/$NAMESPACE-aztec-network-pxe $PXE_PORT &>/dev/null & - port_forward_pids+=($!) - - kubectl port-forward -n $NAMESPACE svc/$NAMESPACE-aztec-network-faucet-api $FAUCET_PORT &>/dev/null & - port_forward_pids+=($!) - - # port-forward directly to the pod because the Eth node does not have a service definition - ETH_POD_NAME=$(kubectl get pods -n $NAMESPACE -l app=eth-execution -o jsonpath='{.items[0].metadata.name}') - kubectl port-forward -n $NAMESPACE pod/$ETH_POD_NAME $ETHEREUM_PORT &>/dev/null & - port_forward_pids+=($!) 
- - # wait for port-forwards to establish - sleep 5 - - docker run --rm --network host $AZTEC_DOCKER_IMAGE node ./aztec/dest/bin/index.js bootstrap-network \ - --rpc-url http://127.0.0.1:$PXE_PORT \ - --l1-rpc-urls http://127.0.0.1:$ETHEREUM_PORT \ - --l1-chain-id "$L1_CHAIN_ID" \ - --mnemonic "$MNEMONIC" \ - --address-index "$ADDRESS_INDEX" \ - --json | tee ./basic_contracts.json - - aws s3 cp ./basic_contracts.json ${{ env.CONTRACT_S3_BUCKET }}/devnet/basic_contracts.json - - DEVCOIN_L1_ADDRESS=$(jq -r .devCoinL1 ./basic_contracts.json) - DEVCOIN_DRIP_AMOUNT=1000000000 - curl -X POST -d address=$DEVCOIN_L1_ADDRESS -d amount=$DEVCOIN_DRIP_AMOUNT \ - http://127.0.0.1:$FAUCET_PORT/l1-asset diff --git a/.github/workflows/docs-typesense.yml b/.github/workflows/docs-typesense.yml index c9ea468cc9ed..3af5964e8725 100644 --- a/.github/workflows/docs-typesense.yml +++ b/.github/workflows/docs-typesense.yml @@ -3,8 +3,8 @@ name: Docs Scraper on: workflow_dispatch: schedule: - # Run the workflow every night at 3:00 AM UTC, after nightly release - - cron: "0 4 * * *" + # Run the workflow every night at 5:00 AM UTC, after nightly release and docs update + - cron: "0 5 * * *" push: branches: - master diff --git a/.github/workflows/ensure-funded-environment.yml b/.github/workflows/ensure-funded-environment.yml new file mode 100644 index 000000000000..83c7a19d45de --- /dev/null +++ b/.github/workflows/ensure-funded-environment.yml @@ -0,0 +1,130 @@ +# Ensure a single environment has all publisher keys funded +# This workflow checks and funds publisher keys for a given environment +name: Ensure Funded Environment + +on: + workflow_call: + inputs: + environment: + description: 'Environment to fund (e.g., staging-public, next-net, staging-ignition, testnet)' + required: true + type: string + low_watermark: + description: 'Minimum ETH balance (default: 0.5)' + required: false + type: string + default: '0.5' + high_watermark: + description: 'Target ETH balance when funding (default: 
1.0)' + required: false + type: string + default: '1.0' + workflow_dispatch: + inputs: + environment: + description: 'Environment to fund' + required: true + type: choice + options: + - staging-public + - next-net + - staging-ignition + - testnet + low_watermark: + description: 'Minimum ETH balance' + required: false + type: string + default: '0.5' + high_watermark: + description: 'Target ETH balance when funding' + required: false + type: string + default: '1.0' + +concurrency: + group: ensure-funded-environment-${{ inputs.environment }} + cancel-in-progress: false # Don't cancel funding operations + +jobs: + ensure-funded: + runs-on: ubuntu-latest + env: + GOOGLE_APPLICATION_CREDENTIALS: /tmp/gcp-key.json + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + ref: ${{ github.ref }} + fetch-depth: 0 + persist-credentials: false + + - name: Validate inputs + run: | + # Validate environment + if [[ ! -f "spartan/environments/${{ inputs.environment }}.env" ]]; then + echo "Error: Environment file not found for environment '${{ inputs.environment }}'" + echo "Available environments:" + ls -1 spartan/environments/ | grep -v '\.local\.env$' || echo "No environment files found" + exit 1 + fi + + - name: Store the GCP key in a file + env: + GCP_SA_KEY: ${{ secrets.GCP_SA_KEY }} + run: | + set +x + umask 077 + printf '%s' "$GCP_SA_KEY" > "$GOOGLE_APPLICATION_CREDENTIALS" + jq -e . 
"$GOOGLE_APPLICATION_CREDENTIALS" >/dev/null + + - name: Setup GCP authentication + run: | + gcloud auth activate-service-account --key-file="$GOOGLE_APPLICATION_CREDENTIALS" + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + + - name: Install bc + run: sudo apt-get update && sudo apt-get install -y bc + + - name: Fetch secrets from GCP + env: + GCP_PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }} + run: | + # Fetch the environment-specific secrets + FUNDING_PRIVATE_KEY=$(gcloud secrets versions access latest --secret="sepolia-funding-private-key" --project="$GCP_PROJECT_ID") + + # Export to environment + echo "FUNDING_PRIVATE_KEY=$FUNDING_PRIVATE_KEY" >> $GITHUB_ENV + + - name: Ensure funded environment + env: + FUNDING_PRIVATE_KEY: ${{ env.FUNDING_PRIVATE_KEY }} + GCP_PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }} + run: | + cd spartan/scripts + + # Run the funding script + ./ensure_funded_environment.sh \ + "${{ inputs.environment }}" \ + "$FUNDING_PRIVATE_KEY" \ + "${{ inputs.low_watermark }}" \ + "${{ inputs.high_watermark }}" + + - name: Notify Slack on failure + if: failure() + env: + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + run: | + if [ -n "${SLACK_BOT_TOKEN}" ]; then + read -r -d '' data <" + } + EOF + curl -X POST https://slack.com/api/chat.postMessage \ + -H "Authorization: Bearer $SLACK_BOT_TOKEN" \ + -H "Content-type: application/json" \ + --data "$data" + fi diff --git a/.github/workflows/ensure-funded-environments.yml b/.github/workflows/ensure-funded-environments.yml new file mode 100644 index 000000000000..ea5f12958521 --- /dev/null +++ b/.github/workflows/ensure-funded-environments.yml @@ -0,0 +1,57 @@ +# Ensure multiple environments have all publisher keys funded +# This workflow checks and funds publisher keys for staging-public, next-net, staging-ignition, and testnet +name: Ensure Funded Environments + +on: + workflow_dispatch: + inputs: + low_watermark: + description: 'Minimum ETH balance' + required: false + type: string + 
default: '0.5' + high_watermark: + description: 'Target ETH balance when funding' + required: false + type: string + default: '1.0' + schedule: + # Run daily at 2 AM UTC + - cron: '0 2 * * *' + +concurrency: + group: ensure-funded-environments + cancel-in-progress: false # Don't cancel funding operations + +jobs: + # fund-staging-public: + # uses: ./.github/workflows/ensure-funded-environment.yml + # with: + # environment: staging-public + # low_watermark: ${{ inputs.low_watermark || '0.5' }} + # high_watermark: ${{ inputs.high_watermark || '1.0' }} + # secrets: inherit + + fund-next-net: + uses: ./.github/workflows/ensure-funded-environment.yml + with: + environment: next-net + low_watermark: ${{ inputs.low_watermark || '0.5' }} + high_watermark: ${{ inputs.high_watermark || '1.0' }} + secrets: inherit + + # fund-staging-ignition: + # uses: ./.github/workflows/ensure-funded-environment.yml + # with: + # environment: staging-ignition + # low_watermark: ${{ inputs.low_watermark || '0.5' }} + # high_watermark: ${{ inputs.high_watermark || '1.0' }} + # secrets: inherit + + # fund-testnet: + # uses: ./.github/workflows/ensure-funded-environment.yml + # with: + # environment: testnet + # low_watermark: ${{ inputs.low_watermark || '0.5' }} + # high_watermark: ${{ inputs.high_watermark || '1.0' }} + # secrets: inherit diff --git a/.github/workflows/fund-sepolia-accounts.yml b/.github/workflows/fund-sepolia-accounts.yml index 9703a458dd9c..dfedb4404769 100644 --- a/.github/workflows/fund-sepolia-accounts.yml +++ b/.github/workflows/fund-sepolia-accounts.yml @@ -81,7 +81,6 @@ jobs: else echo "new_mnemonic=true" >> "$GITHUB_OUTPUT" fi - - name: Fund accounts id: fund-accounts run: | @@ -92,19 +91,16 @@ jobs: export MNEMONIC="${{ steps.get-mnemonic.outputs.mnemonic }}" echo "Using mnemonic from GCP" fi - REPO=$(git rev-parse --show-toplevel) MNEMONIC_FILE=$(mktemp) export FUNDING_PRIVATE_KEY=${{ secrets.SEPOLIA_FUNDING_PRIVATE_KEY }} export ETHEREUM_HOST="https://json-rpc.${{ 
secrets.GCP_SEPOLIA_URL }}?key=${{ secrets.GCP_SEPOLIA_API_KEY }}" - echo "Funding accounts..." $REPO/spartan/scripts/prepare_sepolia_accounts.sh ${{ inputs.values_file }} ${{ inputs.funding_amount }} "$MNEMONIC_FILE" mnemonic=$(cat "$MNEMONIC_FILE") rm "$MNEMONIC_FILE" echo "::add-mask::$mnemonic" echo "mnemonic=$mnemonic" >> "$GITHUB_OUTPUT" - - name: Save mnemonic to GCP if: ${{ steps.get-mnemonic.outputs.new_mnemonic == 'true' }} run: | diff --git a/.github/workflows/merge-train-create-pr.yml b/.github/workflows/merge-train-create-pr.yml index c4c047ab1a98..bfa078dca595 100644 --- a/.github/workflows/merge-train-create-pr.yml +++ b/.github/workflows/merge-train-create-pr.yml @@ -11,32 +11,47 @@ jobs: permissions: contents: read pull-requests: write - + steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - + with: + fetch-depth: 0 + - name: Check if PR exists and create if needed env: GITHUB_TOKEN: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} run: | - branch="${{ github.ref_name }}" - - # Check if PR already exists for this branch - existing_pr=$(gh pr list --state open --head "$branch" --json number --jq '.[0].number // empty') - - if [[ -z "$existing_pr" ]]; then - echo "No PR exists for $branch, creating one" - - # Determine base branch (default to next) - base_branch="next" - - # Create PR with ci-no-squash label - gh pr create --base "$base_branch" --head "$branch" \ - --title "feat: $branch" \ - --body "$(echo -e "See [merge-train-readme.md](https://github.com/${{ github.repository }}/blob/next/.github/workflows/merge-train-readme.md).\nThis is a merge-train.")" \ - --label "ci-no-squash" - - echo "Created PR for $branch" - else - echo "PR #$existing_pr already exists for $branch" - fi \ No newline at end of file + branch="${{ github.ref_name }}" + + # Skip if this is a merge commit (check for multiple parents) + parent_count=$(git rev-list --parents -n 1 "${{ github.sha }}" | wc -w) + if [[ $parent_count -gt 2 ]]; then + echo "Skipping: This 
is a merge commit." + exit 0 + fi + + # Skip if this commit is already in the next branch + if git merge-base --is-ancestor "${{ github.sha }}" origin/next; then + echo "Skipping: This commit is already in the next branch" + exit 0 + fi + + # Check if PR already exists for this branch + existing_pr=$(gh pr list --state open --head "$branch" --json number --jq '.[0].number // empty') + + if [[ -z "$existing_pr" ]]; then + echo "No PR exists for $branch, creating one" + + # Determine base branch (default to next) + base_branch="next" + + # Create PR with ci-no-squash label + gh pr create --base "$base_branch" --head "$branch" \ + --title "feat: $branch" \ + --body "$(echo -e "See [merge-train-readme.md](https://github.com/${{ github.repository }}/blob/next/.github/workflows/merge-train-readme.md).\nThis is a merge-train.")" \ + --label "ci-no-squash" + + echo "Created PR for $branch" + else + echo "PR #$existing_pr already exists for $branch" + fi diff --git a/.github/workflows/merge-train-recreate.yml b/.github/workflows/merge-train-recreate.yml index 912cd740dc57..4471f75d3fe2 100644 --- a/.github/workflows/merge-train-recreate.yml +++ b/.github/workflows/merge-train-recreate.yml @@ -3,7 +3,7 @@ name: Merge-Train Recreate on: pull_request: types: [closed] -≈ + jobs: recreate: if: ${{ github.event_name == 'workflow_dispatch' || (github.event.pull_request.merged && startsWith(github.event.pull_request.head.ref, 'merge-train/')) }} diff --git a/.github/workflows/metrics-deploy.yml b/.github/workflows/metrics-deploy.yml index 0f2b20a990c7..ea781d7d4a90 100644 --- a/.github/workflows/metrics-deploy.yml +++ b/.github/workflows/metrics-deploy.yml @@ -31,17 +31,12 @@ on: description: The branch name to deploy from required: false type: string - default: "master" + default: "next" grafana_dashboard_password_secret_name: description: The name of the secret which holds the Grafana dashboard password required: true type: string default: "grafana-dashboard-password" - 
slack_webhook_url_secret_name: - description: The name of the secret which holds the Slack webhook URL - required: true - type: string - default: "slack-webhook-url" secrets: GCP_SA_KEY: required: true @@ -70,15 +65,11 @@ on: ref: description: The branch name to deploy from required: false - default: "master" + default: "next" grafana_dashboard_password_secret_name: description: The name of the secret which holds the Grafana dashboard password required: true default: "grafana-dashboard-password" - slack_webhook_url_secret_name: - description: The name of the secret which holds the Slack webhook URL - required: true - default: "slack-webhook-url" jobs: metrics_deployment: @@ -98,7 +89,12 @@ jobs: TF_STATE_BUCKET: aztec-terraform GKE_CLUSTER_CONTEXT: "gke_testnet-440309_us-west1-a_${{ inputs.cluster }}" GRAFANA_DASHBOARD_PASSWORD_SECRET_NAME: ${{ inputs.grafana_dashboard_password_secret_name }} - SLACK_WEBHOOK_URL_SECRET_NAME: ${{ inputs.slack_webhook_url_secret_name }} + SLACK_WEBHOOK_URL_SECRET_NAME: slack-webhook-url + SLACK_WEBHOOK_STAGING_PUBLIC_SECRET_NAME: slack-webhook-staging-public-url + SLACK_WEBHOOK_STAGING_IGNITION_SECRET_NAME: slack-webhook-staging-ignition-url + SLACK_WEBHOOK_NEXT_SCENARIO_SECRET_NAME: slack-webhook-next-scenario-url + SLACK_WEBHOOK_TESTNET_SECRET_NAME: slack-webhook-testnet-url + SLACK_WEBHOOK_MAINNET_SECRET_NAME: slack-webhook-mainnet-url steps: - name: Checkout code @@ -160,6 +156,9 @@ jobs: -var="GKE_CLUSTER_CONTEXT=${{ env.GKE_CLUSTER_CONTEXT }}" \ -var="GRAFANA_PASSWORD_SECRET_NAME=${{ env.GRAFANA_DASHBOARD_PASSWORD_SECRET_NAME }}" \ -var="SLACK_WEBHOOK_SECRET_NAME=${{ env.SLACK_WEBHOOK_URL_SECRET_NAME }}" \ + -var="SLACK_WEBHOOK_STAGING_PUBLIC_SECRET_NAME=${{ env.SLACK_WEBHOOK_STAGING_PUBLIC_SECRET_NAME }}" \ + -var="SLACK_WEBHOOK_STAGING_IGNITION_SECRET_NAME=${{ env.SLACK_WEBHOOK_STAGING_IGNITION_SECRET_NAME }}" \ + -var="SLACK_WEBHOOK_NEXT_SCENARIO_SECRET_NAME=${{ env.SLACK_WEBHOOK_NEXT_SCENARIO_SECRET_NAME }}" \ -lock=${{ 
inputs.respect_tf_lock }} - name: Terraform Plan @@ -171,6 +170,9 @@ jobs: -var="GKE_CLUSTER_CONTEXT=${{ env.GKE_CLUSTER_CONTEXT }}" \ -var="GRAFANA_PASSWORD_SECRET_NAME=${{ env.GRAFANA_DASHBOARD_PASSWORD_SECRET_NAME }}" \ -var="SLACK_WEBHOOK_SECRET_NAME=${{ env.SLACK_WEBHOOK_URL_SECRET_NAME }}" \ + -var="SLACK_WEBHOOK_STAGING_PUBLIC_SECRET_NAME=${{ env.SLACK_WEBHOOK_STAGING_PUBLIC_SECRET_NAME }}" \ + -var="SLACK_WEBHOOK_STAGING_IGNITION_SECRET_NAME=${{ env.SLACK_WEBHOOK_STAGING_IGNITION_SECRET_NAME }}" \ + -var="SLACK_WEBHOOK_NEXT_SCENARIO_SECRET_NAME=${{ env.SLACK_WEBHOOK_NEXT_SCENARIO_SECRET_NAME }}" \ -out=tfplan \ -lock=${{ inputs.respect_tf_lock }} diff --git a/.github/workflows/network-test.yml b/.github/workflows/network-test.yml deleted file mode 100644 index e4766a23548b..000000000000 --- a/.github/workflows/network-test.yml +++ /dev/null @@ -1,87 +0,0 @@ -name: Aztec Network Test - -on: - workflow_dispatch: - inputs: - cluster: - description: The cluster to deploy to, e.g. aztec-gke-private - required: true - default: "aztec-gke-private" - namespace: - description: The namespace to deploy to, e.g. smoke - required: true - test: - description: The test to run, e.g. 
spartan/smoke.test.ts - required: true - -jobs: - network_test: - runs-on: ubuntu-latest - - env: - TEST_DOCKER_IMAGE: ${{ inputs.aztec_e2e_docker_image }} - NAMESPACE: ${{ inputs.namespace }} - TEST: ${{ inputs.test }} - CHART_PATH: ./spartan/aztec-network - CLUSTER_NAME: ${{ inputs.cluster }} - REGION: us-west1-a - PROJECT_ID: testnet-440309 - GKE_CLUSTER_CONTEXT: "gke_testnet-440309_us-west1-a_${{ inputs.cluster }}" - - steps: - - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - - - name: Authenticate to Google Cloud - uses: google-github-actions/auth@6fc4af4b145ae7821d527454aa9bd537d1f2dc5f - with: - credentials_json: ${{ secrets.GCP_SA_KEY }} - - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@6189d56e4096ee891640bb02ac264be376592d6a - with: - install_components: gke-gcloud-auth-plugin - - - name: Configure kubectl with GKE cluster - run: | - gcloud container clusters get-credentials ${{ env.CLUSTER_NAME }} --region ${{ env.REGION }} - - - name: Run test - run: | - - # Find 3 free ports between 9000 and 10000 - FREE_PORTS=$(comm -23 <(seq 9000 10000 | sort) <(ss -Htan | awk '{print $4}' | cut -d':' -f2 | sort -u) | shuf | head -n 3) - - # Extract the free ports from the list - PXE_PORT=$(echo $FREE_PORTS | awk '{print $1}') - ANVIL_PORT=$(echo $FREE_PORTS | awk '{print $2}') - METRICS_PORT=$(echo $FREE_PORTS | awk '{print $3}') - - export GRAFANA_PASSWORD=$(kubectl get secrets -n metrics metrics-grafana -o jsonpath='{.data.admin-password}' | base64 --decode) - - gcloud config set project ${{ env.PROJECT_ID }} - - GCLOUD_CONFIG_DIR=$(gcloud info --format='value(config. paths. 
global_config_dir)') - - echo "gcloud config dir: [$GCLOUD_CONFIG_DIR]" - - docker run --rm --network=host \ - -v ~/.kube:/root/.kube \ - -v $GCLOUD_CONFIG_DIR:/root/.config/gcloud \ - -e K8S=gcloud \ - -e CLUSTER_NAME=${{ env.CLUSTER_NAME }} \ - -e REGION=${{ env.REGION }} \ - -e INSTANCE_NAME=${{ env.NAMESPACE }} \ - -e SPARTAN_DIR="/usr/src/spartan" \ - -e NAMESPACE=${{ env.NAMESPACE }} \ - -e HOST_PXE_PORT=$PXE_PORT \ - -e CONTAINER_PXE_PORT=8081 \ - -e HOST_ETHEREUM_PORT=$ANVIL_PORT \ - -e CONTAINER_ETHEREUM_PORT=8545 \ - -e HOST_METRICS_PORT=$METRICS_PORT \ - -e CONTAINER_METRICS_PORT=80 \ - -e GRAFANA_PASSWORD=$GRAFANA_PASSWORD \ - -e DEBUG="aztec:*" \ - -e LOG_JSON=1 \ - -e LOG_LEVEL=debug \ - ${{ env.TEST_DOCKER_IMAGE }} ${{ env.TEST }} diff --git a/.github/workflows/nightly-devnet-test.yml b/.github/workflows/nightly-devnet-test.yml deleted file mode 100644 index 63bf5cd691bb..000000000000 --- a/.github/workflows/nightly-devnet-test.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: Devnet network tests -on: - workflow_dispatch: - schedule: - # Run nightly at 0300 - - cron: "0 3 * * *" - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -env: - # This path is a workaround because the runner does not have perms to upload files anywhere else - STATE_S3_BASE_PATH: s3://aztec-ci-artifacts/build-cache - STATE_S3_KEY: build-cache/devnet-nightly-tests-state.json - AZTEC_VERSION: alpha-testnet - NODE_URL: http://34.169.170.55:8080 - L1_URL: http://34.169.72.63:8545 - FAUCET_URL: http://34.169.129.31:8086 - -jobs: - cli-wallet: - runs-on: ubuntu-latest - steps: - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@6a64f289c4a4b67a1e2c44cc4bd9d6f7bc59b156 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - - - name: Checkout smoke tests (note that this is not pinned to a version, the CLI wallet installed is 
though) - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - with: - sparse-checkout: | - spartan/devnet-smoke-tests - - - name: Run smoke tests - run: | - ./spartan/devnet-smoke-tests/main.sh diff --git a/.github/workflows/nightly-docs-release.yml b/.github/workflows/nightly-docs-release.yml new file mode 100644 index 000000000000..68da87c5dc08 --- /dev/null +++ b/.github/workflows/nightly-docs-release.yml @@ -0,0 +1,161 @@ +name: Nightly Docs Release + +on: + schedule: + # Run every night at 4:00 AM UTC, after nightly tags are created + - cron: "0 4 * * *" + workflow_dispatch: + inputs: + tag: + description: "Specific nightly tag to use (e.g., v3.0.0-nightly.20241201)" + required: false + +permissions: + contents: write + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + +jobs: + check-nightly-tag: + name: Check for new nightly tag + runs-on: ubuntu-latest + outputs: + nightly-tag: ${{ steps.get_tag.outputs.tag }} + should-run: ${{ steps.get_tag.outputs.should_run }} + steps: + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + fetch-depth: 0 + token: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + + - name: Get latest nightly tag + id: get_tag + run: | + if [ -n "${{ github.event.inputs.tag }}" ]; then + # Manual trigger with specific tag + NIGHTLY_TAG="${{ github.event.inputs.tag }}" + echo "Using manual tag: $NIGHTLY_TAG" + else + # Get today's nightly tag + current_version=$(jq -r '."."' .release-please-manifest.json) + NIGHTLY_TAG="v${current_version}-nightly.$(date -u +%Y%m%d)" + echo "Expected nightly tag: $NIGHTLY_TAG" + + # Check if the tag exists + if ! git tag -l | grep -q "^$NIGHTLY_TAG$"; then + echo "Nightly tag $NIGHTLY_TAG does not exist yet. Skipping docs release." 
+ echo "should_run=false" >> $GITHUB_OUTPUT + exit 0 + fi + fi + + # Check if we already have docs for this nightly version + DOCS_VERSION_DIR="docs/versioned_docs/version-$NIGHTLY_TAG" + BB_DOCS_VERSION_DIR="barretenberg/docs/versioned_docs/version-$NIGHTLY_TAG" + + if [ -d "$DOCS_VERSION_DIR" ] || [ -d "$BB_DOCS_VERSION_DIR" ]; then + echo "Docs already exist for $NIGHTLY_TAG. Skipping." + echo "should_run=false" >> $GITHUB_OUTPUT + else + echo "tag=$NIGHTLY_TAG" >> $GITHUB_OUTPUT + echo "should_run=true" >> $GITHUB_OUTPUT + echo "Will create docs for: $NIGHTLY_TAG" + fi + + create-nightly-docs: + name: Create nightly documentation + needs: check-nightly-tag + if: needs.check-nightly-tag.outputs.should-run == 'true' + runs-on: ubuntu-latest + env: + NIGHTLY_TAG: ${{ needs.check-nightly-tag.outputs.nightly-tag }} + + steps: + - name: Checkout at nightly tag + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + ref: ${{ env.NIGHTLY_TAG }} + fetch-depth: 0 + token: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + + - name: Setup dependencies + run: | + sudo apt install -y --no-install-recommends doxygen + corepack enable + + - name: Configure Git + run: | + git config --global user.name AztecBot + git config --global user.email tech@aztecprotocol.com + + - name: Cleanup Aztec docs nightly versions + working-directory: ./docs + run: | + ./scripts/cleanup_nightly_versions.sh + + - name: Create Aztec nightly docs version + working-directory: ./docs + run: | + # Set the commit tag for version macros + export COMMIT_TAG=${{ env.NIGHTLY_TAG }} + + # Install dependencies + yarn install + + # Build docs to ensure everything works + COMMIT_TAG=${{ env.NIGHTLY_TAG }} yarn build + + # Create the versioned docs + yarn docusaurus docs:version ${{ env.NIGHTLY_TAG }} + + echo "Created Aztec docs version: ${{ env.NIGHTLY_TAG }}" + + - name: Update Aztec Docs versions.json with new version + working-directory: ./docs/scripts + run: | + ./update_versions.sh + + - name: 
Cleanup Barretenberg docs nightly versions + working-directory: ./barretenberg/docs + run: | + ./scripts/cleanup_nightly_versions.sh + + - name: Create Barretenberg nightly docs version + working-directory: ./barretenberg/docs + run: | + # Set the commit tag for version macros + export COMMIT_TAG=${{ env.NIGHTLY_TAG }} + + # Install dependencies + yarn install + + # Build docs to ensure everything works + yarn build + + # Create the versioned docs + yarn docusaurus docs:version ${{ env.NIGHTLY_TAG }} + + echo "Created Barretenberg docs version: ${{ env.NIGHTLY_TAG }}" + + - name: Update Barretenberg docs versions.json with new version + working-directory: ./barretenberg/docs/scripts + run: | + ./update_versions.sh + + - name: Commit new Aztec and Barretenberg Docs version + run: | + # Stash the docs changes + git add . + git stash push --staged -m "nightly docs for ${{ env.NIGHTLY_TAG }}" + + # Checkout the next branch + git fetch origin next + git checkout next + + # Apply the stashed changes and commit + git stash pop + git add . + git commit -m "chore(docs): cut new aztec and bb docs version for tag ${{ env.NIGHTLY_TAG }}" + git push origin next diff --git a/.github/workflows/nightly-nextnet-deploy.yml b/.github/workflows/nightly-nextnet-deploy.yml deleted file mode 100644 index 5f5de72f15fd..000000000000 --- a/.github/workflows/nightly-nextnet-deploy.yml +++ /dev/null @@ -1,62 +0,0 @@ -# TOOD(#10775): see 'releases'. We want to move away from this and use a bootstrap-oriented flow with our nightly releases. -name: Nightly nextnet deploy -on: - schedule: - # Run the workflow every night at 4:00 AM UTC. 
After the nightly tag has been created - - cron: "0 4 * * *" - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: false - -jobs: - get-latest-tag: - runs-on: ubuntu-latest - outputs: - tag: ${{ steps.get_tag.outputs.TAG }} - - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - - - name: Get latest nightly tag - id: get_tag - run: | - # Check if tonight's tag has been published to docker hub - current_version=$(jq -r '."."' .release-please-manifest.json) - echo "Current version: $current_version" - # Compute the next major version. e.g. if current version is 1.2.3, next major version is 2.0.0. - if [[ "$current_version" =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then - major=$(( ${BASH_REMATCH[1]} + 1 )) - next_major_version="${major}.0.0" - else - echo "Error: Current version format is invalid: $current_version" - exit 1 - fi - # Format the tag as: -nightly. - nightly_tag="${next_major_version}-nightly.$(date -u +%Y%m%d)-amd64" - - TAGS=$(curl -s https://registry.hub.docker.com/v2/repositories/aztecprotocol/aztec/tags/$nightly_tag) - if [[ "$TAGS" != *"not found"* ]]; then - echo "TAG=$nightly_tag" >> "$GITHUB_OUTPUT" - else - echo "Tag $nightly_tag not published to docker hub" - exit 1 - fi - - deploy-network: - needs: get-latest-tag - uses: ./.github/workflows/network-deploy.yml - with: - ref: next - cluster: aztec-gke-private - namespace: next-rc-1 - values_file: rc-1.yaml - aztec_docker_image: aztecprotocol/aztec:${{ needs.get-latest-tag.outputs.tag }} - deployment_mnemonic_secret_name: junk-mnemonic - respect_tf_lock: "false" - run_terraform_destroy: "true" - sepolia_deployment: "false" - skip_funding: true - secrets: - GCP_SA_KEY: ${{ secrets.GCP_SA_KEY }} diff --git a/.github/workflows/nightly-release-tag.yml b/.github/workflows/nightly-release-tag.yml index 76dc386377b0..1605e3a30f04 100644 --- a/.github/workflows/nightly-release-tag.yml +++ 
b/.github/workflows/nightly-release-tag.yml @@ -27,17 +27,8 @@ jobs: git config --global user.email "tech@aztecprotocol.com" git config --global user.name "AztecBot" current_version=$(jq -r '."."' .release-please-manifest.json) - # Compute the next major version. e.g. if current version is 1.2.3, next major version is 2.0.0. - if [[ "$current_version" =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then - major=$(( ${BASH_REMATCH[1]} + 1 )) - next_major_version="${major}.0.0" - else - echo "Error: Current version format is invalid: $current_version" - exit 1 - fi echo "Current version: $current_version" - echo "Next version: $next_major_version" - nightly_tag="v${next_major_version}-nightly.$(date -u +%Y%m%d)" + nightly_tag="v${current_version}-nightly.$(date -u +%Y%m%d)" echo "Nightly tag: $nightly_tag" # Tag and push. git tag -a "$nightly_tag" -m "$nightly_tag" diff --git a/.github/workflows/publish-bb-mac.yml b/.github/workflows/publish-bb-mac.yml index 8a9d0f93f9bc..d5e52e425b1f 100644 --- a/.github/workflows/publish-bb-mac.yml +++ b/.github/workflows/publish-bb-mac.yml @@ -43,11 +43,11 @@ jobs: optional: false - label: amd64-darwin-starknet runner: macos-13 - cmake_flags: "-DCMAKE_CXX_FLAGS=\"-DSTARKNET_GARAGA_FLAVORS=1\"" + cmake_flags: '-DCMAKE_CXX_FLAGS="-DSTARKNET_GARAGA_FLAVORS=1"' optional: true - label: arm64-darwin-starknet runner: macos-14 - cmake_flags: "-DCMAKE_CXX_FLAGS=\"-DSTARKNET_GARAGA_FLAVORS=1\"" + cmake_flags: '-DCMAKE_CXX_FLAGS="-DSTARKNET_GARAGA_FLAVORS=1"' optional: true steps: - name: Checkout @@ -57,7 +57,7 @@ jobs: - name: Create Mac Build Environment run: | - brew install cmake ninja llvm@18 + brew install cmake ninja llvm@20 echo "BREW_PREFIX=$(brew --prefix)" >> $GITHUB_ENV - name: Replace version string in main.cpp @@ -69,14 +69,14 @@ jobs: - name: Compile Barretenberg working-directory: barretenberg/cpp - continue-on-error: ${{ matrix.optional }} + continue-on-error: ${{ matrix.optional }} run: | cmake --preset homebrew ${{ 
matrix.cmake_flags }} cmake --build --preset homebrew --target bb - name: Package barretenberg artifact (${{ matrix.label }}) working-directory: barretenberg/cpp/build/bin - continue-on-error: ${{ matrix.optional }} + continue-on-error: ${{ matrix.optional }} run: | mkdir dist cp ./bb ./dist/bb @@ -84,7 +84,7 @@ jobs: - name: Upload artifact (${{ matrix.label }}) uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 - continue-on-error: ${{ matrix.optional }} + continue-on-error: ${{ matrix.optional }} with: name: barretenberg-${{ matrix.label }}.tar.gz path: ./barretenberg/cpp/build/bin/barretenberg-${{ matrix.label }}.tar.gz diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml index ec8c6afee09d..50843e5fa3a8 100644 --- a/.github/workflows/release-please.yml +++ b/.github/workflows/release-please.yml @@ -3,6 +3,7 @@ name: release-please on: push: branches: + - "v[0-9]*" - master permissions: @@ -27,6 +28,110 @@ jobs: config-file: .github/release-please-${{ github.ref_name }}.json target-branch: ${{ github.ref_name }} + auto-tag: + needs: [release-please] + if: ${{ startsWith(github.ref, 'refs/heads/v') }} + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + fetch-depth: 0 + token: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + + - name: Configure Git + run: | + git config --global user.name AztecBot + git config --global user.email tech@aztecprotocol.com + + - name: Extract version from branch name + id: version + run: | + BRANCH_NAME="${GITHUB_REF#refs/heads/}" + MAJOR_VERSION="${BRANCH_NAME#v}" + echo "branch=$BRANCH_NAME" >> $GITHUB_OUTPUT + echo "major=$MAJOR_VERSION" >> $GITHUB_OUTPUT + + - name: Determine next version + id: next_version + run: | + # Read current version from release-please manifest and use it directly + if [ ! 
-f .release-please-manifest.json ]; then + echo "Error: .release-please-manifest.json not found" + exit 1 + fi + + MANIFEST_VERSION=$(jq -r '."."' .release-please-manifest.json) + echo "Manifest version: $MANIFEST_VERSION" + + # Ensure manifest major matches vX branch major + BRANCH_MAJOR="${{ steps.version.outputs.major }}" + if [ -z "$BRANCH_MAJOR" ]; then + echo "Error: Branch major version not available from previous step" + exit 1 + fi + + MANIFEST_MAJOR="${MANIFEST_VERSION%%.*}" + if [ "$MANIFEST_MAJOR" != "$BRANCH_MAJOR" ]; then + echo "Error: Manifest major ($MANIFEST_MAJOR) does not match branch major ($BRANCH_MAJOR)" + exit 1 + fi + + if [[ "$MANIFEST_VERSION" =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then + NEXT_VERSION="$MANIFEST_VERSION" + else + echo "Error: Invalid manifest version format" + exit 1 + fi + + echo "version=$NEXT_VERSION" >> $GITHUB_OUTPUT + echo "Next version will be: $NEXT_VERSION" + + - name: Get next RC number + id: rc + run: | + # Get all existing RC tags for this specific version + EXISTING_TAGS=$(git tag -l "v${{ steps.next_version.outputs.version }}-rc.*" | sort -V) + + if [ -z "$EXISTING_TAGS" ]; then + # No RC tags exist yet for this version, start with 1 + NEXT_RC=1 + else + # Extract the highest RC number and increment + LAST_TAG=$(echo "$EXISTING_TAGS" | tail -n 1) + LAST_RC=$(echo "$LAST_TAG" | sed 's/.*-rc\.\([0-9]\+\)$/\1/') + NEXT_RC=$((LAST_RC + 1)) + fi + + echo "number=$NEXT_RC" >> $GITHUB_OUTPUT + echo "Next RC number: $NEXT_RC" + + - name: Check if commit already tagged + id: check_tag + run: | + # Check if current commit already has any RC tag + EXISTING_TAGS=$(git tag --points-at HEAD | grep "^v.*-rc\." 
|| true) + + if [ -n "$EXISTING_TAGS" ]; then + echo "Commit already tagged as: $EXISTING_TAGS" + echo "skip=true" >> $GITHUB_OUTPUT + else + echo "skip=false" >> $GITHUB_OUTPUT + fi + + - name: Create and push new RC tag + if: steps.check_tag.outputs.skip != 'true' + run: | + TAG_NAME="v${{ steps.next_version.outputs.version }}-rc.${{ steps.rc.outputs.number }}" + + # Create annotated tag + git tag -a "$TAG_NAME" -m "Release candidate ${{ steps.rc.outputs.number }} for v${{ steps.next_version.outputs.version }}" + git push origin "$TAG_NAME" + + echo "✅ Created tag: $TAG_NAME" + update-docs: name: Update docs env: @@ -42,8 +147,10 @@ jobs: ref: ${{ fromJSON(needs.release-please.outputs.release-pr).headBranchName }} token: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} - - name: Run corepack enable - run: corepack enable + - name: Setup dependencies + run: | + sudo apt install -y --no-install-recommends doxygen + corepack enable - name: Configure Git run: | diff --git a/.github/workflows/test-network-scenarios.yml b/.github/workflows/test-network-scenarios.yml new file mode 100644 index 000000000000..f05ebdc3e7e0 --- /dev/null +++ b/.github/workflows/test-network-scenarios.yml @@ -0,0 +1,148 @@ +# CI for Aztec Network Scenarios. +# Triggered by CI3 workflow completion on tagged releases. 
+# +name: Test Network Scenarios + +on: + workflow_run: + workflows: ["CI3"] + types: + - completed + workflow_dispatch: + inputs: + semver: + description: Semver version (e.g., 1.2.3) + required: true + type: string + +concurrency: + group: test-network-scenarios-${{ (github.event_name == 'workflow_run' && github.event.workflow_run.head_sha) || (github.event_name == 'workflow_dispatch' && inputs.semver) || github.sha }} + cancel-in-progress: true + +jobs: + deploy-and-test-scenarios: + if: | + (github.event_name == 'workflow_run' && github.event.workflow_run.conclusion == 'success') || + (github.event_name == 'workflow_dispatch') + runs-on: ubuntu-latest + env: + NETWORK_ENV_FILE: /tmp/network.env + GOOGLE_APPLICATION_CREDENTIALS: /tmp/gcp-key.json + steps: + ############# + # Prepare Env + ############# + - name: Checkout (workflow_run) + if: github.event_name == 'workflow_run' + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + ref: ${{ github.event.workflow_run.head_sha }} + fetch-depth: 0 + persist-credentials: false + - name: Checkout (workflow_dispatch) + if: github.event_name == 'workflow_dispatch' + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + ref: refs/tags/v${{ inputs.semver }} + fetch-depth: 0 + persist-credentials: false + - name: Determine semver from tag + if: github.event_name == 'workflow_run' + run: | + git fetch --tags --force + tag=$(git tag --points-at "${{ github.event.workflow_run.head_sha }}" | head -n1) + if ! echo "$tag" | grep -Eq '^v[0-9]+\.[0-9]+\.[0-9]+'; then + echo "No semver tag found for head_sha: ${{ github.event.workflow_run.head_sha }}. Skipping." 
+ exit 0 + fi + semver="${tag#v}" + major_version="${semver%%.*}" + echo "SEMVER=$semver" >> $GITHUB_ENV + echo "MAJOR_VERSION=$major_version" >> $GITHUB_ENV + - name: Set semver from input + if: github.event_name == 'workflow_dispatch' + run: | + semver="${{ inputs.semver }}" + major_version="${semver%%.*}" + echo "SEMVER=$semver" >> $GITHUB_ENV + echo "MAJOR_VERSION=$major_version" >> $GITHUB_ENV + - name: Setup + if: env.SEMVER != '' + run: | + # Ensure we can SSH into the spot instances we request. + mkdir -p ~/.ssh + echo ${{ secrets.BUILD_INSTANCE_SSH_KEY }} | base64 --decode > ~/.ssh/build_instance_key + chmod 600 ~/.ssh/build_instance_key + sudo apt install -y --no-install-recommends redis-tools parallel + + - name: Store the GCP key in a file + if: env.SEMVER != '' + env: + GCP_SA_KEY: ${{ secrets.GCP_SA_KEY }} + run: | + set +x + umask 077 + printf '%s' "$GCP_SA_KEY" > "$GOOGLE_APPLICATION_CREDENTIALS" + jq -e . "$GOOGLE_APPLICATION_CREDENTIALS" >/dev/null + + # note: it is fine to log the mnemonic here. this is an internal, + # throwaway test network, which you can see gets destroyed before it is created each time. 
+ - name: Write network env file + if: env.SEMVER != '' + run: | + NAMESPACE="v${MAJOR_VERSION}-scenario" + cat > ${{ env.NETWORK_ENV_FILE }} <> $GITHUB_ENV + + - name: Get Tree Hash + if: env.SEMVER != '' + run: echo "TREE_HASH=$(git rev-parse HEAD^{tree})" >> $GITHUB_ENV + + - name: Check CI Cache + id: ci_cache + if: env.SEMVER != '' + uses: actions/cache@v3 + with: + path: ci-success.txt + key: ci-network-scenario-${{ env.TREE_HASH }} + + ############# + # Run + ############# + - name: Run + if: env.SEMVER != '' && steps.ci_cache.outputs.cache-hit != 'true' + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + GITHUB_TOKEN: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + RUN_ID: ${{ github.run_id }} + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + NETWORK_ENV_FILE: ${{ env.NETWORK_ENV_FILE }} + GOOGLE_APPLICATION_CREDENTIALS: ${{ env.GOOGLE_APPLICATION_CREDENTIALS }} + NAMESPACE: ${{ env.NAMESPACE }} + REF_NAME: "v${{ env.SEMVER }}" + run: | + # the network env file and gcp credentials file are mounted into the ec2 instance + # see ci3/bootstrap_ec2 + exec ./ci.sh network-deploy + + - name: Save CI Success + if: env.SEMVER != '' && steps.ci_cache.outputs.cache-hit != 'true' + run: echo "success" > ci-success.txt diff --git a/.github/workflows/testnet-deploy.yml b/.github/workflows/testnet-deploy.yml deleted file mode 100644 index c88882f213e3..000000000000 --- a/.github/workflows/testnet-deploy.yml +++ /dev/null @@ -1,115 +0,0 @@ -name: Aztec Testnet Deployment - -on: - workflow_call: - inputs: - respect_tf_lock: - description: Whether to respect the Terraform lock - required: false - type: string - default: "true" - run_terraform_destroy: - description: Whether to run terraform destroy before deploying - required: false - type: string - default: "false" - aztec_docker_image: - description: The Aztec Docker image to use, e.g. 
aztecprotocol/aztec:latest - required: false - type: string - secrets: - GCP_SA_KEY: - required: true - workflow_dispatch: - inputs: - respect_tf_lock: - description: Whether to respect the Terraform lock - required: false - type: string - default: "true" - run_terraform_destroy: - description: Whether to run terraform destroy before deploying - required: false - type: string - default: "false" - aztec_docker_image: - description: The Aztec Docker image to use, e.g. aztecprotocol/aztec:latest - required: false - type: string - -jobs: - testnet_deployment: - runs-on: ubuntu-latest - concurrency: - group: deploy-testnet # Only one job globally - cancel-in-progress: false # Allow previous deployment to complete to avoid corruption - - env: - AZTEC_DOCKER_IMAGE: ${{ inputs.aztec_docker_image }} - CLUSTER_NAME: aztec-gke-public - REGION: us-west1-a - TF_STATE_BUCKET: aztec-terraform - GKE_CLUSTER_CONTEXT: "gke_testnet-440309_us-west1-a_aztec-gke-public" - - steps: - - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - with: - ref: ${{ inputs.ref }} - - - name: Authenticate to Google Cloud - uses: google-github-actions/auth@6fc4af4b145ae7821d527454aa9bd537d1f2dc5f - with: - credentials_json: ${{ secrets.GCP_SA_KEY }} - - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@6189d56e4096ee891640bb02ac264be376592d6a - - - name: Install GKE Auth Plugin - run: | - gcloud components install gke-gcloud-auth-plugin --quiet - - - name: Configure kubectl with GKE cluster - run: | - gcloud container clusters get-credentials ${{ env.CLUSTER_NAME }} --region ${{ env.REGION }} - - - name: Ensure Terraform state bucket exists - run: | - if ! gsutil ls gs://${{ env.TF_STATE_BUCKET }} >/dev/null 2>&1; then - echo "Creating GCS bucket for Terraform state..." 
- gsutil mb -l us-east4 gs://${{ env.TF_STATE_BUCKET }} - gsutil versioning set on gs://${{ env.TF_STATE_BUCKET }} - else - echo "Terraform state bucket already exists" - fi - - - name: Setup Terraform - uses: hashicorp/setup-terraform@633666f66e0061ca3b725c73b2ec20cd13a8fdd1 - with: - terraform_version: "1.5.0" - - - name: Terraform Init - working-directory: ./spartan/terraform/deploy-testnet - run: | - terraform init - - - name: Terraform Destroy - working-directory: ./spartan/terraform/deploy-testnet - if: ${{ inputs.run_terraform_destroy == 'true' }} - # Destroy fails if the resources are already destroyed, so we continue on error - continue-on-error: true - run: | - terraform destroy -auto-approve \ - -lock=${{ inputs.respect_tf_lock }} - - - name: Terraform Plan - working-directory: ./spartan/terraform/deploy-testnet - run: | - terraform plan \ - -out=tfplan \ - -var="AZTEC_DOCKER_IMAGE=${AZTEC_DOCKER_IMAGE:-aztecprotocol/aztec:latest}" \ - -lock=${{ inputs.respect_tf_lock }} - - - name: Terraform Apply - working-directory: ./spartan/terraform/deploy-testnet - run: terraform apply -lock=${{ inputs.respect_tf_lock }} -auto-approve tfplan diff --git a/.github/workflows/update-red-team-image.yml b/.github/workflows/update-red-team-image.yml new file mode 100644 index 000000000000..0440d9761522 --- /dev/null +++ b/.github/workflows/update-red-team-image.yml @@ -0,0 +1,177 @@ +name: Update Red Team Image + +on: + workflow_dispatch: + inputs: + docker_image: + description: 'Full docker image tag (e.g., aztecprotocol/aztec:2.1.0-rc.1)' + required: true + type: string + +env: + TEAM_MAP: | + alexghr: 1 + koenmtb1: 2 + spalladino: 3 + PhilWindle: 4 + just-mitch: 5 + Thunkar: 6 + spypsy: 7 + mralj: 8 + +concurrency: + group: update-red-team-${{ github.actor }} + cancel-in-progress: false + +jobs: + update-red-team-image: + runs-on: ubuntu-latest + env: + GOOGLE_APPLICATION_CREDENTIALS: /tmp/gcp-key.json + steps: + - name: Checkout v2 branch for helm chart + uses: 
actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + ref: red-team-go + fetch-depth: 0 + persist-credentials: false + + - name: Determine team number + id: team + run: | + USERNAME="${{ github.actor }}" + + # Parse team map (YAML format) + TEAM_NUM=$(echo "$TEAM_MAP" | grep "^${USERNAME}:" | cut -d':' -f2 | tr -d ' ' || echo "") + + if [[ -z "$TEAM_NUM" ]]; then + echo "Error: User '$USERNAME' is not mapped to any team" + echo "Please contact administrator to add your username to the team map" + exit 1 + fi + + if [[ ! "$TEAM_NUM" =~ ^[0-9]+$ ]]; then + echo "Error: Invalid team number '$TEAM_NUM' for user '$USERNAME'" + exit 1 + fi + + echo "team_num=$TEAM_NUM" >> $GITHUB_OUTPUT + echo "namespace=red-team-${TEAM_NUM}" >> $GITHUB_OUTPUT + + echo "User: $USERNAME" + echo "Team: $TEAM_NUM" + echo "Namespace: red-team-${TEAM_NUM}" + + - name: Validate docker image + id: image + run: | + IMAGE="${{ inputs.docker_image }}" + + # Validate image format (should contain : separator) + if [[ ! "$IMAGE" =~ : ]]; then + echo "Error: Invalid image format. Expected format: repository:tag" + exit 1 + fi + + # Split image into repository and tag + IMAGE_REPO=$(echo "$IMAGE" | cut -d: -f1) + IMAGE_TAG=$(echo "$IMAGE" | cut -d: -f2-) + + echo "repository=$IMAGE_REPO" >> $GITHUB_OUTPUT + echo "tag=$IMAGE_TAG" >> $GITHUB_OUTPUT + + echo "Image repository: $IMAGE_REPO" + echo "Image tag: $IMAGE_TAG" + + - name: Store the GCP key in a file + env: + GCP_SA_KEY: ${{ secrets.GCP_SA_KEY }} + run: | + set +x + umask 077 + printf '%s' "$GCP_SA_KEY" > "$GOOGLE_APPLICATION_CREDENTIALS" + jq -e . 
"$GOOGLE_APPLICATION_CREDENTIALS" >/dev/null + + - name: Setup GCP authentication + run: | + gcloud auth activate-service-account --key-file="$GOOGLE_APPLICATION_CREDENTIALS" + + - name: Setup gcloud and install GKE auth plugin + uses: google-github-actions/setup-gcloud@v2 + with: + install_components: 'gke-gcloud-auth-plugin' + + - name: Configure kubectl for GKE + env: + GCP_PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }} + run: | + gcloud container clusters get-credentials aztec-gke-public \ + --region us-west1-a \ + --project "$GCP_PROJECT_ID" + + # Verify connectivity + kubectl cluster-info + echo "Current context: $(kubectl config current-context)" + + - name: Setup Helm + uses: azure/setup-helm@v4 + with: + version: 'v3.13.0' + + - name: Verify namespace and release exist + env: + NAMESPACE: ${{ steps.team.outputs.namespace }} + run: | + # Check if namespace exists + if ! kubectl get namespace "$NAMESPACE" &> /dev/null; then + echo "Error: Namespace '$NAMESPACE' does not exist" + echo "Has this team been deployed yet?" + exit 1 + fi + + # Check if helm release exists + if ! helm list -n "$NAMESPACE" 2>/dev/null | grep -q "validator"; then + echo "Error: Helm release 'validator' not found in namespace '$NAMESPACE'" + echo "Has this team been deployed yet?" 
+ exit 1 + fi + + echo "✓ Namespace and helm release verified" + + - name: Update image with helm upgrade + env: + NAMESPACE: ${{ steps.team.outputs.namespace }} + IMAGE_REPO: ${{ steps.image.outputs.repository }} + IMAGE_TAG: ${{ steps.image.outputs.tag }} + run: | + echo "Updating team ${{ steps.team.outputs.team_num }} to image: ${IMAGE_REPO}:${IMAGE_TAG}" + echo "" + + cd spartan + + helm upgrade validator ./aztec-validator \ + --namespace "$NAMESPACE" \ + --reuse-values \ + --set "global.aztecImage.repository=${IMAGE_REPO}" \ + --set "global.aztecImage.tag=${IMAGE_TAG}" \ + --wait \ + --timeout 10m + + echo "" + echo "✓ Helm upgrade completed" + + - name: Post logs on failure + if: failure() + env: + NAMESPACE: ${{ steps.team.outputs.namespace }} + run: | + echo "Deployment failed. Fetching diagnostic information..." + echo "" + echo "Pod status:" + kubectl get pods -n "$NAMESPACE" || true + echo "" + echo "Recent events:" + kubectl get events -n "$NAMESPACE" --sort-by='.lastTimestamp' | tail -20 || true + echo "" + echo "Pod logs:" + kubectl logs -n "$NAMESPACE" -l app=node --tail=50 || true diff --git a/.gitignore b/.gitignore index e1a20e414600..d1cc2efb63ef 100644 --- a/.gitignore +++ b/.gitignore @@ -29,5 +29,4 @@ docs/.yarn/install-state.gz docs/docs/protocol-specs/public-vm/gen/ # for those who use Claude Code -CLAUDE.md .claude diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 41ea87d76b9a..d4f6f2994ea4 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.2.1" + ".": "3.0.0" } diff --git a/.test_patterns.yml b/.test_patterns.yml index 5b22a73a4b9a..30cea77b39b5 100644 --- a/.test_patterns.yml +++ b/.test_patterns.yml @@ -14,6 +14,7 @@ names: - akosh: &akosh "U07PQ3Y4GHJ" - alex: &alex "U05QWV669JB" - charlie: &charlie "UKUMA5J7K" + - david: &david "U03T2QRT1NW" - grego: &grego "U0689QRCE9L" - lasse: &lasse "U03E5SYLY3Z" - leila: &leila "UBLTU1NJ3" @@ -30,6 +31,7 @@ 
names: - saleel: &saleel "U07KJ0AV20J" - nico: &nico "U06D2QSLY10" - lucas: &lucas "U05MW7WQ8LQ" + - suyash: &suyash "U08QFSL6LPM" tests: # barretenberg @@ -45,18 +47,45 @@ tests: error_regex: "field_t::range_constraint" owners: - *luke - - regex: "barretenberg/acir_tests/scripts/run_test_browser.sh" + - regex: "barretenberg/acir_tests/scripts/browser_prove.sh" error_regex: "Failed to fetch" owners: - *adam - - regex: "barretenberg/acir_tests/scripts/run_test_browser.sh" + - regex: "barretenberg/acir_tests/scripts/browser_prove.sh" error_regex: "RuntimeError: Out of bounds memory access" owners: - *adam - - regex: "barretenberg/acir_tests/scripts/run_test_browser.sh" + - regex: "barretenberg/acir_tests/scripts/browser_prove.sh" error_regex: "call_indirect to a null table entry" owners: - *adam + - regex: "barretenberg/acir_tests/scripts/browser_prove.sh" + error_regex: "Input is not large enough" + owners: + - *adam + # https://gist.github.com/spalladino/4fd3d2abd7b7fb05be2e556649868626 + - regex: "barretenberg/acir_tests/scripts/browser_prove.sh" + error_regex: "sending signal TERM to command" + owners: + - *adam + - regex: "barretenberg/cpp/scripts/run_bench.sh wasm bb-micro-bench/wasm/ultra_honk" + error_regex: "Aborted.*core dumped" + owners: + - *adam + - regex: "barretenberg/acir_tests/scripts/browser_prove.sh .* webkit" + owners: + - *luke + + # /home/aztec-dev/aztec-packages/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield_edge_cases.test.cpp:142: Failure + # Expected equality of these values: + # combined_a.get_value() > fq_ct::modulus + # Which is: false + # true + # http://ci.aztec-labs.com/9cb61bfe05d4f32f + - regex: "stdlib_primitives_tests stdlib_bigfield_edge_cases/2.reduction_check_works" + error_regex: "combined_a.get_value() > fq_ct::modulus" + owners: + - *suyash # noir # Something to do with how I run the tests now. Think these are fine in nextest. 
@@ -156,6 +185,24 @@ tests: error_regex: "✕ Repay" owners: - *lasse + # http://ci.aztec-labs.com/d681011c15fccdfe + # simulator:avm(f:0x9756f620) Unknown error thrown by AVM during bytecode retrieval: AssertionError [ERR_ASSERTION]: Contract instance for address 0x0f48856620f39f6ae1da55786b7e93e2358081333605161af4e29cfd507a3039 in DB: false != nullifier tree: true. This is a bug! + - regex: "src/e2e_epochs/epochs_proof_public_cross_chain.test.ts" + error_regex: "This is a bug" + owners: + - *palla + - regex: "src/e2e_epochs/epochs_proof_public_cross_chain.test.ts" + error_regex: "Error: Failed to find an available publisher" + owners: + - *palla + - regex: "src/e2e_epochs/epochs_l1_reorgs.test.ts" + owners: + - *palla + # http://ci.aztec-labs.com/e85ff6c5da65a1e7 + - regex: "src/e2e_cross_chain_messaging/l1_to_l2.test.ts" + error_regex: "Failed to advance block" + owners: + - *palla - regex: "src/e2e_epochs/epochs_empty_blocks" error_regex: "✕ successfully proves multiple epochs" owners: @@ -176,8 +223,8 @@ tests: error_regex: "✕ Can't claim funds" owners: - *lasse - - regex: "src/composed/integration_l1_publisher" - error_regex: "BlockOutOfRangeError" + - regex: "src/e2e_l1_publisher/e2e_l1_publisher.test.ts" + error_regex: "Anvil failed to stop in time|Cannot read properties of undefined" owners: - *palla # http://ci.aztec-labs.com/f2007345762c50a8 @@ -194,6 +241,22 @@ tests: error_regex: "expect\\(received\\).toContain\\(expected\\)" owners: - *palla + # http://ci.aztec-labs.com/3b197577b96c3639 + # http://ci.aztec-labs.com/d429457d95d075e2 + - regex: "src/e2e_p2p/slash_veto_demo.test.ts" + error_regex: "timeout: sending signal TERM to command|ValidatorSelection__InsufficientValidatorSetSize" + owners: + - *palla + # http://ci.aztec-labs.com/2927032942451013 + - regex: "src/e2e_p2p/valid_epoch_pruned_slash.test.ts" + error_regex: "Timeout waiting for proposal execution" + owners: + - *palla + # http://ci.aztec-labs.com/3055aa95e441471f + - regex: 
"src/e2e_p2p/e2e_p2p_broadcasted_invalid_block_proposal_slash.test.ts" + error_regex: "Timeout awaiting non-empty offenses" + owners: + - *palla # yarn-project tests - regex: "p2p/src/services/discv5/discv5_service.test.ts" @@ -232,9 +295,6 @@ tests: error_regex: "Failed to fetch dynamically imported module" owners: - *alex - - regex: "ethereum/src/deploy_l1_contracts.test.ts" - owners: - - *palla - regex: "ethereum/src/test/tx_delayer.test.ts" error_regex: "delays a transaction until a given L1 timestamp" owners: @@ -246,13 +306,15 @@ tests: - regex: "ivc-integration/src/rollup_ivc_integration.test.ts" error_regex: "Exceeded timeout of" owners: - - *lucas - - regex: "e2e_prover/full" - error_regex: "ProvingError: Failed to verify proof from key!" + - *luke - regex: "slasher/src/slasher_client.test.ts" error_regex: "ContractFunctionExecutionError: The contract function" owners: - *mitch + - regex: "e2e_p2p/inactivity_slash_with_consecutive_epochs.test.ts" + error_regex: "only slashes validator inactive for N consecutive epochs" + owners: + - *palla # Nightly GKE tests - regex: "spartan/bootstrap.sh" @@ -272,6 +334,10 @@ tests: error_regex: "Sumcheck failed" owners: - *luke + - regex: "aztec-up/scripts/run_test.sh bridge_and_claim" + error_regex: "Cannot satisfy constraint" + owners: + - *adam # boxes - regex: "vanilla-all-browsers box boxes" @@ -299,11 +365,21 @@ tests: owners: - *saleel - - regex: "BOX=vanilla BROWSER=* run_compose_test vanilla-all-browsers box boxes" # http://ci.aztec-labs.com/49f9945bc00aeef9 + - regex: "run_compose_test vanilla-all-browsers box boxes" # http://ci.aztec-labs.com/49f9945bc00aeef9 error_regex: "create account and cast vote" owners: - *saleel + - regex: "run_compose_test vanilla-all-browsers box boxes" + error_regex: "Tag mismatch at offset 0|app_logic_reverted|public bytecode has not been transpiled" + owners: + - *david + + - regex: "src/e2e_p2p/inactivity_slash.test.ts" + error_regex: "Cannot propose.*since the committee does 
not exist on L1" + owners: + - *palla + # Slack testing. - regex: "nonsense to match" error_regex: "something else" diff --git a/.vscode/launch.json b/.vscode/launch.json index f2a2d2c18504..5804ff6e8a24 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -36,13 +36,26 @@ "name": "Debug CIVC transaction", "type": "lldb", "request": "launch", - "program": "${workspaceFolder}/barretenberg/cpp/build-debug-no-avm/bin/bb", - "args": ["prove", "--scheme", "client_ivc", "--output_path", ".", "--ivc_inputs_path", "ivc-inputs.msgpack"], + "program": "${workspaceFolder}/barretenberg/cpp/build-debug-fast-no-avm/bin/bb", + "args": ["prove", "--scheme", "client_ivc", "--output_path", ".", "--ivc_inputs_path", "ivc-inputs.msgpack", "--print_bench"], "cwd": "${workspaceFolder}/yarn-project/end-to-end/example-app-ivc-inputs-out/ecdsar1+transfer_0_recursions+sponsored_fpc", "initCommands": [ "command script import ${workspaceFolder}/barretenberg/cpp/scripts/lldb_format.py" ], }, + { + // Debug acir test + "name": "Debug acir test", + "type": "lldb", + "request": "launch", + "program": "${workspaceFolder}/barretenberg/cpp/build-debug-fast-no-avm/bin/bb", + "args": ["prove", "--scheme", "ultra_honk", "--output_path", ".", "-b", "$./target/witness.gz"], + // replace __test_name__ with the intended test + "cwd": "${workspaceFolder}/barretenberg/acir_tests/acir_tests/__test__name__", + "initCommands": [ + "command script import ${workspaceFolder}/barretenberg/cpp/scripts/lldb_format.py" + ], + }, { "name": "Debug test_graph_for_arithmetic_gates", "type": "lldb", diff --git a/.vscode/settings.json b/.vscode/settings.json index c81dba17ed56..0964715c3fdf 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -174,7 +174,7 @@ // Clangd. Note that this setting may be overridden by user settings // to the default value "clangd". 
// - "clangd.path": "clangd-16", + "clangd.path": "clangd-20", // // C/C++ (should be disabled) // diff --git a/CI.md b/CI.md index 7d4ab9d648d7..f57e8b2ecf79 100644 --- a/CI.md +++ b/CI.md @@ -23,7 +23,7 @@ CI3: - Restricting resources such a vcpus, memory and storage. - Building a single final slim release image from `release-image`. - Provides a consistent command interface on `./bootstrap.sh` scripts, e.g. `./bootstrap.sh clean|fast|full|test|test_cmds`. -- Unifies how projects are tested allowing for a "build then test the entire repo" workflow. Projects expose their individual tests via `test_cmds` and they can all be parallelised at once to leverage maximum system throughput. +- Unifies how projects are tested allowing for a "build then test the entire repo" workflow. Projects expose their individual tests via `test_cmds` and they can all be parallelized at once to leverage maximum system throughput. - Runs on a single (currently large, 128 vcpu) machine. - Significantly reduces the chance of flakey tests making their way into master, by "grinding" the tests in the master merge queue. This simply executes the tests as above, but across N instances. (TBD) - Provides a shared redis cache at the test level, meaning the same test never needs to be run twice in CI (except when grinding). @@ -287,6 +287,7 @@ PRs targeting `next` must be squashed to a single commit unless labeled with `ci CI3 has granular caching, but as well it includes an additional layer of caching based on git content. When CI completes successfully, it stores a success marker keyed by the hash of the repository's file tree. On subsequent runs, if the exact same content is detected (same tree hash), CI will skip execution entirely. 
This is particularly useful when: + - You squash commits using `ci-squash-and-merge` - the resulting single commit has the same content, so CI won't re-run - You rebase without changes - if the final content is identical, CI is skipped - Multiple PRs have identical changes - only the first needs to run CI @@ -323,7 +324,7 @@ dl e6b8532f0c020b44 Let's say you open up a test run log, you'll see something like: ``` -Command: parallelise 64 (exit: 0) +Command: parallelize 64 (exit: 0) Starting test run with max 64 jobs... PASSED (http://ci.aztec-labs.com/736ae186bdf66226): yarn-project/end-to-end/scripts/run_test.sh simple e2e_synching PASSED (http://ci.aztec-labs.com/066e837f7af23761): yarn-project/end-to-end/scripts/run_test.sh simple e2e_public_testnet_transfer @@ -378,30 +379,6 @@ The images can be built and pushed to dockerhub using the `build-images/bootstra It also provides the ability to update our AWS AMI's which have the above build/devbox images embedded within them so we don't have to keep pulling them. -## Release Image - -Aztec is released as a _single_ mono-container. That is, everything we need to ship should end up in `aztecprotocol/aztec` and published to Dockerhub with version tags. - -The release image is created from a bootstrap, by the `release-image/Dockerfile`. The `Dockerfile.dockerignore` file ensures that only what's needed is copied into the container. We perform a multi-stage build to first strip back to production dependencies, and copy them into a final slim image. - -**It is _extremely_ important we keep this image as lightweight as possible. Do NOT significantly expand the size of this image without very good reason.** - -## Releases - -Release please is used and will automatically tag the commit e.g. `v1.2.3`. The project will subsequently be released under that version. - -You can also trigger pre and post releases using extended semver notation such as `v1.2.3-nightly.20250101` or `v1.2.3-devnet.0`. 
This are made simply by tagging the appropriate master commit. - -Releases can be performed directly from the terminal if necessary. However at present this will require `NPM_TOKEN` which is a secret restricted to a few people. In future we may provide a "staging organization" for less secure unofficial releases. - -One can also side-step Release Please automation by updating the version number in the root `.release-please-manifest.json`, committing, tagging the repository with e.g. `v1.2.3`, checking out the tag, and running: - -``` -./bootstrap.sh release -``` - -This is all that CI does when it wants to perform an official release. - ## Q&A ### I can't run `yarn clean` in a yarn-project sub project any more. How to do? diff --git a/CODEOWNERS b/CODEOWNERS index 9f3045436876..c14fd59b6397 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -39,3 +39,6 @@ # Notify the circuit team of changes to the protocol circuits /noir-projects/noir-protocol-circuits @LeilaWang + +# Notify devrel of changes to docs examples +/docs/examples @AztecProtocol/devrel diff --git a/avm-transpiler/Cargo.lock b/avm-transpiler/Cargo.lock index bf28cdd42245..f7038fbacd9d 100644 --- a/avm-transpiler/Cargo.lock +++ b/avm-transpiler/Cargo.lock @@ -63,7 +63,6 @@ dependencies = [ "keccak", "libaes", "log", - "num-bigint", "p256", "sha2", "thiserror", @@ -343,6 +342,7 @@ dependencies = [ "base64 0.21.7", "env_logger", "fxhash", + "libc", "log", "noirc_abi", "noirc_errors", @@ -1277,6 +1277,7 @@ dependencies = [ "fm", "fxhash", "im", + "indexmap 2.10.0", "iter-extended", "noirc_errors", "noirc_frontend", diff --git a/avm-transpiler/Cargo.toml b/avm-transpiler/Cargo.toml index cbdf1d39587b..dc421719a3a0 100644 --- a/avm-transpiler/Cargo.toml +++ b/avm-transpiler/Cargo.toml @@ -5,6 +5,10 @@ authors = ["The Aztec Team "] edition = "2024" license = "MIT OR Apache-2.0" +[lib] +name = "avm_transpiler" +crate-type = ["staticlib", "rlib"] + # See more keys and their definitions at 
https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] @@ -15,6 +19,9 @@ noirc_abi = { path = "../noir/noir-repo/tooling/noirc_abi" } noirc_evaluator = { path = "../noir/noir-repo/compiler/noirc_evaluator", features = ["bn254"] } noirc_frontend = { path = "../noir/noir-repo/compiler/noirc_frontend", features = ["test_utils"] } +# FFI +libc = "0.2" + # external base64 = "0.21" env_logger = "0.11" diff --git a/avm-transpiler/avm_transpiler.h b/avm-transpiler/avm_transpiler.h new file mode 100644 index 000000000000..84c77c8b3b39 --- /dev/null +++ b/avm-transpiler/avm_transpiler.h @@ -0,0 +1,70 @@ +#ifndef AVM_TRANSPILER_H +#define AVM_TRANSPILER_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Result structure for transpilation operations + * + * Fields: + * - success: 1 if successful, 0 if failed + * - data: Pointer to output data (JSON string as bytes) + * - length: Length of output data in bytes + * - error_message: Error message if failed (null-terminated string) + */ +typedef struct { + int success; + unsigned char* data; + size_t length; + char* error_message; +} TranspileResult; + +/** + * Transpiles an ACIR contract artifact file to AVM bytecode + * + * @param input_path Path to input ACIR contract artifact JSON file + * @param output_path Path to output transpiled contract artifact JSON file + * @return TranspileResult containing success status, output data, or error message + * + * The function reads the ACIR contract from input_path, transpiles it to AVM bytecode, + * and writes the result to output_path. The output data in the result contains + * the same JSON that was written to the file. + * + * Call avm_free_result() to free the returned result. 
+ */ +TranspileResult avm_transpile_file(const char* input_path, const char* output_path); + +/** + * Transpiles raw ACIR contract artifact bytecode to AVM bytecode + * + * @param input_data Pointer to input ACIR contract artifact JSON data + * @param input_length Length of input data in bytes + * @return TranspileResult containing success status, output data, or error message + * + * The function takes raw JSON bytes representing an ACIR contract artifact, + * transpiles it to AVM bytecode, and returns the transpiled contract artifact + * as JSON bytes in the result. + * + * Call avm_free_result() to free the returned result. + */ +TranspileResult avm_transpile_bytecode(const unsigned char* input_data, size_t input_length); + +/** + * Frees memory allocated by a TranspileResult + * + * @param result Pointer to TranspileResult to free + * + * This function must be called to free the memory allocated by + * avm_transpile_file() and avm_transpile_bytecode(). + */ +void avm_free_result(TranspileResult* result); + +#ifdef __cplusplus +} +#endif + +#endif /* AVM_TRANSPILER_H */ \ No newline at end of file diff --git a/avm-transpiler/bootstrap.sh b/avm-transpiler/bootstrap.sh index 459ffb5bb696..8d81b37aefaa 100755 --- a/avm-transpiler/bootstrap.sh +++ b/avm-transpiler/bootstrap.sh @@ -6,19 +6,65 @@ cmd=${1:-} hash=$(hash_str $(../noir/bootstrap.sh hash) $(cache_content_hash .rebuild_patterns)) -export GIT_COMMIT="0000000000000000000000000000000000000000" +export GIT_COMMIT="$(cat ../noir/noir-repo-ref | head -n1)-aztec" export SOURCE_DATE_EPOCH=0 export GIT_DIRTY=false export RUSTFLAGS="-Dwarnings" +# Temporarily duplicated with barretenberg/cpp/bootstrap.sh until part of base image +function ensure_zig { + if command -v zig &>/dev/null; then + return + fi + local arch=$(uname -m) + local zig_version=0.15.1 + local bin_path=/opt/zig-${arch}-linux-${zig_version} + if [ -f $bin_path/zig ]; then + export PATH="$bin_path:$PATH" + return + fi + echo "Installing zig 
$zig_version..." + curl -sL https://ziglang.org/download/$zig_version/zig-${arch}-linux-$zig_version.tar.xz | sudo tar -xJ -C /opt + export PATH="$bin_path:$PATH" +} + function build { echo_header "avm-transpiler build" artifact=avm-transpiler-$hash.tar.gz if ! cache_download $artifact; then - denoise "cargo build --release --locked" + denoise "cargo build --release --locked --bin avm-transpiler" + denoise "cargo build --release --locked --lib" denoise "cargo fmt --check" denoise "cargo clippy" - cache_upload $artifact target/release/avm-transpiler + cache_upload $artifact target/release/avm-transpiler target/release/libavm_transpiler.a + fi + cross_compile_artifact=avm-transpiler-cross-$hash.tar.gz + + if [ "$(arch)" == "amd64" ] && [ "$CI" -eq 1 ]; then + if ! cache_download $cross_compile_artifact; then + ensure_zig + # We build libraries to be linked by barretenberg + # For now we only use the zig build for macOS targets + if ! command -v cargo-zigbuild >/dev/null 2>&1; then + cargo install --locked cargo-zigbuild + fi + + targets=( + x86_64-apple-darwin + aarch64-apple-darwin + ) + + for target in "${targets[@]}"; do + if ! 
rustup target list --installed | grep -q "^$target$"; then + echo "Installing Rust target: $target" + rustup target add "$target" + fi + done + + parallel --tag --line-buffered cargo zigbuild --release --target {} --lib ::: "${targets[@]}" + + cache_upload $cross_compile_artifact target/x86_64-apple-darwin/release/libavm_transpiler.a target/aarch64-apple-darwin/release/libavm_transpiler.a + fi fi } diff --git a/avm-transpiler/src/lib.rs b/avm-transpiler/src/lib.rs new file mode 100644 index 000000000000..4acf2b1f13ad --- /dev/null +++ b/avm-transpiler/src/lib.rs @@ -0,0 +1,219 @@ +#![warn(clippy::semicolon_if_nothing_returned)] +#![cfg_attr(not(test), warn(unused_crate_dependencies, unused_extern_crates))] + +use env_logger as _; +use noirc_frontend as _; + +use libc::{c_char, c_int, size_t}; +use std::ffi::{CStr, CString}; +use std::fs; +use std::path::Path; +use std::slice; + +mod bit_traits; +mod instructions; +mod opcodes; +mod procedures; +mod transpile; +mod transpile_contract; +mod utils; + +pub use transpile::*; +pub use transpile_contract::*; + +#[repr(C)] +pub struct TranspileResult { + pub success: c_int, + pub data: *mut u8, + pub length: size_t, + pub error_message: *mut c_char, +} + +impl Default for TranspileResult { + fn default() -> Self { + Self { + success: 0, + data: std::ptr::null_mut(), + length: 0, + error_message: std::ptr::null_mut(), + } + } +} + +fn create_error_result(error: &str) -> TranspileResult { + let error_cstr = match CString::new(error) { + Ok(cstr) => cstr, + Err(_) => CString::new("Error message contains null bytes").unwrap(), + }; + + TranspileResult { + success: 0, + data: std::ptr::null_mut(), + length: 0, + error_message: error_cstr.into_raw(), + } +} + +fn create_success_result(data: Vec) -> TranspileResult { + let length = data.len(); + let data_ptr = Box::into_raw(data.into_boxed_slice()) as *mut u8; + + TranspileResult { success: 1, data: data_ptr, length, error_message: std::ptr::null_mut() } +} + +/// Transpile an 
Aztec contract from a file. +/// +/// # Safety +/// +/// - `input_path` must be a valid pointer to a null-terminated C string +/// - `output_path` must be a valid pointer to a null-terminated C string +/// - Both pointers must remain valid for the duration of this call +#[unsafe(no_mangle)] +pub unsafe extern "C" fn avm_transpile_file( + input_path: *const c_char, + output_path: *const c_char, +) -> TranspileResult { + if input_path.is_null() || output_path.is_null() { + return create_error_result("Input or output path is null"); + } + + // SAFETY: Caller ensures input_path is valid null-terminated C string + let input_path_str = match unsafe { CStr::from_ptr(input_path) }.to_str() { + Ok(s) => s, + Err(_) => return create_error_result("Invalid UTF-8 in input path"), + }; + + // SAFETY: Caller ensures output_path is valid null-terminated C string + let output_path_str = match unsafe { CStr::from_ptr(output_path) }.to_str() { + Ok(s) => s, + Err(_) => return create_error_result("Invalid UTF-8 in output path"), + }; + + let json_parse_error = format!( + "Unable to parse json for: {input_path_str} + This is probably a stale json file with a different wire format. 
+ You might need to recompile the contract or delete the json file" + ); + + let contract_json = match fs::read_to_string(Path::new(input_path_str)) { + Ok(content) => content, + Err(e) => { + return create_error_result(&format!("Unable to read file {}: {}", input_path_str, e)); + } + }; + + let raw_json_obj: serde_json::Value = match serde_json::from_str(&contract_json) { + Ok(obj) => obj, + Err(_) => return create_error_result(&json_parse_error), + }; + + if let Some(serde_json::Value::Bool(true)) = raw_json_obj.get("transpiled") { + return create_error_result("Contract already transpiled"); + } + + if Path::new(output_path_str).exists() { + if let Err(e) = std::fs::copy( + Path::new(output_path_str), + Path::new(&(output_path_str.to_string() + ".bak")), + ) { + return create_error_result(&format!( + "Unable to backup file {}: {}", + output_path_str, e + )); + } + } + + let contract: CompiledAcirContractArtifact = match serde_json::from_str(&contract_json) { + Ok(contract) => contract, + Err(_) => return create_error_result(&json_parse_error), + }; + + let transpiled_contract = TranspiledContractArtifact::from(contract); + let transpiled_json = match serde_json::to_string(&transpiled_contract) { + Ok(json) => json, + Err(e) => return create_error_result(&format!("Unable to serialize json: {}", e)), + }; + + if let Err(e) = fs::write(output_path_str, &transpiled_json) { + return create_error_result(&format!("Unable to write file: {}", e)); + } + + create_success_result(transpiled_json.into_bytes()) +} + +/// Transpile an Aztec contract from bytecode. 
+/// +/// # Safety +/// +/// - `input_data` must be a valid pointer to a buffer of `input_length` bytes +/// - The buffer must remain valid for the duration of this call +#[unsafe(no_mangle)] +pub unsafe extern "C" fn avm_transpile_bytecode( + input_data: *const u8, + input_length: size_t, +) -> TranspileResult { + if input_data.is_null() { + return create_error_result("Input data is null"); + } + + // SAFETY: Caller ensures input_data points to valid memory of input_length bytes + let input_slice = unsafe { slice::from_raw_parts(input_data, input_length) }; + let contract_json = match String::from_utf8(input_slice.to_vec()) { + Ok(json) => json, + Err(_) => return create_error_result("Input data is not valid UTF-8"), + }; + + let json_parse_error = "Unable to parse input json. This is probably a stale json file with a different wire format."; + + let raw_json_obj: serde_json::Value = match serde_json::from_str(&contract_json) { + Ok(obj) => obj, + Err(_) => return create_error_result(json_parse_error), + }; + + if let Some(serde_json::Value::Bool(true)) = raw_json_obj.get("transpiled") { + return create_error_result("Contract already transpiled"); + } + + let contract: CompiledAcirContractArtifact = match serde_json::from_str(&contract_json) { + Ok(contract) => contract, + Err(_) => return create_error_result(json_parse_error), + }; + + let transpiled_contract = TranspiledContractArtifact::from(contract); + let transpiled_json = match serde_json::to_string(&transpiled_contract) { + Ok(json) => json, + Err(e) => return create_error_result(&format!("Unable to serialize json: {}", e)), + }; + + create_success_result(transpiled_json.into_bytes()) +} + +/// Free memory allocated by transpile functions. 
+/// +/// # Safety +/// +/// - `result` must be a valid pointer to a TranspileResult returned by a transpile function +/// - The result must not be used after calling this function +/// - This function must be called exactly once per result +#[unsafe(no_mangle)] +pub unsafe extern "C" fn avm_free_result(result: *mut TranspileResult) { + if result.is_null() { + return; + } + + // SAFETY: Caller ensures result is valid + let result = unsafe { &mut *result }; + + if !result.data.is_null() && result.length > 0 { + // SAFETY: data and length were created by Box::into_raw in create_success_result + let _ = unsafe { Box::from_raw(slice::from_raw_parts_mut(result.data, result.length)) }; + result.data = std::ptr::null_mut(); + result.length = 0; + } + + if !result.error_message.is_null() { + // SAFETY: error_message was created by CString::into_raw in create_error_result + let _ = unsafe { CString::from_raw(result.error_message) }; + result.error_message = std::ptr::null_mut(); + } +} diff --git a/avm-transpiler/src/main.rs b/avm-transpiler/src/main.rs index f3652de4e33b..7f8a3bc71946 100644 --- a/avm-transpiler/src/main.rs +++ b/avm-transpiler/src/main.rs @@ -1,22 +1,26 @@ #![warn(clippy::semicolon_if_nothing_returned)] #![cfg_attr(not(test), warn(unused_crate_dependencies, unused_extern_crates))] -use noirc_frontend as _; - use log::warn; use std::env; use std::fs; use std::path::Path; -mod bit_traits; -mod instructions; -mod opcodes; -mod procedures; -mod transpile; -mod transpile_contract; -mod utils; +// Use the library modules instead of redeclaring them +use avm_transpiler::{CompiledAcirContractArtifact, TranspiledContractArtifact}; -use transpile_contract::{CompiledAcirContractArtifact, TranspiledContractArtifact}; +// Acknowledge dependencies used by the library but not directly by the binary +use acvm as _; +use base64 as _; +use fxhash as _; +use libc as _; +use noirc_abi as _; +use noirc_errors as _; +use noirc_evaluator as _; +use noirc_frontend as _; +use 
once_cell as _; +use regex as _; +use serde as _; fn main() { env_logger::init(); diff --git a/avm-transpiler/src/transpile.rs b/avm-transpiler/src/transpile.rs index 47be70c1c1e9..b03be8c5b879 100644 --- a/avm-transpiler/src/transpile.rs +++ b/avm-transpiler/src/transpile.rs @@ -440,9 +440,6 @@ pub fn brillig_to_avm(brillig_bytecode: &[BrilligOpcode]) -> (Vec< &mut unresolved_jumps, ); } - BrilligOpcode::JumpIfNot { .. } => panic!( - "Transpiler doesn't know how to process `BrilligOpcode::JumpIfNot` brillig instruction", - ), } // Increment the AVM program counter. @@ -721,13 +718,13 @@ fn handle_emit_unencrypted_log( // The message array from Brillig is indirect. indirect: Some( AddressingModeBuilder::default() - .indirect_operand(&message_offset) .direct_operand(&message_size_offset) + .indirect_operand(&message_offset) .build(), ), operands: vec![ - AvmOperand::U16 { value: message_offset.to_usize() as u16 }, AvmOperand::U16 { value: message_size_offset.to_usize() as u16 }, + AvmOperand::U16 { value: message_offset.to_usize() as u16 }, ], ..Default::default() }); @@ -1197,11 +1194,7 @@ fn handle_black_box_function( ..Default::default() }); } - BlackBoxOp::Poseidon2Permutation { - message, - output, - len: _, // we don't use this. - } => { + BlackBoxOp::Poseidon2Permutation { message, output } => { // We'd love to validate the input size, but it's not known at compile time. 
assert_eq!(output.size, 4, "Poseidon2Permutation output size must be 4!"); let input_state_offset = message.pointer.to_usize(); @@ -1332,41 +1325,74 @@ fn handle_debug_log( destinations: &[ValueOrArray], inputs: &[ValueOrArray], ) { - if !destinations.is_empty() || inputs.len() != 3 { + // We need to handle two flavors here: + // + // #[oracle(utilityDebugLog)] + // unconstrained fn debug_log_array_oracle( + // log_level: u8, + // msg: str, + // length: u32, + // args: [Field; N], + // ) {} + // + // and + // + //#[oracle(utilityDebugLog)] + // unconstrained fn debug_log_slice_oracle(log_level: u8, msg: str, args: [Field]) {} + // + // Luckily, these two flavors have both 4 arguments, since noir inserts a length field for slices before the slice. + // So we can handle both cases with mostly the same code. + // + if !destinations.is_empty() || inputs.len() != 4 { panic!( - "Transpiler expects ForeignCall::DEBUGLOG to have 0 destinations and 3 inputs, got {} and {}", + "Transpiler expects ForeignCall::DEBUGLOG to have 0 destinations and 4 inputs, got {} and {}", destinations.len(), inputs.len() ); } - let (message_offset, message_size) = match &inputs[0] { + // Level + let level_offset = match &inputs[0] { + ValueOrArray::MemoryAddress(level) => level, + _ => panic!("Level for ForeignCall::DEBUGLOG should be a MemoryAddress."), + }; + // Message + let (message_offset, message_size) = match &inputs[1] { ValueOrArray::HeapArray(HeapArray { pointer, size }) => (pointer, *size as u32), _ => panic!("Message for ForeignCall::DEBUGLOG should be a HeapArray."), }; - // The fields are a slice, and this is represented as a (length: Field, slice: HeapVector). - // The length field is redundant and we skipt it. 
- let (fields_offset_ptr, fields_size_ptr) = match &inputs[2] { + let (fields_offset_ptr, fields_size_offset) = match &inputs[3] { ValueOrArray::HeapVector(HeapVector { pointer, size }) => (pointer, size), + ValueOrArray::HeapArray(HeapArray { pointer, .. }) => { + // match inputs[2] to be a regular memory address holding the array's size + match &inputs[2] { + ValueOrArray::MemoryAddress(size) => (pointer, size), + _ => panic!("DebugLog with an array should have a memory address for the size."), + } + } _ => panic!("List of fields for ForeignCall::DEBUGLOG should be a HeapVector (slice)."), }; avm_instrs.push(AvmInstruction { opcode: AvmOpcode::DEBUGLOG, // (left to right) + // * level direct // * message_offset INDIRECT // * (N/A) message_size is an immediate // * fields_offset_ptr INDIRECT - // * fields_size_ptr direct + // * fields_size_offset direct indirect: Some( AddressingModeBuilder::default() + .direct_operand(level_offset) .indirect_operand(message_offset) .indirect_operand(fields_offset_ptr) - .direct_operand(fields_size_ptr) + .direct_operand(fields_size_offset) .build(), ), operands: vec![ + AvmOperand::U16 { value: level_offset.to_usize() as u16 }, AvmOperand::U16 { value: message_offset.to_usize() as u16 }, AvmOperand::U16 { value: fields_offset_ptr.to_usize() as u16 }, - AvmOperand::U16 { value: fields_size_ptr.to_usize() as u16 }, + AvmOperand::U16 { value: fields_size_offset.to_usize() as u16 }, ], immediates: vec![AvmOperand::U16 { value: message_size as u16 }], ..Default::default() diff --git a/avm-transpiler/src/transpile_contract.rs b/avm-transpiler/src/transpile_contract.rs index 9cfc5843b387..5fa671f30a16 100644 --- a/avm-transpiler/src/transpile_contract.rs +++ b/avm-transpiler/src/transpile_contract.rs @@ -53,7 +53,6 @@ pub struct AvmContractFunctionArtifact { deserialize_with = "ProgramDebugInfo::deserialize_compressed_base64_json" )] pub debug_symbols: ProgramDebugInfo, - pub brillig_names: Vec, } /// Representation of an ACIR contract function but 
with @@ -74,7 +73,6 @@ pub struct AcirContractFunctionArtifact { deserialize_with = "ProgramDebugInfo::deserialize_compressed_base64_json" )] pub debug_symbols: ProgramDebugInfo, - pub brillig_names: Vec, } /// An enum that allows the TranspiledContract struct to contain @@ -129,7 +127,6 @@ impl From for TranspiledContractArtifact { abi: function.abi, bytecode: base64::prelude::BASE64_STANDARD.encode(avm_bytecode), debug_symbols: ProgramDebugInfo { debug_infos }, - brillig_names: function.brillig_names, }, )); } else { @@ -210,7 +207,6 @@ fn create_revert_dispatch_fn() -> AvmOrAcirContractFunctionArtifact { }, bytecode: base64::prelude::BASE64_STANDARD.encode(revert_bytecode), debug_symbols: ProgramDebugInfo { debug_infos: vec![DebugInfo::default()] }, - brillig_names: vec!["public_dispatch".to_string()], }; AvmOrAcirContractFunctionArtifact::Avm(empty_dispatch_fn) diff --git a/aztec-nargo/README.md b/aztec-nargo/README.md deleted file mode 100644 index 79e3187a3754..000000000000 --- a/aztec-nargo/README.md +++ /dev/null @@ -1,12 +0,0 @@ -## `aztec-nargo` - -The `aztec-nargo` utility is packaged with docker and does the following: -1. If the first argument to `aztec-nargo` is not `compile`, it just forwards args to `nargo` and exits. -1. If the first argument _is_ `compile`, it forwards args to `nargo` with some added options (like `--inliner-aggressiveness 0 --show-artifact-paths`) -3. Extracts all artifacts modified by `nargo` -4. Transpiles each artifact using the `avm-transpiler` -5. Generates verification keys for each artifact using `bb` (`barretenberg`'s binary) - -Example usage: `aztec-nargo compile` - -Note: uses versions of each tool from this repository (`nargo` version is from `../noir`). 
diff --git a/aztec-nargo/compile_then_postprocess.sh b/aztec-nargo/compile_then_postprocess.sh deleted file mode 100755 index 8136108a5bc7..000000000000 --- a/aztec-nargo/compile_then_postprocess.sh +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env bash -# This is a wrapper script for nargo. -# Pass any args that you'd normally pass to nargo. -# If the first arg is "compile", -# run nargo and then postprocess any created artifacts. -# -# Usage: compile_then_postprocess.sh [nargo args] -set -euo pipefail - -dir=$(dirname $0) -NARGO=${NARGO:-"$dir/../noir/noir-repo/target/release/nargo"} -TRANSPILER=${TRANSPILER:-"$dir/../avm-transpiler/target/release/avm-transpiler"} -BB=${BB:-"$dir/../barretenberg/cpp/build/bin/bb"} - - -if [ "${1:-}" != "compile" ]; then - # if not compiling, just pass through to nargo verbatim - $NARGO $@ - exit $? -fi -shift # remove the compile arg so we can inject --show-artifact-paths - -# bb --version returns 00000000.00000000.00000000, so we compute -# the binary hash to ensure we invalidate vk cache artifacts when bb changes -bb_hash=$(sha256sum "$BB" | cut -d' ' -f1) - -# Forward all arguments to nargo, tee output to console. -# Nargo should be outputting errors to stderr, but it doesn't. Use tee to duplicate stdout to stderr to display errors. -artifacts_to_process=$($NARGO compile --pedantic-solving --inliner-aggressiveness 0 --show-artifact-paths $@ | tee >(cat >&2) | grep -oP 'Saved contract artifact to: \K.*') - -# Postprocess each artifact -# `$artifacts_to_process` needs to be unquoted here, otherwise it will break if there are multiple artifacts -for artifact in $artifacts_to_process; do - # Transpile in-place - $TRANSPILER "$artifact" "$artifact" - artifact_name=$(basename "$artifact") - cache_dir=$(dirname "$artifact")/cache - mkdir -p "$cache_dir" - echo "Generating verification keys for functions in $artifact_name. 
Cache directory: $cache_dir" - - # See contract_artifact.ts (getFunctionType) for reference - private_fn_indices=$(jq -r '.functions | to_entries | map(select((.value.custom_attributes | contains(["public"]) | not) and (.value.is_unconstrained == false))) | map(.key) | join(" ")' "$artifact") - - # Build a list of BB verification key generation commands - job_commands=() - for fn_index in $private_fn_indices; do - fn_name=$(jq -r ".functions[$fn_index].name" "$artifact") - # Remove debug symbols since they don't affect vk computation, but can cause cache misses - fn_artifact=$(jq -r ".functions[$fn_index] | del(.debug_symbols)" "$artifact") - fn_artifact_hash=$(echo "$fn_artifact-$bb_hash" | sha256sum | cut -d' ' -f1) - - # File to capture the base64 encoded verification key. - vk_cache="$cache_dir/${artifact_name}_${fn_artifact_hash}.vk" - - # Don't regenerate if vk_cache exists - if [ -f "$vk_cache" ]; then - echo "Using cached verification key for function \"$fn_name\"" - continue - fi - - fn_artifact_path="$artifact.function_artifact_$fn_index.json" - echo "$fn_artifact" > "$fn_artifact_path" - - # Construct the command: - # The BB call is wrapped by GNU parallel's memsuspend (active memory-based suspension) - # This command will generate the verification key, base64 encode it, and save it to vk_cache. - job_commands+=("echo \"Generating verification key for function $fn_name\"; $BB write_vk --scheme client_ivc --verifier_type standalone -b \"$fn_artifact_path\" -o - | base64 > \"$vk_cache\"; rm \"$fn_artifact_path\"") - done - - # Run the commands in parallel, limiting to available cores and using memsuspend to actively suspend jobs if memory usage exceeds 2G. - # GNU parallel will suspend a job if free memory drops below 1G. - printf "%s\n" "${job_commands[@]}" | parallel --jobs "$(nproc)" --memsuspend 1G - - # Now, update the artifact sequentially with each generated verification key. 
- for fn_index in $private_fn_indices; do - fn_artifact=$(jq -r ".functions[$fn_index] | del(.debug_symbols)" "$artifact") - fn_artifact_hash=$(echo "$fn_artifact-$bb_hash" | sha256sum | cut -d' ' -f1) - vk_cache="$cache_dir/${artifact_name}_${fn_artifact_hash}.vk" - verification_key=$(cat "$vk_cache") - # Update the artifact with the new verification key. - jq ".functions[$fn_index].verification_key = \"$verification_key\"" "$artifact" > "$artifact.tmp" - mv "$artifact.tmp" "$artifact" - done -done diff --git a/aztec-postprocess-contract/README.md b/aztec-postprocess-contract/README.md new file mode 100644 index 000000000000..a6512572d47f --- /dev/null +++ b/aztec-postprocess-contract/README.md @@ -0,0 +1,14 @@ +## `aztec-postprocess-contract` + +The Aztec compilation process consists of two steps: +1. `aztec-nargo` which just forwards all arguments to Noir's `nargo` compiler at the version tied to this version of aztec. +2. `aztec-postprocess-contract` which post-processes the compiled contracts to prepare them for use in the Aztec ecosystem. + +### `transpile_contract_and_gen_vks.sh` +This script provides the core functionality behind the `aztec-postprocess-contract` command available via aztec-up. It performs postprocessing on compiled Noir contracts: +1. Finds all contract artifacts in `target` directories +2. Transpiles each artifact using the `avm-transpiler` +3. Generates verification keys for each artifact using `bb` (`barretenberg`'s binary) +4. 
Caches verification keys to speed up subsequent compilations + +Example usage: `aztec-postprocess-contract` (via aztec-up) or directly `./transpile_contract_and_gen_vks.sh` diff --git a/aztec-postprocess-contract/transpile_contract_and_gen_vks.sh b/aztec-postprocess-contract/transpile_contract_and_gen_vks.sh new file mode 100755 index 000000000000..32bc4f66587c --- /dev/null +++ b/aztec-postprocess-contract/transpile_contract_and_gen_vks.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# This script performs postprocessing on compiled Noir contracts. +# It expects to find compiled artifacts and transforms them via +# transpilation and verification key generation. +# +# Usage: transpile_contract_and_gen_vks.sh [artifact_path ...] +# If no paths provided, bb will search for artifacts in target/ directories +set -euo pipefail + +dir=$(dirname $0) +BB=${BB:-"$dir/../barretenberg/cpp/build/bin/bb"} + +# No arguments provided - let bb auto-discover and process all artifacts +echo "Searching for contract artifacts in target/ directories..." +$BB aztec_process + +echo "Contract postprocessing complete!" diff --git a/aztec-up/README.md b/aztec-up/README.md index f6aa5b69b708..c484ecde1418 100644 --- a/aztec-up/README.md +++ b/aztec-up/README.md @@ -11,7 +11,8 @@ the user's `PATH` variable in their shell startup script so they can be found. - `aztec` - The infrastructure container. - `aztec-cli` - A command-line tool for interacting with infrastructure. -- `aztec-nargo` - A build of `nargo` from `noir` that is guaranteed to be version-aligned. Provides compiler, lsp and more. On `aztec-nargo compile <...>`, automatically transpiles artifacts using `avm-transpiler` and generates verification keys using `bb`. +- `aztec-nargo` - A build of `nargo` from `noir` that is guaranteed to be version-aligned. Provides compiler, lsp and more. 
+- `aztec-postprocess-contract` - Postprocessing tool for Aztec contracts that transpiles artifacts using `avm-transpiler` and generates verification keys using `bb`. - `aztec-sandbox` - A wrapper around docker-compose that launches services needed for sandbox testing. - `aztec-up` - A tool to upgrade the aztec toolchain to the latest, or specific versions. - `aztec-builder` - A useful tool for projects to generate ABIs and update their dependencies. diff --git a/aztec-up/bin/.aztec-run b/aztec-up/bin/.aztec-run index fe929daa8cfc..3eea889b4003 100755 --- a/aztec-up/bin/.aztec-run +++ b/aztec-up/bin/.aztec-run @@ -22,6 +22,8 @@ VERSION=${VERSION:-"latest"} # We default to colored logs. FORCE_COLOR=${FORCE_COLOR:-1} +DOCKER_REPO=${DOCKER_REPO:-"aztecprotocol/aztec"} + function warn { y="\033[33m" r="\033[0m" @@ -144,7 +146,7 @@ function run { ${arg_host_binds:-} \ ${arg_user:-} \ --entrypoint "" \ - aztecprotocol/aztec:$VERSION "${args[@]}" + $DOCKER_REPO:$VERSION "${args[@]}" } if [ -t 0 ]; then diff --git a/aztec-up/bin/aztec b/aztec-up/bin/aztec index 702e9723b719..ef53d4e08b96 100755 --- a/aztec-up/bin/aztec +++ b/aztec-up/bin/aztec @@ -1,8 +1,9 @@ #!/usr/bin/env bash set -euo pipefail -NETWORK=${NETWORK:-} -VERSION=${VERSION:-${NETWORK:-"latest"}} +export AZTEC_PATH="${AZTEC_PATH:-$HOME/.aztec}" +export VERSION="${VERSION:-$(cat "$AZTEC_PATH/default_version")}" +export DOCKER_REPO="${DOCKER_REPO:-"aztecprotocol/aztec"}" # Take copy of command-line arguments, so we can mutate to parse. if [ $# -eq 0 ]; then @@ -10,6 +11,7 @@ if [ $# -eq 0 ]; then else args=("$@") fi + while [ "$#" -gt 0 ]; do case $1 in -p | --port) @@ -34,14 +36,6 @@ while [ "$#" -gt 0 ]; do ADMIN_PORT="$2" shift 2 ;; - --pxe.network) - # Set version to user-specified network (e.g. 'devnet') - VERSION="$2" - echo "Using aztecprotocol/aztec:$VERSION" - # Turn on proving if connecting to a network. 
- export PXE_PROVER_ENABLED=1 - shift 2 - ;; --help) NO_PORT_FORWARDING=1 shift 1 @@ -55,7 +49,7 @@ done set -- "${args[@]}" function get_env_vars { - docker run --rm --entrypoint /bin/bash aztecprotocol/aztec:$VERSION -c "cat /usr/src/yarn-project/foundation/src/config/env_var.ts" | + docker run --rm --entrypoint /bin/bash $DOCKER_REPO:$VERSION -c "cat /usr/src/yarn-project/foundation/src/config/env_var.ts" | awk -F"'" '{for(i=2;i<=NF;i+=2) printf $i " "}' } @@ -65,12 +59,14 @@ case ${1:-} in # Should this just be aztec-test? It's like, a new command that doesn't exist on aztec cli. # Or just make this a first class command on aztec cli? + export ENV_VARS_TO_INJECT="LOG_LEVEL" + export LOG_LEVEL="${LOG_LEVEL:-info}" + # Properly escape all arguments args_str=$(printf '%q ' "$@") # TODO: Need to force ipv4 here with 127.0.0.1 for some reason. TXE's not on ipv6? exec $(dirname $0)/.aztec-run "" bash -c " - export LOG_LEVEL=${LOG_LEVEL:-"info"} node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js start --txe --port 8081 & while ! nc -z 127.0.0.1 8081 &>/dev/null; do sleep 0.2; done export NARGO_FOREIGN_CALL_TIMEOUT=300000 @@ -95,7 +91,7 @@ case ${1:-} in export CONTAINER_NAME=aztec-start-$(printf "%08x" $((RANDOM * RANDOM))) if [ "${1:-}" == "--sandbox" ]; then - if [ "$SUPERVISED" == "1" ]; then + if [ "$SUPERVISED" == "1" ]; then echo "supervised-start is not compatible with --sandbox. Please use regular start command" exit 1 fi @@ -105,7 +101,6 @@ case ${1:-} in # We should not have to provide a binary path and working dir. # Why is this not just determined and chosen at runtime? # In fact almost none of these should not have to be set if we have sensible defaults. 
- export L1_CHAIN_ID=31337 export ARCHIVER_POLLING_INTERVAL_MS=500 export P2P_BLOCK_CHECK_INTERVAL_MS=500 export SEQ_TX_POLLING_INTERVAL_MS=500 @@ -115,10 +110,12 @@ case ${1:-} in export LOG_LEVEL=${LOG_LEVEL:-info;silent:sequencer;verbose:debug_log} export DEPLOY_AZTEC_CONTRACTS_SALT=${DEPLOY_AZTEC_CONTRACTS_SALT:-$RANDOM} - ANVIL_PORT=${ANVIL_PORT:-8545} anvil_port_assignment="$ANVIL_PORT:8545" + export L1_CHAIN_ID=${L1_CHAIN_ID:-31337} + export ETHEREUM_HOSTS=${ETHEREUM_HOSTS:-"http://127.0.0.1:${ANVIL_PORT}"} + PORTS_TO_EXPOSE="${PORTS_TO_EXPOSE:-} $anvil_port_assignment" exec $(dirname $0)/.aztec-run aztec-sandbox bash -c " @@ -208,7 +205,7 @@ case ${1:-} in --env SERVE=${SERVE:-0} \ $([ "${SERVE:-0}" == "1" ] && echo "-p 8000:8000" || echo "") \ -v $(realpath $(dirname $2))/:/tmp \ - aztecprotocol/aztec:$VERSION /tmp/$(basename $2) $3 + $DOCKER_REPO:$VERSION /tmp/$(basename $2) $3 ;; *) export ENV_VARS_TO_INJECT="SECRET_KEY" diff --git a/aztec-up/bin/aztec-install b/aztec-up/bin/aztec-install index 4ee6842e9373..7d5a808feeb4 100755 --- a/aztec-up/bin/aztec-install +++ b/aztec-up/bin/aztec-install @@ -17,7 +17,7 @@ else NON_INTERACTIVE=${NON_INTERACTIVE:-0} fi -AZTEC_PATH=$HOME/.aztec +AZTEC_PATH="${AZTEC_PATH:-$HOME/.aztec}" BIN_PATH=${BIN_PATH:-$AZTEC_PATH/bin} # Define version if specified, otherwise set to "latest". @@ -58,10 +58,11 @@ function title() { echo -e "${r}" fi echo -e "This will install the following scripts and update your PATH if necessary:" - echo -e " ${bold}${g}aztec${r} - a collection of tools to launch subsystems and interact with the aztec network." - echo -e " ${bold}${g}aztec-nargo${r} - aztec's build of nargo, the noir compiler toolchain." - echo -e " ${bold}${g}aztec-up${r} - a tool to upgrade the aztec toolchain to the latest, or specific versions." 
- echo -e " ${bold}${g}aztec-wallet${r} - our minimalistic CLI wallet" + echo -e " ${bold}${g}aztec${r} - a collection of tools to launch subsystems and interact with the aztec network." + echo -e " ${bold}${g}aztec-nargo${r} - aztec's build of nargo, the noir compiler toolchain." + echo -e " ${bold}${g}aztec-postprocess-contract${r} - postprocessing tool for Aztec contracts (transpilation and VK generation)." + echo -e " ${bold}${g}aztec-up${r} - a tool to upgrade the aztec toolchain to the latest, or specific versions." + echo -e " ${bold}${g}aztec-wallet${r} - our minimalistic CLI wallet" echo read -p "Do you wish to continue? (y/n)" -n 1 -r echo @@ -145,15 +146,14 @@ if ! docker info &>/dev/null; then exit 1 fi +mkdir -p "$AZTEC_PATH" +TMP_VERSION_FILE="$(mktemp)" +echo "$VERSION" > "$TMP_VERSION_FILE" + # Pull the aztec container. if [ -z "${SKIP_PULL:-}" ]; then info "Pulling aztec version $VERSION..." docker pull aztecprotocol/aztec:$VERSION - - # If not latest, retag to be latest so it runs from scripts. - if [ $VERSION != "latest" ]; then - docker tag aztecprotocol/aztec:$VERSION aztecprotocol/aztec:latest - fi fi info "Installing scripts in $BIN_PATH..." @@ -162,7 +162,9 @@ install_bin .aztec-run install_bin aztec install_bin aztec-up install_bin aztec-nargo +install_bin aztec-postprocess-contract install_bin aztec-wallet +mv "$TMP_VERSION_FILE" "$AZTEC_PATH/default_version" update_path_env_var $BIN_PATH diff --git a/aztec-up/bin/aztec-nargo b/aztec-up/bin/aztec-nargo index e186645992e3..c0069f45b74b 100755 --- a/aztec-up/bin/aztec-nargo +++ b/aztec-up/bin/aztec-nargo @@ -6,6 +6,10 @@ if [[ $PWD != ${HOME}* ]]; then exit 1 fi +# Special-case nargo's LSP command: +# 1. include --rm for cleanup +# 2. don't specify a user (run as root) +# 3. 
don't specify a workdir if [ "${1:-}" == "lsp" ]; then docker run --rm -i \ --name aztec-nargo-lsp \ @@ -16,6 +20,7 @@ if [ "${1:-}" == "lsp" ]; then exit fi +# Determine if we need interactive/tty flags if [ -t 0 ]; then if [ -t 1 ]; then args="-ti" @@ -24,10 +29,11 @@ if [ -t 0 ]; then fi fi +# Pass through directly to vanilla nargo docker run ${args:-} \ --user $(id -u):$(id -g) \ -v $HOME:$HOME \ -e HOME=$HOME \ --workdir="$PWD" \ - --entrypoint /usr/src/aztec-nargo/compile_then_postprocess.sh \ + --entrypoint=/usr/src/noir/noir-repo/target/release/nargo \ aztecprotocol/aztec "$@" diff --git a/aztec-up/bin/aztec-postprocess-contract b/aztec-up/bin/aztec-postprocess-contract new file mode 100755 index 000000000000..35b7f1d1fdcf --- /dev/null +++ b/aztec-up/bin/aztec-postprocess-contract @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -euo pipefail + +if [[ $PWD != ${HOME}* ]]; then + >&2 echo "Due to how we containerize our applications, we require your working directory to be somewhere within $HOME." 
+ exit 1 +fi + +# Determine if we need interactive/tty flags +if [ -t 0 ]; then + if [ -t 1 ]; then + args="-ti" + else + args="-i" + fi +fi + +# Run the aztec-postprocess-contract script from within the container +docker run ${args:-} \ + --user $(id -u):$(id -g) \ + -v $HOME:$HOME \ + -e HOME=$HOME \ + --workdir="$PWD" \ + --entrypoint=/usr/src/aztec-postprocess-contract/transpile_contract_and_gen_vks.sh \ + aztecprotocol/aztec "$@" \ No newline at end of file diff --git a/aztec-up/bootstrap.sh b/aztec-up/bootstrap.sh index 3e33d653aeac..3cecb59e799a 100755 --- a/aztec-up/bootstrap.sh +++ b/aztec-up/bootstrap.sh @@ -32,7 +32,7 @@ function test_cmds { function test { echo_header "aztec-up test" - test_cmds | filter_test_cmds | parallelise + test_cmds | filter_test_cmds | parallelize } function release { diff --git a/aztec-up/scripts/run_test.sh b/aztec-up/scripts/run_test.sh index a0d32a4d8cf7..c4625e9d0a6b 100755 --- a/aztec-up/scripts/run_test.sh +++ b/aztec-up/scripts/run_test.sh @@ -3,6 +3,7 @@ set -euo pipefail trap 'docker rm -f $1 &>/dev/null' SIGINT SIGTERM EXIT docker rm -f $1 &>/dev/null || true +docker pull aztecprotocol/dind docker run --rm \ -d \ --privileged \ diff --git a/aztec-up/test/amm_flow.sh b/aztec-up/test/amm_flow.sh index 2d7b512bee50..6d52f2c61aec 100755 --- a/aztec-up/test/amm_flow.sh +++ b/aztec-up/test/amm_flow.sh @@ -52,13 +52,12 @@ aztec-wallet \ aztec-wallet \ register-contract $canonical_sponsored_fpc_address SponsoredFPC \ - -f accounts:main \ --salt 0 aztec-wallet \ --prover none \ deploy-account \ - -f accounts:main \ + main \ $SPONSORED_FPC_PAYMENT_METHOD echo "Deploying AMM setup" diff --git a/aztec-up/test/bridge_and_claim.sh b/aztec-up/test/bridge_and_claim.sh index 749d02c25819..398057c75d60 100755 --- a/aztec-up/test/bridge_and_claim.sh +++ b/aztec-up/test/bridge_and_claim.sh @@ -42,7 +42,7 @@ aztec-wallet \ --register-only aztec-wallet \ - bridge-fee-juice 1000000000000000000 accounts:main \ + bridge-fee-juice 
1000000000000000000000 accounts:main \ --mint \ --no-wait @@ -59,8 +59,8 @@ done aztec-wallet \ --prover native \ deploy-account \ + accounts:main \ --payment method=fee_juice,claim \ - -f accounts:main # We sanity check the account deployment worked and the fee juice was claimed by deploying a token # with the new account. diff --git a/aztec-up/test/counter_contract.sh b/aztec-up/test/counter_contract.sh index be01c44225d7..3c54816fbf80 100755 --- a/aztec-up/test/counter_contract.sh +++ b/aztec-up/test/counter_contract.sh @@ -55,8 +55,11 @@ cp -Rf ./aztec-packages/noir-projects/noir-contracts/contracts/test/counter_cont cd counter_contract sed -i 's|\.\./\.\./\.\./\.\./|/home/ubuntu/aztec-packages/noir-projects/|g' Nargo.toml -# Compile and codegen. +# Compile the contract. aztec-nargo compile +# Post-process the contract. +aztec-postprocess-contract +# Codegen aztec codegen -o src/artifacts target if [ ! -d src/artifacts ]; then echo "Failed to codegen TypeScript." diff --git a/barretenberg/README.md b/barretenberg/README.md index 9afa316ef4ba..3c95488362cd 100644 --- a/barretenberg/README.md +++ b/barretenberg/README.md @@ -319,15 +319,15 @@ cmake --build --preset default --target run_ecc_bench ### Debugging -#### Debugging Verifification Failures +#### Debugging Verification Failures The CircuitChecker::check_circuit function is used to get the gate index and block information about a failing circuit constraint. If you are in a scenario where you have a failing call to check_circuit and wish to get more information out of it than just the gate index, you can use this feature to get a stack trace, see example below. Usage instructions: -- On ubuntu (or our mainframe accounts) use `sudo apt-get install libdw-dev` to support trace printing -- Use `cmake --preset clang16-dbg-fast-circuit-check-traces` and `cmake --build --preset clang16-dbg-fast-circuit-check-traces` to enable the backward-cpp dependency through the CHECK_CIRCUIT_STACKTRACES CMake variable. 
+- On ubuntu (or our mainframe accounts) use `sudo apt-get install libdw-dev libelf-dev` to support trace printing +- Use `cmake --preset debug-fast-circuit-check-traces` and `cmake --build --preset debug-fast-circuit-check-traces` to enable the backward-cpp dependency through the CHECK_CIRCUIT_STACKTRACES CMake variable. - Run any case where you have a failing check_circuit call, you will now have a stack trace illuminating where this constraint was added in code. Caveats: @@ -450,7 +450,7 @@ In terms of general usage, you should be able to use scrolling or the WASD keys ##### Adding Zones -Zones are how you can keep track of where you are relative in the code and how you can bucket allocations together. All of the colored blocks in the Main Thread row and other threads' rows refer to zones. You can nest zones in deeper and deeper scopes, which leads to stacks of these zones. To add a named zone, all you have to do is add PROFILE_THIS() or PROFILE_THIS_NAME() to a scope and it will create a zone. Note that you can't create multiple zones in the same scope. +Zones are how you can keep track of where you are relative in the code and how you can bucket allocations together. All of the colored blocks in the Main Thread row and other threads' rows refer to zones. You can nest zones in deeper and deeper scopes, which leads to stacks of these zones. To add a named zone, all you have to do is add BB_BENCH_TRACY() or BB_BENCH_TRACY_NAME() to a scope and it will create a zone. Note that you can't create multiple zones in the same scope. ##### Analyzing Fragmentation diff --git a/barretenberg/acir_tests/README.md b/barretenberg/acir_tests/README.md index 8b8eb8a554c8..e3c232874bd9 100644 --- a/barretenberg/acir_tests/README.md +++ b/barretenberg/acir_tests/README.md @@ -1,66 +1,46 @@ -# Acir Test Vector Runner +# ACIR Tests -The aim is to verify acir tests verify through a given backend binary. 
"Backend binaries" can include e.g.: +- Copies test programs from Noir (in `acir_tests/`) and compiles them +- The scripts/ folder assists bootstrap.sh in defining test scenarios involving compiled private Noir function artifacts (the bytecode being in ACIR format, hence the name of this module) +- The bootstrap.sh script is the source of truth for which proving modes are tested, e.g. solidity-friendly ultra honk uses --oracle_hash keccak. -- bb (native CLI) -- bb.js (typescript CLI) -- bb.js-dev (symlink in your PATH that runs the typescript CLI via ts-node) -- bb.js.browser (script in `headless-test` that runs a test through bb.js in a browser instance via playwright) +## Quick Start -## Building the tests. - -To build all the tests: - -``` +```bash +# Build all the test programs ./bootstrap.sh -``` -This will clone the acir test vectors from the noir repo, removing any that are not relevent. -It will then compile them all using local repo versions of nargo and bb (used for generating recursive inputs). - -## Running the tests. - -``` +# Run all tests ./bootstrap.sh test ``` -This will run all the tests as returned by `./bootstrap.sh test_cmds`. - -To run a single test you can: +## Running Specific Tests -``` -./run_test.sh -``` +The easiest way to find how to run specific test(s): -By default this will use the native binary `../cpp/build/bin/bb` and the `prove_and_verify` flow. +```bash +# See all available test commands +./bootstrap.sh test_cmds -You can substitute the backend binary using the `BIN` environment variable. -You can turn on logging with `VERBOSE` environment variable. -You can specify which proving system to use with the `SYS` variable (ultra_honk, ultra_rollup_honk, mega_honk). -If not specified it defaults to plonk (TODO: Make explicit). - -``` -$ SYS=ultra_honk BIN=bb.js VERBOSE=1 ./run_test.sh a_1_mul +# Find a specific test +./bootstrap.sh test_cmds | grep assert_statement ``` -You can use a relative path to an executable. e.g. 
if bb.js-dev is not symlinked into your PATH: - +This will show you the exact commands used in CI. For example: ``` -$ BIN=../ts/bb.js-dev VERBOSE=1 ./run_test.sh a_1_mul +c5f89...:ISOLATE=1 scripts/bb_prove_sol_verify.sh assert_statement --disable_zk +c5f89...:ISOLATE=1 scripts/bb_prove_sol_verify.sh assert_statement +c5f89... scripts/bb_prove_bbjs_verify.sh assert_statement +c5f89... scripts/bb_prove.sh assert_statement ``` -``` -$ BIN=./headless-test/bb.js.browser VERBOSE=1 ./run_test.sh a_1_mul +You can run any of these commands directly (ignore the hash prefix): +```bash +scripts/bb_prove.sh assert_statement ``` -You can specify a different testing "flow" with `FLOW` environment variable. Flows are in the `flows` dir. -The default flow is `prove_and_verify`, which is the quickest way to... prove and verify. It's used to test the acir -test vectors actually all pass in whichever version of the backend is being run. -The `all_cmds` flow tests all the supported commands on the binary. Slower, but is there to test the cli. +Programmatically, you can also do from root: +```bash +./barretenberg/acir_tests/bootstrap.sh test_cmds | grep assert_statement | ci3/parallelize ``` -$ FLOW=all_cmds ./run_acir_tests.sh a_1_mul -``` - -We currently have to use a separate flow script to run client_ivc scheme as opposed to just setting `SYS` due to -how cli commands are handled non-uniformly. 
diff --git a/barretenberg/acir_tests/bbjs-test/src/index.ts b/barretenberg/acir_tests/bbjs-test/src/index.ts index 2bc07d7fcff6..64dbad6029d0 100644 --- a/barretenberg/acir_tests/bbjs-test/src/index.ts +++ b/barretenberg/acir_tests/bbjs-test/src/index.ts @@ -9,9 +9,7 @@ const logger = pino({ }); const proofPath = (dir: string) => path.join(dir, "proof"); -const proofAsFieldsPath = (dir: string) => path.join(dir, "proof_fields.json"); -const publicInputsAsFieldsPath = (dir: string) => - path.join(dir, "public_inputs_fields.json"); +const publicInputsPath = (dir: string) => path.join(dir, "public_inputs"); const vkeyPath = (dir: string) => path.join(dir, "vk"); async function generateProof({ @@ -27,7 +25,7 @@ async function generateProof({ oracleHash?: string; multiThreaded?: boolean; }) { - const { UltraHonkBackend, deflattenFields } = await import("@aztec/bb.js"); + const { UltraHonkBackend } = await import("@aztec/bb.js"); logger.debug(`Generating proof for ${bytecodePath}...`); const circuitArtifact = await fs.readFile(bytecodePath); @@ -45,17 +43,16 @@ async function generateProof({ await fs.writeFile(proofPath(outputDirectory), Buffer.from(proof.proof)); logger.debug("Proof written to " + proofPath(outputDirectory)); - await fs.writeFile( - publicInputsAsFieldsPath(outputDirectory), - JSON.stringify(proof.publicInputs) + // Convert public inputs from field strings to binary + const publicInputsBuffer = Buffer.concat( + proof.publicInputs.map((field: string) => { + const hex = field.startsWith('0x') ? 
field.slice(2) : field; + return Buffer.from(hex.padStart(64, '0'), 'hex'); + }) ); + await fs.writeFile(publicInputsPath(outputDirectory), publicInputsBuffer); logger.debug( - "Public inputs written to " + publicInputsAsFieldsPath(outputDirectory) - ); - - await fs.writeFile( - proofAsFieldsPath(outputDirectory), - JSON.stringify(deflattenFields(proof.proof)) + "Public inputs written to " + publicInputsPath(outputDirectory) ); const verificationKey = await backend.getVerificationKey({ @@ -69,21 +66,24 @@ async function generateProof({ } async function verifyProof({ directory }: { directory: string }) { - const { BarretenbergVerifier } = await import("@aztec/bb.js"); + const { UltraHonkVerifierBackend } = await import("@aztec/bb.js"); - const verifier = new BarretenbergVerifier(); + const verifier = new UltraHonkVerifierBackend(); const proof = await fs.readFile(proofPath(directory)); - const publicInputs = JSON.parse( - await fs.readFile(publicInputsAsFieldsPath(directory), "utf8") - ); + // Read binary public inputs and convert to field strings + const publicInputsBinary = await fs.readFile(publicInputsPath(directory)); + const publicInputs = []; + for (let i = 0; i < publicInputsBinary.length; i += 32) { + const chunk = publicInputsBinary.slice(i, Math.min(i + 32, publicInputsBinary.length)); + publicInputs.push('0x' + chunk.toString('hex')); + } logger.debug(`publicInputs: ${JSON.stringify(publicInputs)}`); - const vkey = await fs.readFile(vkeyPath(directory)); + const verificationKey = await fs.readFile(vkeyPath(directory)); - const verified = await verifier.verifyUltraHonkProof( - { proof: new Uint8Array(proof), publicInputs }, - new Uint8Array(vkey) + const verified = await verifier.verifyProof( + { proof: new Uint8Array(proof), publicInputs, verificationKey}, ); await verifier.destroy(); diff --git a/barretenberg/acir_tests/bootstrap.sh b/barretenberg/acir_tests/bootstrap.sh index 5decf536abbf..321d89b93e35 100755 --- a/barretenberg/acir_tests/bootstrap.sh 
+++ b/barretenberg/acir_tests/bootstrap.sh @@ -3,8 +3,6 @@ source $(git rev-parse --show-toplevel)/ci3/source_bootstrap cmd=${1:-} export CRS_PATH=$HOME/.bb-crs -native_build_dir=$(../cpp/scripts/native-preset-build-dir) -export bb=$(realpath ../cpp/$native_build_dir/bin/bb) tests_tar=barretenberg-acir-tests-$(hash_str \ $(../../noir/bootstrap.sh hash-tests) \ @@ -23,12 +21,18 @@ tests_hash=$(hash_str \ ../ts/.rebuild_patterns \ ../noir/)) +function hex_to_fields_json { + # 1. split encoded hex into 64-character lines 3. encode as JSON array of hex strings + fold -w64 | jq -R -s -c 'split("\n") | map(select(length > 0)) | map("0x" + .)' +} + # Generate inputs for a given recursively verifying program. function run_proof_generation { local program=$1 + local native_build_dir=$(../cpp/scripts/native-preset-build-dir) + local bb=$(realpath ../cpp/$native_build_dir/bin/bb) local outdir=$(mktemp -d) trap "rm -rf $outdir" EXIT - local adjustment=16 local ipa_accumulation_flag="" cd ./acir_tests/assert_statement @@ -37,35 +41,37 @@ function run_proof_generation { # Adjust settings based on program type if [[ $program == *"rollup"* ]]; then - adjustment=26 ipa_accumulation_flag="--ipa_accumulation" fi # If the test program has zk in it's name would like to use the zk prover, so we empty the flag in this case. 
if [[ $program == *"zk"* ]]; then disable_zk="" fi - local prove_cmd="$bb prove --scheme ultra_honk $disable_zk --init_kzg_accumulator $ipa_accumulation_flag --output_format fields --write_vk -o $outdir -b ./target/program.json -w ./target/witness.gz" + local prove_cmd="$bb prove --scheme ultra_honk $disable_zk $ipa_accumulation_flag --write_vk -o $outdir -b ./target/program.json -w ./target/witness.gz" echo_stderr "$prove_cmd" dump_fail "$prove_cmd" - local vk_fields=$(cat "$outdir/vk_fields.json") - local vk_hash_fields=$(cat "$outdir/vk_hash_fields.json") - local public_inputs_fields=$(cat "$outdir/public_inputs_fields.json") - local proof_fields=$(cat "$outdir/proof_fields.json") - generate_toml "$program" "$vk_fields" "$vk_hash_fields" "$proof_fields" "$public_inputs_fields" + # Split the hex-encoded vk bytes into fields boundaries (but still hex-encoded), first making 64-character lines and then encoding as JSON. + # This used to be done by barretenberg itself, but with serialization now always being in field elements we can do it outside of bb. 
+ local vk_fields=$(cat "$outdir/vk" | xxd -p -c 0 | hex_to_fields_json) + local vk_hash_field="\"0x$(cat "$outdir/vk_hash" | xxd -p -c 0)\"" + local public_inputs_fields=$(cat "$outdir/public_inputs" | xxd -p -c 0 | hex_to_fields_json) + local proof_fields=$(cat "$outdir/proof" | xxd -p -c 0 | hex_to_fields_json) + + generate_toml "$program" "$vk_fields" "$vk_hash_field" "$proof_fields" "$public_inputs_fields" } function generate_toml { local program=$1 local vk_fields=$2 - local vk_hash_fields=$3 + local vk_hash_field=$3 local proof_fields=$4 - local num_inner_public_inputs=$5 + local public_inputs_fields=$5 local output_file="../$program/Prover.toml" jq -nr \ - --arg key_hash "$vk_hash_fields" \ + --arg key_hash "$vk_hash_field" \ --argjson vk_f "$vk_fields" \ --argjson public_inputs_f "$public_inputs_fields" \ --argjson proof_f "$proof_fields" \ @@ -79,13 +85,24 @@ function generate_toml { } function regenerate_recursive_inputs { - local program=$1 # Compile the assert_statement test as it's used for the recursive tests. - COMPILE=2 ./scripts/run_test.sh assert_statement + cd ./acir_tests/assert_statement + local nargo=$(realpath ../../../../noir/noir-repo/target/release/nargo) + rm -rf target + $nargo compile --silence-warnings && $nargo execute + mv ./target/assert_statement.json ./target/program.json + mv ./target/assert_statement.gz ./target/witness.gz + cd ../.. 
parallel 'run_proof_generation {}' ::: $(ls internal_test_programs) } -export -f regenerate_recursive_inputs run_proof_generation generate_toml +export -f hex_to_fields_json regenerate_recursive_inputs run_proof_generation generate_toml + +function compile { + echo_header "Compiling acir_tests" + local nargo=$(realpath ../../noir/noir-repo/target/release/nargo) + denoise "parallel --joblog joblog.txt --line-buffered 'cd {} && rm -rf target && $nargo compile --silence-warnings && $nargo execute && mv ./target/\$(basename {}).json ./target/program.json && mv ./target/\$(basename {}).gz ./target/witness.gz' ::: ./acir_tests/*" +} function build { echo_header "acir_tests build" @@ -107,9 +124,8 @@ function build { # Generates the Prover.toml files for the recursive tests from the assert_statement test. denoise regenerate_recursive_inputs - # COMPILE=2 only compiles the test. - denoise "parallel --joblog joblog.txt --line-buffered 'COMPILE=2 ./scripts/run_test.sh \$(basename {})' ::: ./acir_tests/*" - + # Compile all tests + compile cache_upload $tests_tar acir_tests fi @@ -120,98 +136,87 @@ function build { function test { echo_header "acir_tests testing" - test_cmds | filter_test_cmds | parallelise + test_cmds | filter_test_cmds | parallelize } # Prints to stdout, one per line, the command to execute each individual test. # Paths are all relative to the repository root. # this function is used to generate the commands for running the tests. function test_cmds { + # NOTE: client-ivc commands are tested in yarn-project/end-to-end bench due to circular dependencies. 
+ # Locally, you can do ./bootstrap.sh bench_ivc to run the 'tests' (benches with validation) + # non_recursive_tests include all of the non recursive test programs local non_recursive_tests=$(find ./acir_tests -maxdepth 1 -mindepth 1 -type d | \ grep -vE 'verify_honk_proof|verify_honk_zk_proof|verify_rollup_honk_proof') - local run_test=$(realpath --relative-to=$root ./scripts/run_test.sh) - local run_test_browser=$(realpath --relative-to=$root ./scripts/run_test_browser.sh) - local bbjs_bin="../ts/dest/node/main.js" + local scripts=$(realpath --relative-to=$root scripts) + local sol_prefix="$tests_hash:ISOLATE=1" # Solidity tests. Isolate because anvil. - local prefix="$tests_hash:ISOLATE=1" - echo "$prefix FLOW=sol_honk $run_test assert_statement" - echo "$prefix FLOW=sol_honk $run_test a_1_mul" - echo "$prefix FLOW=sol_honk $run_test slices" - echo "$prefix FLOW=sol_honk $run_test verify_honk_proof" - echo "$prefix FLOW=sol_honk_zk $run_test assert_statement" - echo "$prefix FLOW=sol_honk_zk $run_test a_1_mul" - echo "$prefix FLOW=sol_honk_zk $run_test slices" - echo "$prefix FLOW=sol_honk_zk $run_test verify_honk_proof" + # Test the solidity verifier with and without zk + for t in assert_statement a_1_mul slices verify_honk_proof; do + echo "$sol_prefix $scripts/bb_prove_sol_verify.sh $t --disable_zk" + echo "$sol_prefix $scripts/bb_prove_sol_verify.sh $t" + echo "$sol_prefix USE_OPTIMIZED_CONTRACT=true $scripts/bb_prove_sol_verify.sh $t --disable_zk" + done + # prove with bb cli and verify with bb.js classes + echo "$sol_prefix $scripts/bb_prove_bbjs_verify.sh a_1_mul" + echo "$sol_prefix $scripts/bb_prove_bbjs_verify.sh assert_statement" # bb.js browser tests. Isolate because server. 
- local prefix="$tests_hash:ISOLATE=1:NET=1:CPUS=8" - echo "$prefix:NAME=chrome_verify_honk_proof BROWSER=chrome $run_test_browser verify_honk_proof" - echo "$prefix:NAME=chrome_a_1_mul BROWSER=chrome $run_test_browser a_1_mul" - echo "$prefix:NAME=webkit_verify_honk_proof BROWSER=webkit $run_test_browser verify_honk_proof" - echo "$prefix:NAME=webkit_a_1_mul BROWSER=webkit $run_test_browser a_1_mul" + local browser_prefix="$tests_hash:ISOLATE=1:NET=1:CPUS=8" + echo "$browser_prefix $scripts/browser_prove.sh verify_honk_proof chrome" + echo "$browser_prefix $scripts/browser_prove.sh a_1_mul chrome" + echo "$browser_prefix $scripts/browser_prove.sh verify_honk_proof webkit" + echo "$browser_prefix $scripts/browser_prove.sh a_1_mul webkit" # bb.js tests. - local prefix=$tests_hash # ecdsa_secp256r1_3x through bb.js on node to check 256k support. - echo "$prefix BIN=$bbjs_bin SYS=ultra_honk_deprecated FLOW=prove_then_verify $run_test ecdsa_secp256r1_3x" + echo "$tests_hash $scripts/bbjs_prove.sh ecdsa_secp256r1_3x" # the prove then verify flow for UltraHonk. This makes sure we have the same circuit for different witness inputs. - echo "$prefix BIN=$bbjs_bin SYS=ultra_honk_deprecated FLOW=prove_then_verify $run_test a_6_array" - - # barretenberg-acir-tests-bb: - # Fold and verify an ACIR program stack using ClientIVC, recursively verify as part of the Tube circuit and produce and verify a Honk proof - echo "$prefix FLOW=prove_then_verify_tube $run_test a_6_array" + echo "$tests_hash $scripts/bbjs_prove.sh a_6_array" - # barretenberg-acir-tests-bb-ultra-honk: - # SYS decides which scheme will be used for the test. - # FLOW decides which script (prove, verify, prove_then_verify, etc.) 
will be ran for t in $non_recursive_tests; do - echo "$prefix SYS=ultra_honk FLOW=prove_then_verify $run_test $(basename $t)" + echo "$tests_hash $scripts/bb_prove.sh $(basename $t)" done - echo "$prefix SYS=ultra_honk FLOW=prove_then_verify $run_test assert_statement" + echo "$tests_hash $scripts/bb_prove.sh assert_statement" # Run the UH recursive verifier tests with ZK. - echo "$prefix SYS=ultra_honk FLOW=prove_then_verify $run_test verify_honk_proof" - echo "$prefix SYS=ultra_honk FLOW=prove_then_verify $run_test double_verify_honk_proof" + echo "$tests_hash $scripts/bb_prove.sh verify_honk_proof" + echo "$tests_hash $scripts/bb_prove.sh double_verify_honk_proof" # Run the UH recursive verifier tests without ZK. - echo "$prefix SYS=ultra_honk FLOW=prove_then_verify DISABLE_ZK=true $run_test double_verify_honk_proof" + echo "$tests_hash $scripts/bb_prove.sh double_verify_honk_proof --disable_zk" # Run the ZK UH recursive verifier tests. - echo "$prefix SYS=ultra_honk FLOW=prove_then_verify $run_test double_verify_honk_zk_proof" + echo "$tests_hash $scripts/bb_prove.sh double_verify_honk_zk_proof" # Run the ZK UH recursive verifier tests without ZK. - echo "$prefix SYS=ultra_honk FLOW=prove_then_verify DISABLE_ZK=true $run_test double_verify_honk_zk_proof" + echo "$tests_hash $scripts/bb_prove.sh double_verify_honk_zk_proof --disable_zk" - echo "$prefix SYS=ultra_honk FLOW=prove_then_verify HASH=keccak $run_test assert_statement" - # echo "$prefix SYS=ultra_honk FLOW=prove_then_verify HASH=starknet $run_test assert_statement" - echo "$prefix SYS=ultra_honk FLOW=prove_then_verify ROLLUP=true $run_test verify_rollup_honk_proof" - # Run the assert_statement test with the --disable_zk flag. 
- echo "$prefix SYS=ultra_honk FLOW=prove_then_verify DISABLE_ZK=true $run_test assert_statement" + echo "$tests_hash $scripts/bb_prove.sh assert_statement --oracle_hash keccak" + # If starknet enabled: + #echo "$tests_hash $scripts/bb_prove.sh assert_statement --oracle_hash starknet" + # Test rollup verification (rollup uses --ipa_accumulation) + echo "$tests_hash $scripts/bb_prove.sh verify_rollup_honk_proof --ipa_accumulation" + # Run the assert_statement test with ZK disabled. + echo "$tests_hash $scripts/bb_prove.sh assert_statement --disable_zk" # prove and verify using bb.js classes - echo "$prefix SYS=ultra_honk FLOW=bbjs_prove_verify $run_test a_1_mul" - echo "$prefix SYS=ultra_honk FLOW=bbjs_prove_verify $run_test assert_statement" - - # prove with bb.js and verify with solidity verifier - echo "$prefix SYS=ultra_honk FLOW=bbjs_prove_sol_verify $run_test a_1_mul" - echo "$prefix SYS=ultra_honk FLOW=bbjs_prove_sol_verify $run_test assert_statement" - - # prove with bb cli and verify with bb.js classes - echo "$prefix SYS=ultra_honk FLOW=bb_prove_bbjs_verify $run_test a_1_mul" - echo "$prefix SYS=ultra_honk FLOW=bb_prove_bbjs_verify $run_test assert_statement" + echo "$tests_hash $scripts/bbjs_prove.sh a_1_mul" + echo "$tests_hash $scripts/bbjs_prove.sh assert_statement" # prove with bb.js and verify with bb cli - echo "$prefix SYS=ultra_honk FLOW=bbjs_prove_bb_verify $run_test a_1_mul" - echo "$prefix SYS=ultra_honk FLOW=bbjs_prove_bb_verify $run_test assert_statement" + echo "$tests_hash $scripts/bbjs_prove_bb_verify.sh a_1_mul" + echo "$tests_hash $scripts/bbjs_prove_bb_verify.sh assert_statement" } function bench_cmds { + local dir=$(realpath --relative-to=$root .) 
echo "$tests_hash:CPUS=16 barretenberg/acir_tests/scripts/run_bench.sh ultra_honk_rec_wasm_memory" \ - "'BIN=../ts/dest/node/main.js SYS=ultra_honk_deprecated FLOW=prove_then_verify ./scripts/run_test.sh verify_honk_proof'" + "'scripts/bbjs_legacy_cli_prove.sh verify_honk_proof'" } # TODO(https://github.com/AztecProtocol/barretenberg/issues/1254): More complete testing, including failure tests function bench { rm -rf bench-out && mkdir -p bench-out - bench_cmds | STRICT_SCHEDULING=1 parallelise + bench_cmds | STRICT_SCHEDULING=1 parallelize } case "$cmd" in @@ -228,6 +233,9 @@ case "$cmd" in "hash") echo $tests_hash ;; + "compile") + compile + ;; test|test_cmds|bench|bench_cmds) $cmd ;; diff --git a/barretenberg/acir_tests/browser-test-app/src/index.ts b/barretenberg/acir_tests/browser-test-app/src/index.ts index d65061b85519..7dca690f32de 100644 --- a/barretenberg/acir_tests/browser-test-app/src/index.ts +++ b/barretenberg/acir_tests/browser-test-app/src/index.ts @@ -30,17 +30,16 @@ function installUltraHonkGlobals() { } async function verify(proofData: ProofData, verificationKey: Uint8Array) { - const { BarretenbergVerifier } = await import("@aztec/bb.js"); + const { UltraHonkVerifierBackend } = await import("@aztec/bb.js"); logger.debug(`verifying...`); - const verifier = new BarretenbergVerifier(); - const verified = await verifier.verifyUltraHonkProof( - proofData, - verificationKey + const backend = new UltraHonkVerifierBackend(); + const verified = await backend.verifyProof( + {...proofData, verificationKey} ); logger.debug(`verified: ${verified}`); - await verifier.destroy(); + await backend.destroy(); logger.debug("test complete."); return verified; @@ -88,7 +87,10 @@ function installClientIvcGlobal() { threads, logger: console.log, }); - const [proof, verificationKey] = await backend.prove(witnessBufs, vkBufs); + const [_, proof, verificationKey] = await backend.prove( + witnessBufs, + vkBufs + ); await backend.destroy(); return { proof, verificationKey 
}; } diff --git a/barretenberg/acir_tests/flows/all_cmds.sh b/barretenberg/acir_tests/flows/all_cmds.sh deleted file mode 100755 index 7d1a9722d538..000000000000 --- a/barretenberg/acir_tests/flows/all_cmds.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/sh -set -eu - -VFLAG=${VERBOSE:+-v} -BFLAG="-b ./target/program.json" -FLAGS="-c $CRS_PATH $VFLAG" -# the commands called here are subcommands of the OLD_API command in the native bb binary, -# but no such refactoring was done to the node binary. This is because the node binary is -# deprecated and UltraPlonk is also deprecated. -MAYBE_OLD_API=${NATIVE:+OLD_API} - -# Test we can perform the proof/verify flow. -$BIN $MAYBE_OLD_API gates $FLAGS $BFLAG > /dev/null -$BIN $MAYBE_OLD_API prove -o proof $FLAGS $BFLAG -$BIN $MAYBE_OLD_API write_vk -o vk $FLAGS $BFLAG -$BIN $MAYBE_OLD_API write_pk -o pk $FLAGS $BFLAG -$BIN $MAYBE_OLD_API verify -k vk -p proof $FLAGS - -# Check supplemental functions. -# Grep to determine success. -$BIN $MAYBE_OLD_API contract -k vk $BFLAG -o - | grep "Verification Key Hash" > /dev/null -# Use jq to determine success, and also check result not empty. -OUTPUT=$($BIN $MAYBE_OLD_API proof_as_fields -p proof -k vk -o - | jq .) -[ -n "$OUTPUT" ] || exit 1 -OUTPUT=$($BIN $MAYBE_OLD_API vk_as_fields -k vk -o - | jq .) 
-[ -n "$OUTPUT" ] || exit 1 diff --git a/barretenberg/acir_tests/flows/bb_prove_bbjs_verify.sh b/barretenberg/acir_tests/flows/bb_prove_bbjs_verify.sh deleted file mode 100755 index 169b92ece3c5..000000000000 --- a/barretenberg/acir_tests/flows/bb_prove_bbjs_verify.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -# prove with bb.js and verify using bb classes -set -eu - -if [ "${SYS:-}" != "ultra_honk" ]; then - echo "Error: This flow only supports ultra_honk" - exit 1 -fi - -artifact_dir=$(realpath ./target) -output_dir=$artifact_dir/bbjs-bb-tmp -mkdir -p $output_dir - -# Cleanup on exit -trap "rm -rf $output_dir" EXIT - -# Generate the VK using BB CLI -$BIN write_vk \ - --scheme ultra_honk \ - -b $artifact_dir/program.json \ - -o $output_dir - -# Generate the proof using BB CLI (save as both bytes and fields) -$BIN prove \ - --scheme ultra_honk \ - -b $artifact_dir/program.json \ - -w $artifact_dir/witness.gz \ - -k $output_dir/vk \ - --output_format bytes_and_fields \ - -o $output_dir - -# Verify the proof with bb.js classes -node ../../bbjs-test verify \ - -d $output_dir diff --git a/barretenberg/acir_tests/flows/bbjs_prove_bb_verify.sh b/barretenberg/acir_tests/flows/bbjs_prove_bb_verify.sh deleted file mode 100755 index 241400698779..000000000000 --- a/barretenberg/acir_tests/flows/bbjs_prove_bb_verify.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash - -# prove with bb.js and verify using bb cli -set -eu - -if [ "${SYS:-}" != "ultra_honk" ]; then - echo "Error: This flow only supports ultra_honk" - exit 1 -fi - -artifact_dir=$(realpath ./target) -output_dir=$artifact_dir/bb-bbjs-tmp -mkdir -p $output_dir - -# Cleanup on exit -trap "rm -rf $output_dir" EXIT - -# Writes the proof, public inputs ./target; this also writes the VK -node ../../bbjs-test prove \ - -b $artifact_dir/program.json \ - -w $artifact_dir/witness.gz \ - -o $output_dir - -proof_bytes=$(cat $output_dir/proof | xxd -p) -public_inputs=$(cat $output_dir/public_inputs_fields.json | jq -r '.[]') - 
-public_inputs_bytes="" -for input in $public_inputs; do - public_inputs_bytes+=$input -done - -# Combine proof header and the proof to a single file -echo -n $proof_bytes | xxd -r -p > $output_dir/proof -echo -n $public_inputs_bytes | xxd -r -p > $output_dir/public_inputs -echo "$BIN verify \ - --scheme ultra_honk \ - -k $output_dir/vk \ - -p $output_dir/proof \ - -i $output_dir/public_inputs" - -# Verify the proof with bb cli -$BIN verify \ - --scheme ultra_honk \ - -k $output_dir/vk \ - -p $output_dir/proof \ - -i $output_dir/public_inputs diff --git a/barretenberg/acir_tests/flows/bbjs_prove_sol_verify.sh b/barretenberg/acir_tests/flows/bbjs_prove_sol_verify.sh deleted file mode 100755 index a23bf447e391..000000000000 --- a/barretenberg/acir_tests/flows/bbjs_prove_sol_verify.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash - -# prove using bb.js and verify using solidity verifier -set -eu - -if [ "${SYS:-}" != "ultra_honk" ]; then - echo "Error: This flow only supports ultra_honk" - exit 1 -fi - -artifact_dir=$(realpath ./target) -output_dir=$artifact_dir/bbjs-sol-tmp -mkdir -p $output_dir - -# Cleanup on exit -trap "rm -rf $output_dir" EXIT - -# Generate the proof and VK -node ../../bbjs-test prove \ - -b $artifact_dir/program.json \ - -w $artifact_dir/witness.gz \ - -o $output_dir \ - --oracle-hash keccakZK - -# Write the solidity verifier to ./target -export VK=$output_dir/vk -export VERIFIER_PATH="$output_dir/Verifier.sol" - -# Use the BB CLI to write the solidity verifier - this can also be done with bb.js -$BIN write_solidity_verifier --scheme ultra_honk -k $VK -o $VERIFIER_PATH - -# Verify the proof using the solidity verifier -export PROOF=$output_dir/proof -export HAS_ZK="true" -export PROOF_AS_FIELDS=$output_dir/proof_fields.json -export PUBLIC_INPUTS_AS_FIELDS=$output_dir/public_inputs_fields.json -export TEST_PATH=$(realpath "../../sol-test/HonkTest.sol") -export TESTING_HONK="true" -export TEST_NAME=$(basename $(realpath ./)) - -node 
../../sol-test/src/index.js diff --git a/barretenberg/acir_tests/flows/bbjs_prove_verify.sh b/barretenberg/acir_tests/flows/bbjs_prove_verify.sh deleted file mode 100755 index 3eec6dadf8e4..000000000000 --- a/barretenberg/acir_tests/flows/bbjs_prove_verify.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -# prove and verify using bb.js classes -set -eu - -if [ "${SYS:-}" != "ultra_honk" ]; then - echo "Error: This flow only supports ultra_honk" - exit 1 -fi - -artifact_dir=$(realpath ./target) -output_dir=$artifact_dir/bbjs-tmp -mkdir -p $output_dir - -# Cleanup on exit -trap "rm -rf $output_dir" EXIT - -# Writes the proof, public inputs ./target; this also writes the VK -node ../../bbjs-test prove \ - -b $artifact_dir/program.json \ - -w $artifact_dir/witness.gz \ - -o $output_dir \ - --multi-threaded - -# Verify the proof by reading the files in ./target -node ../../bbjs-test verify \ - -d $output_dir diff --git a/barretenberg/acir_tests/flows/fail.sh b/barretenberg/acir_tests/flows/fail.sh deleted file mode 100755 index c3adb8059ab7..000000000000 --- a/barretenberg/acir_tests/flows/fail.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/usr/bin/env bash -# Fails, for testing the harness. -echo Forced failure. -false diff --git a/barretenberg/acir_tests/flows/prove_and_verify.sh b/barretenberg/acir_tests/flows/prove_and_verify.sh deleted file mode 100755 index 8fc1ea99a6dc..000000000000 --- a/barretenberg/acir_tests/flows/prove_and_verify.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# prove_and_verify produces no output, so is parallel safe. 
-set -eu - -flags="-c $CRS_PATH ${VERBOSE:+-v}" -[ "${RECURSIVE}" = "true" ] && flags+=" --recursive" - -case ${SYS:-} in - "") - cmd=prove_and_verify - ;; - *) - cmd=prove_and_verify_$SYS - ;; -esac - -$BIN $cmd $flags -b ./target/program.json diff --git a/barretenberg/acir_tests/flows/prove_and_verify_program.sh b/barretenberg/acir_tests/flows/prove_and_verify_program.sh deleted file mode 100755 index b9963875f105..000000000000 --- a/barretenberg/acir_tests/flows/prove_and_verify_program.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh -# prove_and_verify produces no output, so is parallel safe. -set -eu - -VFLAG=${VERBOSE:+-v} -FLAGS="-c $CRS_PATH $VFLAG" -[ "${RECURSIVE}" = "true" ] && FLAGS+=" --recursive" - -$BIN prove_and_verify_${SYS}_program $FLAGS -b ./target/program.json diff --git a/barretenberg/acir_tests/flows/prove_then_verify.sh b/barretenberg/acir_tests/flows/prove_then_verify.sh deleted file mode 100755 index 83b580053bb8..000000000000 --- a/barretenberg/acir_tests/flows/prove_then_verify.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash -# prove_then_verify produces intermediate state. We use process substitution to make parallel safe. -set -eu - -BFLAG="-b ./target/program.json" -FLAGS="-c $CRS_PATH ${VERBOSE:+-v}" -[ "${RECURSIVE}" = "true" ] && FLAGS+=" --recursive" - -# Test we can perform the proof/verify flow. -# This ensures we test independent pk construction through real/garbage witness data paths. -# We use process substitution pipes to avoid temporary files, which need cleanup, and can collide with parallelism. 
- -case ${SYS:-} in - "") - # Deprecated; used for old node cli - [ -n "${SYS:-}" ] && SYS="_$SYS" || SYS="" - $BIN verify$SYS $FLAGS \ - -k <($BIN write_vk$SYS -o - $FLAGS $BFLAG) \ - -p <($BIN prove$SYS -o - $FLAGS $BFLAG) - ;; - "ultra_honk") - FLAGS+=" --scheme $SYS --oracle_hash ${HASH:-poseidon2}" - [ "${ROLLUP:-false}" = "true" ] && FLAGS+=" --ipa_accumulation" - [ "${RECURSIVE}" = "true" ] && FLAGS+=" --init_kzg_accumulator" - # DISABLE_ZK controls whether the zero-knowledge property is disabled. - # the flag is by default false, and when true, --disable_zk is added to the flags. - [ "${DISABLE_ZK:-false}" = "true" ] && FLAGS+=" --disable_zk" - - OUTDIR=$(mktemp -d) - trap "rm -rf $OUTDIR" EXIT - $BIN write_vk $FLAGS $BFLAG -o $OUTDIR - $BIN prove $FLAGS $BFLAG -k $OUTDIR/vk -o $OUTDIR - $BIN verify $FLAGS \ - -k $OUTDIR/vk \ - -p $OUTDIR/proof \ - -i $OUTDIR/public_inputs - ;; - "ultra_honk_deprecated") - # TODO(https://github.com/AztecProtocol/barretenberg/issues/1434) deprecated flow is necessary until we finish C++ api refactor and then align ts api - SYS_DEP=_ultra_honk - OUTDIR=$(mktemp -d) - trap "rm -rf $OUTDIR" EXIT - $BIN write_vk$SYS_DEP $FLAGS $BFLAG -o $OUTDIR/vk - $BIN prove$SYS_DEP -o $OUTDIR/proof $FLAGS $BFLAG -k $OUTDIR/vk - $BIN verify$SYS_DEP $FLAGS \ - -k $OUTDIR/vk \ - -p $OUTDIR/proof - ;; - *) - [ -n "${SYS:-}" ] && SYS="_$SYS" || SYS="" - $BIN verify$SYS $FLAGS \ - -k <($BIN write_vk$SYS -o - $FLAGS $BFLAG) \ - -p <($BIN prove$SYS -o - $FLAGS $BFLAG) - ;; -esac diff --git a/barretenberg/acir_tests/flows/prove_then_verify_client_ivc.sh b/barretenberg/acir_tests/flows/prove_then_verify_client_ivc.sh deleted file mode 100755 index 957867df46f6..000000000000 --- a/barretenberg/acir_tests/flows/prove_then_verify_client_ivc.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash -# Create intermediate state in a directory. Uses a temp dir to ensure parallel safe and cleans up on exit. 
-# TODO this is unused -set -eux - -CRS_PATH=${CRS_PATH:-$HOME/.bb-crs} -BIN=$(realpath ${BIN:-../cpp/build/bin/bb}) - -[ -n "${1:-}" ] && cd ./acir_tests/$1 - -outdir=$(mktemp -d) -trap "rm -rf $outdir" EXIT - -flags="--scheme client_ivc -c $CRS_PATH ${VERBOSE:+-v}" - -parallel ::: \ - "$BIN prove $flags -i target/ivc-inputs.msgpack $INFLAG --output_format proof -o $outdir" \ - "$BIN write_vk $flags -i target/ivc-inputs.msgpack $INFLAG --verifier_type ivc -o $outdir" -$BIN verify $flags -p $outdir/proof -k $outdir/vk diff --git a/barretenberg/acir_tests/flows/prove_then_verify_tube.sh b/barretenberg/acir_tests/flows/prove_then_verify_tube.sh deleted file mode 100755 index 97a1f7a6032e..000000000000 --- a/barretenberg/acir_tests/flows/prove_then_verify_tube.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh -set -eux - -mkdir -p ./proofs - -CRS_PATH=${CRS_PATH:-$HOME/.bb-crs} -BIN=$(realpath ${BIN:-../cpp/build/bin/bb}) - -[ -n "${1:-}" ] && cd ./acir_tests/$1 - -outdir=$(mktemp -d) -trap "rm -rf $outdir" EXIT - -# TODO(https://github.com/AztecProtocol/barretenberg/issues/1252): deprecate in favor of normal proving flow -$BIN OLD_API write_arbitrary_valid_client_ivc_proof_and_vk_to_file -c $CRS_PATH ${VERBOSE:+-v} -o $outdir -$BIN prove_tube -c $CRS_PATH ${VERBOSE:+-v} -k $outdir/vk -o $outdir -# TODO(https://github.com/AztecProtocol/barretenberg/issues/1322): Just call verify. 
-$BIN verify_tube -c $CRS_PATH ${VERBOSE:+-v} -o $outdir diff --git a/barretenberg/acir_tests/flows/sol.sh b/barretenberg/acir_tests/flows/sol.sh deleted file mode 100755 index bcb06e9fe392..000000000000 --- a/barretenberg/acir_tests/flows/sol.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/sh -set -eu - -VFLAG=${VERBOSE:+-v} -BFLAG="-b ./target/program.json" -FLAGS="-c $CRS_PATH $VFLAG" - -export PROOF="$PWD/sol_proof" -export PROOF_AS_FIELDS="$PWD/sol_proof_fields.json" -export VK="$PWD/sol_vk" - -# Create a proof, write the solidity contract, write the proof as fields in order to extract the public inputs -$BIN OLD_API prove -o $PROOF $FLAGS -$BIN OLD_API write_vk -o $VK $FLAGS -$BIN OLD_API proof_as_fields -k $VK $FLAGS -p $PROOF -$BIN OLD_API contract -k $VK $FLAGS $BFLAG -o Key.sol - -# Export the paths to the environment variables for the js test runner -export KEY_PATH="$PWD/Key.sol" -export VERIFIER_PATH=$(realpath "../../sol-test/Verifier.sol") -export TEST_PATH=$(realpath "../../sol-test/Test.sol") -export BASE_PATH=$(realpath "../../../sol/src/ultra/BaseUltraVerifier.sol") - -# Use solcjs to compile the generated key contract with the template verifier and test contract -# index.js will start an anvil, on a random port -# Deploy the verifier then send a test transaction -export TEST_NAME=$(basename $(pwd)) -node ../../sol-test/src/index.js diff --git a/barretenberg/acir_tests/flows/sol_honk.sh b/barretenberg/acir_tests/flows/sol_honk.sh deleted file mode 100755 index 07258678a98b..000000000000 --- a/barretenberg/acir_tests/flows/sol_honk.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/sh -set -eu - -VFLAG=${VERBOSE:+-v} -BFLAG="-b ./target/program.json" -FLAGS="-c $CRS_PATH $VFLAG --scheme ultra_honk --disable_zk" -PROVE_FLAGS="$FLAGS $BFLAG --oracle_hash keccak --output_format bytes_and_fields --write_vk" -VERIFY_FLAGS="$FLAGS --oracle_hash keccak" - -outdir=$(mktemp -d) -trap "rm -rf $outdir" EXIT - -# Export the paths to the environment variables for the js test 
runner -export PUBLIC_INPUTS="$outdir/public_inputs" -export PUBLIC_INPUTS_AS_FIELDS="$outdir/public_inputs_fields.json" -export PROOF="$outdir/proof" -export PROOF_AS_FIELDS="$outdir/proof_fields.json" -export VK="$outdir/vk" -export VERIFIER_CONTRACT="$outdir/Verifier.sol" - -# Create a proof, write the solidity contract, write the proof as fields in order to extract the public inputs -$BIN prove $PROVE_FLAGS -o $outdir -$BIN verify $VERIFY_FLAGS -i $PUBLIC_INPUTS -k $VK -p $PROOF -$BIN write_solidity_verifier $FLAGS -k $VK -o $VERIFIER_CONTRACT - -# Export the paths to the environment variables for the js test runner -export VERIFIER_PATH="$outdir/Verifier.sol" -export TEST_PATH=$(realpath "../../sol-test/HonkTest.sol") -export TESTING_HONK="true" - -# Use solcjs to compile the generated key contract with the template verifier and test contract -# index.js will start an anvil, on a random port -# Deploy the verifier then send a test transaction -export TEST_NAME=$(basename $outdir) -node ../../sol-test/src/index.js diff --git a/barretenberg/acir_tests/flows/sol_honk_zk.sh b/barretenberg/acir_tests/flows/sol_honk_zk.sh deleted file mode 100755 index 93c93c1be692..000000000000 --- a/barretenberg/acir_tests/flows/sol_honk_zk.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/sh -set -eux - -VFLAG=${VERBOSE:+-v} -BFLAG="-b ./target/program.json" -FLAGS="-c $CRS_PATH $VFLAG --scheme ultra_honk" -PROTOCOL_FLAGS=" --honk_recursion 1 --oracle_hash keccak" - -outdir=$(mktemp -d) -trap "rm -rf $outdir" EXIT - -# Export the paths to the environment variables for the js test runner -export PUBLIC_INPUTS="$outdir/public_inputs" -export PUBLIC_INPUTS_AS_FIELDS="$outdir/public_inputs_fields.json" -export PROOF="$outdir/proof" -export PROOF_AS_FIELDS="$outdir/proof_fields.json" -export VK="$outdir/vk" -export VERIFIER_CONTRACT="$outdir/Verifier.sol" - -# Create a proof, write the solidity contract, write the proof as fields in order to extract the public inputs -$BIN prove -o $outdir 
$FLAGS $BFLAG $PROTOCOL_FLAGS --output_format bytes_and_fields --write_vk -$BIN verify -i $PUBLIC_INPUTS -k $VK -p $PROOF $FLAGS $PROTOCOL_FLAGS -$BIN write_solidity_verifier $FLAGS -k $VK -o $VERIFIER_CONTRACT - -# Export the paths to the environment variables for the js test runner -export VERIFIER_PATH="$outdir/Verifier.sol" -export TEST_PATH=$(realpath "../../sol-test/HonkTest.sol") -export TESTING_HONK="true" -export HAS_ZK="true" - -# Use solcjs to compile the generated key contract with the template verifier and test contract -# index.js will start an anvil, on a random port -# Deploy the verifier then send a test transaction -export TEST_NAME=$(basename $outdir) -node ../../sol-test/src/index.js diff --git a/barretenberg/acir_tests/headless-test/package.json b/barretenberg/acir_tests/headless-test/package.json index 85c0efb33ce0..20e65c7af890 100644 --- a/barretenberg/acir_tests/headless-test/package.json +++ b/barretenberg/acir_tests/headless-test/package.json @@ -11,7 +11,7 @@ "chalk": "^5.3.0", "commander": "^12.1.0", "playwright": "1.49.0", - "puppeteer": "^22.4.1" + "puppeteer": "^24.22.3" }, "devDependencies": { "ts-node": "^10.9.2", diff --git a/barretenberg/acir_tests/package.json b/barretenberg/acir_tests/package.json index feb5055a88d6..5e443a93352f 100644 --- a/barretenberg/acir_tests/package.json +++ b/barretenberg/acir_tests/package.json @@ -10,5 +10,8 @@ ], "dependencies": { "pino": "^9.5.0" + }, + "resolutions": { + "ws": "^8.17.1" } } diff --git a/barretenberg/acir_tests/scripts/bb_prove.sh b/barretenberg/acir_tests/scripts/bb_prove.sh new file mode 100755 index 000000000000..0e6f7920bbdb --- /dev/null +++ b/barretenberg/acir_tests/scripts/bb_prove.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +source $(git rev-parse --show-toplevel)/ci3/source + +export HARDWARE_CONCURRENCY=8 + +cd ../acir_tests/$1 + +bb=$(../../../cpp/scripts/find-bb) + +shift +# Base flags + our commandline args +flags="-v --scheme ultra_honk $*" + +mkdir -p output-$$ +trap 
"rm -rf output-$$" EXIT + +# Generate VK +$bb write_vk $flags -b target/program.json -o output-$$ + +# Prove +$bb prove $flags -b target/program.json -k output-$$/vk -o output-$$ + +# Verify +$bb verify $flags \ + -k output-$$/vk \ + -p output-$$/proof \ + -i output-$$/public_inputs diff --git a/barretenberg/acir_tests/scripts/bb_prove_bbjs_verify.sh b/barretenberg/acir_tests/scripts/bb_prove_bbjs_verify.sh new file mode 100755 index 000000000000..9b8b1eb2139d --- /dev/null +++ b/barretenberg/acir_tests/scripts/bb_prove_bbjs_verify.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +source $(git rev-parse --show-toplevel)/ci3/source + +cd ../acir_tests/$1 + +export HARDWARE_CONCURRENCY=8 + +bb=$(../../../cpp/scripts/find-bb) + +mkdir -p output-$$ +trap "rm -rf output-$$" EXIT + +# Generate the VK using BB CLI +$bb write_vk \ + --scheme ultra_honk \ + -b target/program.json \ + -o output-$$ + +# Generate the proof using BB CLI (save as both bytes and fields) +$bb prove \ + --scheme ultra_honk \ + -b target/program.json \ + -w target/witness.gz \ + -k output-$$/vk \ + -o output-$$ + +# Verify the proof with bb.js classes +node ../../bbjs-test verify \ + -d output-$$ diff --git a/barretenberg/acir_tests/scripts/bb_prove_sol_verify.sh b/barretenberg/acir_tests/scripts/bb_prove_sol_verify.sh new file mode 100755 index 000000000000..c33a4bf988fe --- /dev/null +++ b/barretenberg/acir_tests/scripts/bb_prove_sol_verify.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +source $(git rev-parse --show-toplevel)/ci3/source + +cd ../acir_tests/$1 + +export HARDWARE_CONCURRENCY=8 + +bb=$(../../../cpp/scripts/find-bb) + +# Build base flags +flags="-v --scheme ultra_honk" + +# Add any additional arguments passed from command line +shift +for arg in "$@"; do + flags+=" $arg" +done + +USE_OPTIMIZED_CONTRACT=${USE_OPTIMIZED_CONTRACT:-false} + +write_contract_flags=$flags +if [[ "$USE_OPTIMIZED_CONTRACT" == "true" ]]; then + write_contract_flags+=" --optimized" +fi + +# Check if --disable_zk is in the 
flags to determine HAS_ZK +if [[ "$flags" == *"--disable_zk"* ]]; then + has_zk="false" +else + has_zk="true" +fi + +mkdir -p output-$$ +trap "rm -rf output-$$" EXIT + +# Create a proof, write the solidity contract, write the proof as fields in order to extract the public inputs +$bb prove $flags -b target/program.json --oracle_hash keccak --write_vk -o output-$$ +$bb verify $flags --oracle_hash keccak -i output-$$/public_inputs -k output-$$/vk -p output-$$/proof +$bb write_solidity_verifier $write_contract_flags -k output-$$/vk -o output-$$/Verifier.sol + +# Use solcjs to compile the generated key contract with the template verifier and test contract +# index.js will start an anvil, on a random port +# Deploy the verifier then send a test transaction +PROOF="output-$$/proof" \ +PUBLIC_INPUTS="output-$$/public_inputs" \ +VERIFIER_PATH="output-$$/Verifier.sol" \ +TEST_PATH="../../sol-test/HonkTest.sol" \ +HAS_ZK="$has_zk" \ +TEST_NAME=$(basename output-$$) \ + node ../../sol-test/src/index.js diff --git a/barretenberg/acir_tests/scripts/bbjs_legacy_cli_prove.sh b/barretenberg/acir_tests/scripts/bbjs_legacy_cli_prove.sh new file mode 100755 index 000000000000..e9fa668f75ac --- /dev/null +++ b/barretenberg/acir_tests/scripts/bbjs_legacy_cli_prove.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +source $(git rev-parse --show-toplevel)/ci3/source + +export HARDWARE_CONCURRENCY=8 + +cd ../acir_tests/$1 + +# NOTE The bb.js main file is deprecated! 
+bbjs_bin="../../../ts/dest/node/main.js" + +mkdir -p output-$$ +trap "rm -rf output-$$" EXIT + +# Generate VK +node $bbjs_bin write_vk_ultra_honk -v -b target/program.json -o output-$$/vk + +# Prove +node $bbjs_bin prove_ultra_honk -o output-$$/proof -v -b target/program.json -k output-$$/vk + +# Verify +node $bbjs_bin verify_ultra_honk -v \ + -k output-$$/vk \ + -p output-$$/proof diff --git a/barretenberg/acir_tests/scripts/bbjs_prove.sh b/barretenberg/acir_tests/scripts/bbjs_prove.sh new file mode 100755 index 000000000000..d67f4330909d --- /dev/null +++ b/barretenberg/acir_tests/scripts/bbjs_prove.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +source $(git rev-parse --show-toplevel)/ci3/source + +cd ../acir_tests/$1 + +export HARDWARE_CONCURRENCY=8 + +mkdir -p output-$$ +trap "rm -rf output-$$" EXIT + +# Writes the proof, public inputs ./target; this also writes the VK +node ../../bbjs-test prove \ + -b target/program.json \ + -w target/witness.gz \ + -o output-$$ \ + --multi-threaded + +# Verify the proof by reading the files in ./target +node ../../bbjs-test verify \ + -d output-$$ diff --git a/barretenberg/acir_tests/scripts/bbjs_prove_bb_verify.sh b/barretenberg/acir_tests/scripts/bbjs_prove_bb_verify.sh new file mode 100755 index 000000000000..b809adf477dc --- /dev/null +++ b/barretenberg/acir_tests/scripts/bbjs_prove_bb_verify.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +source $(git rev-parse --show-toplevel)/ci3/source + +export HARDWARE_CONCURRENCY=8 + +cd ../acir_tests/$1 + +mkdir -p output-$$ +trap "rm -rf output-$$" EXIT + +# Writes the proof, public inputs ./target; this also writes the VK +node ../../bbjs-test prove \ + -b target/program.json \ + -w target/witness.gz \ + -o output-$$ + +# The proof and public_inputs are already in binary format from bbjs-test + +bb=$(../../../cpp/scripts/find-bb) +# Verify the proof with bb cli +$bb verify \ + --scheme ultra_honk \ + -k output-$$/vk \ + -p output-$$/proof \ + -i output-$$/public_inputs diff --git 
a/barretenberg/acir_tests/scripts/bbjs_prove_sol_verify.sh b/barretenberg/acir_tests/scripts/bbjs_prove_sol_verify.sh new file mode 100755 index 000000000000..1574b00cbb97 --- /dev/null +++ b/barretenberg/acir_tests/scripts/bbjs_prove_sol_verify.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +source $(git rev-parse --show-toplevel)/ci3/source + +export HARDWARE_CONCURRENCY=8 + +cd ../acir_tests/$1 + +mkdir -p output-$$ +trap "rm -rf output-$$" EXIT + +# Generate the proof and VK +node ../../bbjs-test prove \ + -b target/program.json \ + -w target/witness.gz \ + -o output-$$ \ + --oracle-hash $oracle_hash + +bb=$(../../../cpp/scripts/find-bb) + +# Default to keccakZK for solidity compatibility +oracle_hash="keccakZK" +has_zk="true" + +# Process additional arguments +shift +for arg in "$@"; do + if [[ "$arg" == "--disable_zk" ]]; then + has_zk="false" + oracle_hash="keccak" + fi +done + +# Use the BB CLI to write the solidity verifier - this can also be done with bb.js +$bb write_solidity_verifier --scheme ultra_honk -k output-$$/vk -o output-$$/Verifier.sol + +# Verify the proof using the solidity verifier +PROOF="output-$$/proof" \ +PUBLIC_INPUTS="output-$$/public_inputs" \ +VERIFIER_PATH="output-$$/Verifier.sol" \ +TEST_PATH="../../sol-test/HonkTest.sol" \ +HAS_ZK="$has_zk" \ +TEST_NAME=$(basename $(realpath .)) \ + node ../../sol-test/src/index.js diff --git a/barretenberg/acir_tests/scripts/bench_acir_tests.sh b/barretenberg/acir_tests/scripts/bench_acir_tests.sh deleted file mode 100755 index 70d94d34497f..000000000000 --- a/barretenberg/acir_tests/scripts/bench_acir_tests.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env bash -set -e - -cd "$(dirname "$0")" - -TEST_NAMES=("$@") -THREADS=(1 4 16 32 64) -BENCHMARKS=$LOG_FILE - -if [[ -z "${LOG_FILE}" ]]; then - BENCHMARKS=$(mktemp) -fi - -if [ "${#TEST_NAMES[@]}" -eq 0 ]; then - TEST_NAMES=$(find acir_tests/bench_* -maxdepth 0 -type d -printf '%f ') -fi - -for TEST in ${TEST_NAMES[@]}; do - for HC in ${THREADS[@]}; do 
- HARDWARE_CONCURRENCY=$HC BENCHMARK_FD=3 ./run_acir_tests.sh $TEST 3>>$BENCHMARKS - done -done - -# Build results into string with \n delimited rows and space delimited values. -TABLE_DATA="" -for TEST in ${TEST_NAMES[@]}; do - GATE_COUNT=$(jq -r --arg test "$TEST" 'select(.eventName == "gate_count" and .acir_test == $test) | .value' $BENCHMARKS | uniq) - SUBGROUP_SIZE=$(jq -r --arg test "$TEST" 'select(.eventName == "subgroup_size" and .acir_test == $test) | .value' $BENCHMARKS | uniq) - # Name in col 1, gate count in col 2, subgroup size in col 3. - TABLE_DATA+="$TEST $GATE_COUNT $SUBGROUP_SIZE" - # Each thread timing in subsequent cols. - for HC in "${THREADS[@]}"; do - RESULT=$(cat $BENCHMARKS | jq -r --arg test "$TEST" --argjson hc $HC 'select(.eventName == "proof_construction_time" and .acir_test == $test and .threads == $hc) | .value') - TABLE_DATA+=" $RESULT" - done - TABLE_DATA+=$'\n' -done - -# Trim the trailing newline. -TABLE_DATA="${TABLE_DATA%$'\n'}" - -echo -echo Table represents time in ms to build circuit and proof for each test on n threads. -echo Ignores proving key construction. 
-echo -# Use awk to print the table -echo -e "$TABLE_DATA" | awk -v threads="${THREADS[*]}" 'BEGIN { - split(threads, t, " "); - len_threads = length(t); - print "+--------------------------+------------+---------------+" genseparator(len_threads); - print "| Test | Gate Count | Subgroup Size |" genthreadheaders(t, len_threads); - print "+--------------------------+------------+---------------+" genseparator(len_threads); -} -{ - printf("| %-24s | %-10s | %-13s |", $1, $2, $3); - for (i = 4; i <= len_threads+3; i++) { - printf " %9s |", $(i); - } - print ""; -} -END { - print "+--------------------------+------------+---------------+" genseparator(len_threads); -} -function genseparator(len, res) { - for (i = 1; i <= len; i++) res = res "-----------+"; - return res; -} -function genthreadheaders(t, len, res) { - for (i = 1; i <= len; i++) res = res sprintf(" %9s |", t[i]); - return res; -} -' - -if [[ -z "${LOG_FILE}" ]]; then - rm $BENCHMARKS -fi diff --git a/barretenberg/acir_tests/scripts/browser_prove.sh b/barretenberg/acir_tests/scripts/browser_prove.sh new file mode 100755 index 000000000000..0608e6530554 --- /dev/null +++ b/barretenberg/acir_tests/scripts/browser_prove.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +source $(git rev-parse --show-toplevel)/ci3/source +cd ../acir_tests/$1 +export BROWSER=$2 + +# Launch browser server +dump_fail \ + "cd ../../browser-test-app && ../node_modules/.bin/serve -n -L -p 8080 -c ../serve.json dest" > /dev/null & +while ! 
nc -z localhost 8080 &>/dev/null; do sleep 1; done; + +# Use the browser binary for the test +../../headless-test/bb.js.browser prove_and_verify -b target/program.json -v diff --git a/barretenberg/acir_tests/scripts/run_bench.sh b/barretenberg/acir_tests/scripts/run_bench.sh index c63a79875f75..9674ff4115c4 100755 --- a/barretenberg/acir_tests/scripts/run_bench.sh +++ b/barretenberg/acir_tests/scripts/run_bench.sh @@ -14,6 +14,7 @@ mkdir -p ./bench-out bash -c "$cmd" 2>&1 | \ tee /dev/stderr | grep "mem: " | - tail -1 | sed -e 's/.*mem: \([0-9.]\+\).*/\1/' | + sort -un | + tail -n 1 | jq -n --arg name $name '[{name: $name, value: input, unit: "MiB"}]' > ./bench-out/$name.bench.json diff --git a/barretenberg/acir_tests/scripts/run_test.sh b/barretenberg/acir_tests/scripts/run_test.sh deleted file mode 100755 index 721456345094..000000000000 --- a/barretenberg/acir_tests/scripts/run_test.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env bash -source $(git rev-parse --show-toplevel)/ci3/source - -cd .. - -TEST_NAME=$1 - -COMPILE=${COMPILE:-0} -native_build_dir=$(../cpp/scripts/native-preset-build-dir) -export BIN=$(realpath ${BIN:-../cpp/$native_build_dir/bin/bb}) -export CRS_PATH=${CRS_PATH:-$HOME/.bb-crs} -FLOW=${FLOW:-prove_and_verify} -export RECURSIVE=${RECURSIVE:-false} -export HARDWARE_CONCURRENCY=${HARDWARE_CONCURRENCY:-8} -RAYON_NUM_THREADS=${RAYON_NUM_THREADS:-8} -export VERBOSE=${VERBOSE:-0} - -flow_script=$(realpath ./flows/${FLOW}.sh) -nargo=$(realpath ../../noir/noir-repo/target/release/nargo) - - -cd ./acir_tests/$TEST_NAME - -if [ "$COMPILE" -ne 0 ]; then - echo -n "$TEST_NAME compiling... " - export RAYON_NUM_THREADS=${RAYON_NUM_THREADS:-4} - rm -rf target - set +e - compile_output=$($nargo compile --silence-warnings 2>&1 && $nargo execute 2>&1) - result=$? - set -e - if [ "$result" -ne 0 ]; then - echo "failed." 
- echo "$compile_output" - exit $result - fi - mv ./target/$TEST_NAME.json ./target/program.json - mv ./target/$TEST_NAME.gz ./target/witness.gz - if [ "$COMPILE" -eq 2 ]; then - echo "done." - exit 0 - fi -fi - -if [[ ( ! -f ./target/program.json && ! -f ./target/acir.msgpack ) || \ - ( ! -f ./target/witness.gz && ! -f ./target/witness.msgpack ) ]]; then - echo -e "\033[33mSKIPPED\033[0m (uncompiled)" - exit 0; -fi - -set +e -SECONDS=0 -if [ "$VERBOSE" -eq 1 ]; then - output=$($flow_script 2>&1 | tee /dev/stderr) -else - output=$($flow_script 2>&1) -fi -result=$? -duration=$SECONDS -set -e - -if [ $result -eq 0 ]; then - echo -e "${green}PASSED${reset} (${duration}s)" -else - [ "$VERBOSE" -eq 0 ] && echo "$output" - echo -e "${red}FAILED${reset} (${duration}s)" - exit 1 -fi diff --git a/barretenberg/acir_tests/scripts/run_test_browser.sh b/barretenberg/acir_tests/scripts/run_test_browser.sh deleted file mode 100755 index 6ccf6335886d..000000000000 --- a/barretenberg/acir_tests/scripts/run_test_browser.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash -source $(git rev-parse --show-toplevel)/ci3/source - -test=$1 - -cd .. - -../../ci3/dump_fail \ - "cd browser-test-app && ../node_modules/.bin/serve -n -L -p 8080 -c ../serve.json dest" > /dev/null & -while ! 
nc -z localhost 8080 &>/dev/null; do sleep 1; done; -BIN=./headless-test/bb.js.browser ./scripts/run_test.sh $test diff --git a/barretenberg/acir_tests/sol-test/HonkTest.sol b/barretenberg/acir_tests/sol-test/HonkTest.sol index 79e16d0fab47..960fc9cddd6b 100644 --- a/barretenberg/acir_tests/sol-test/HonkTest.sol +++ b/barretenberg/acir_tests/sol-test/HonkTest.sol @@ -12,7 +12,7 @@ contract Test { verifier = new HonkVerifier(); } - function test(bytes calldata proof, bytes32[] calldata publicInputs) public view returns (bool) { + function test(bytes calldata proof, bytes32[] calldata publicInputs) public returns (bool) { return verifier.verify(proof, publicInputs); } } diff --git a/barretenberg/acir_tests/sol-test/src/index.js b/barretenberg/acir_tests/sol-test/src/index.js index 3d5e0061ce82..6ee750c84543 100644 --- a/barretenberg/acir_tests/sol-test/src/index.js +++ b/barretenberg/acir_tests/sol-test/src/index.js @@ -5,8 +5,6 @@ import { ethers } from "ethers"; import solc from "solc"; // Size excluding number of public inputs -const NUMBER_OF_FIELDS_IN_PLONK_PROOF = 93; - const NUMBER_OF_ELEMENTS_IN_HONK_PROOF = 457; const NUMBER_OF_ELEMENTS_IN_HONK_ZK_PROOF = 508; @@ -57,8 +55,6 @@ const [test, verifier] = await Promise.all([ fsPromises.readFile(verifierPath, encoding), ]); -// If testing honk is set, then we compile the honk test suite -const testingHonk = getEnvVarCanBeUndefined("TESTING_HONK"); const hasZK = getEnvVarCanBeUndefined("HAS_ZK"); export const compilationInput = { @@ -89,26 +85,9 @@ export const compilationInput = { }, }; -const NUMBER_OF_FIELDS_IN_PROOF = testingHonk - ? hasZK - ? 
NUMBER_OF_ELEMENTS_IN_HONK_ZK_PROOF - : NUMBER_OF_ELEMENTS_IN_HONK_PROOF - : NUMBER_OF_FIELDS_IN_PLONK_PROOF; -if (!testingHonk) { - const keyPath = getEnvVar("KEY_PATH"); - const basePath = getEnvVar("BASE_PATH"); - const [key, base] = await Promise.all([ - fsPromises.readFile(keyPath, encoding), - fsPromises.readFile(basePath, encoding), - ]); - - compilationInput.sources["BaseUltraVerifier.sol"] = { - content: base, - }; - compilationInput.sources["Key.sol"] = { - content: key, - }; -} +const NUMBER_OF_FIELDS_IN_PROOF = hasZK + ? NUMBER_OF_ELEMENTS_IN_HONK_ZK_PROOF + : NUMBER_OF_ELEMENTS_IN_HONK_PROOF; var output = JSON.parse(solc.compile(JSON.stringify(compilationInput))); @@ -186,6 +165,20 @@ const linkLibrary = (bytecode, libraryName, libraryAddress) => { return bytecode.replace(regex, address); }; +/** + * Converts binary data to array of field elements (32-byte chunks as hex strings) + * @param {Buffer} buffer - Binary data + * @return {Array} Array of hex strings with 0x prefix + */ +const binaryToFields = (buffer) => { + const fields = []; + for (let i = 0; i < buffer.length; i += 32) { + const chunk = buffer.slice(i, i + 32); + fields.push('0x' + chunk.toString('hex')); + } + return fields; +}; + /** * Takes in a proof as fields, and returns the public inputs, as well as the number of public inputs * @param {Array} proofAsFields @@ -246,26 +239,22 @@ try { const proof = readFileSync(proofPath); proofStr = proof.toString("hex"); - let publicInputsAsFieldsPath = getEnvVarCanBeUndefined( - "PUBLIC_INPUTS_AS_FIELDS" - ); // PUBLIC_INPUTS_AS_FIELDS is not defined for bb plonk, but is for bb honk and bbjs honk. - var publicInputs; - let proofAsFieldsPath = getEnvVarCanBeUndefined("PROOF_AS_FIELDS"); // PROOF_AS_FIELDS is not defined for bbjs, but is for bb plonk and bb honk. 
+ let publicInputsPath = getEnvVarCanBeUndefined("PUBLIC_INPUTS"); + var publicInputs = []; let numExtraPublicInputs = 0; let extraPublicInputs = []; - if (proofAsFieldsPath) { - const proofAsFields = readFileSync(proofAsFieldsPath); + + // For flows that use binary proof format, extract public inputs from the proof + const proofAsFields = binaryToFields(proof); + if (proofAsFields.length > NUMBER_OF_FIELDS_IN_PROOF) { // We need to extract the public inputs from the proof. This might be empty, or just the pairing point object, or be the entire public inputs... - [numExtraPublicInputs, extraPublicInputs] = readPublicInputs( - JSON.parse(proofAsFields.toString()) - ); + [numExtraPublicInputs, extraPublicInputs] = readPublicInputs(proofAsFields); } - // We need to do this because plonk doesn't define this path - if (publicInputsAsFieldsPath) { - const innerPublicInputs = JSON.parse( - readFileSync(publicInputsAsFieldsPath).toString() - ); // assumes JSON array of PI hex strings + // Read public inputs from binary file if available + if (publicInputsPath) { + const publicInputsBinary = readFileSync(publicInputsPath); + const innerPublicInputs = binaryToFields(publicInputsBinary); publicInputs = innerPublicInputs.concat(extraPublicInputs); } else { // for plonk, the extraPublicInputs are all of the public inputs @@ -282,7 +271,7 @@ try { let finalBytecode = bytecode; // Deploy ZKTranscript library if needed and link it - if (testingHonk && hasZK) { + if (hasZK) { // Check if there's a library placeholder in the bytecode const libraryPlaceholder = /__\$[a-fA-F0-9]{34}\$__/; if (libraryPlaceholder.test(bytecode)) { @@ -295,6 +284,9 @@ try { // Deploy the library console.log("Deploying ZKTranscriptLib library..."); const libraryAddress = await deploy(signer, libraryAbi, libraryBytecode); + + // Wait for the library deployment - for some reason we have an issue with nonces here + await new Promise((resolve) => setTimeout(resolve, 500)); console.log("ZKTranscriptLib 
deployed at:", libraryAddress); // Link the library to the verifier bytecode @@ -310,26 +302,22 @@ try { if (!result) throw new Error("Test failed"); } catch (e) { console.error(testName, "failed"); - if (testingHonk) { - var errorType = e.data; - switch (errorType) { - case WRONG_PROOF_LENGTH: - throw new Error( - "Proof length wrong. Possibile culprits: the NUMBER_OF_FIELDS_IN_* constants; number of public inputs; proof surgery; zk/non-zk discrepancy." - ); - case WRONG_PUBLIC_INPUTS_LENGTH: - throw new Error("Number of inputs in the proof is wrong"); - case SUMCHECK_FAILED: - throw new Error("Sumcheck round failed"); - case SHPLEMINI_FAILED: - throw new Error("PCS round failed"); - case CONSISTENCY_FAILED: - throw new Error("ZK contract: Subgroup IPA consistency check error"); - case GEMINI_CHALLENGE_IN_SUBGROUP: - throw new Error("ZK contract: Gemini challenge error"); - default: - throw e; - } + var errorType = e.data; + switch (errorType) { + case WRONG_PROOF_LENGTH: + throw new Error( + "Proof length wrong. Possibile culprits: the NUMBER_OF_FIELDS_IN_* constants; number of public inputs; proof surgery; zk/non-zk discrepancy." 
+ ); + case WRONG_PUBLIC_INPUTS_LENGTH: + throw new Error("Number of inputs in the proof is wrong"); + case SUMCHECK_FAILED: + throw new Error("Sumcheck round failed"); + case SHPLEMINI_FAILED: + throw new Error("PCS round failed"); + case CONSISTENCY_FAILED: + throw new Error("ZK contract: Subgroup IPA consistency check error"); + case GEMINI_CHALLENGE_IN_SUBGROUP: + throw new Error("ZK contract: Gemini challenge error"); } throw e; } finally { diff --git a/barretenberg/acir_tests/yarn.lock b/barretenberg/acir_tests/yarn.lock index f44584125afc..7a8313a4a340 100644 --- a/barretenberg/acir_tests/yarn.lock +++ b/barretenberg/acir_tests/yarn.lock @@ -297,21 +297,20 @@ __metadata: languageName: node linkType: hard -"@puppeteer/browsers@npm:2.3.0": - version: 2.3.0 - resolution: "@puppeteer/browsers@npm:2.3.0" +"@puppeteer/browsers@npm:2.10.10": + version: 2.10.10 + resolution: "@puppeteer/browsers@npm:2.10.10" dependencies: - debug: "npm:^4.3.5" + debug: "npm:^4.4.3" extract-zip: "npm:^2.0.1" progress: "npm:^2.0.3" - proxy-agent: "npm:^6.4.0" - semver: "npm:^7.6.3" - tar-fs: "npm:^3.0.6" - unbzip2-stream: "npm:^1.4.3" + proxy-agent: "npm:^6.5.0" + semver: "npm:^7.7.2" + tar-fs: "npm:^3.1.0" yargs: "npm:^17.7.2" bin: browsers: lib/cjs/main-cli.js - checksum: 10c0/8665a7d5be5e1489855780b7684bf94a55647b54a8391474cbdc1defdb2e4e6642722ef1d20bfabe49d3aed3eec2c8db41d6eabc24440f4a16d071effc5a1049 + checksum: 10c0/88dd1f34821d4479eed399ab6095e57e91bd4715134e5706ed86ab11b501402294db9446caf349bdd163ff38805e6a7beaa9eed4ecdeeaf2a43373079f890369 languageName: node linkType: hard @@ -1255,16 +1254,6 @@ __metadata: languageName: node linkType: hard -"buffer@npm:^5.2.1": - version: 5.7.1 - resolution: "buffer@npm:5.7.1" - dependencies: - base64-js: "npm:^1.3.1" - ieee754: "npm:^1.1.13" - checksum: 10c0/27cac81cff434ed2876058d72e7c4789d11ff1120ef32c9de48f59eab58179b66710c488987d295ae89a228f835fc66d088652dffeb8e3ba8659f80eb091d55e - languageName: node - linkType: hard - 
"buffer@npm:^6.0.3": version: 6.0.3 resolution: "buffer@npm:6.0.3" @@ -1435,16 +1424,15 @@ __metadata: languageName: node linkType: hard -"chromium-bidi@npm:0.6.3": - version: 0.6.3 - resolution: "chromium-bidi@npm:0.6.3" +"chromium-bidi@npm:9.1.0": + version: 9.1.0 + resolution: "chromium-bidi@npm:9.1.0" dependencies: - mitt: "npm:3.0.1" - urlpattern-polyfill: "npm:10.0.0" - zod: "npm:3.23.8" + mitt: "npm:^3.0.1" + zod: "npm:^3.24.1" peerDependencies: devtools-protocol: "*" - checksum: 10c0/226829bfc3c9de54803cfbce5cb3075f729aa2f862b22e2e91c75d35425b537f85c49d36793d69bf4778115c4bd31ab3e9eaee1cbc28a1506a6d4b1752e34b9a + checksum: 10c0/ab75ab4e3434e1fab11a35a1fa6f64dd55bfcdf046251f4489ece828ae18b3c88b16728b00d02c4ed012b686f455cb589774370ef8f3f07b9d762476c4c672cc languageName: node linkType: hard @@ -1723,15 +1711,15 @@ __metadata: languageName: node linkType: hard -"debug@npm:4, debug@npm:^4.1.0, debug@npm:^4.1.1, debug@npm:^4.3.4, debug@npm:^4.3.5, debug@npm:^4.3.6": - version: 4.4.0 - resolution: "debug@npm:4.4.0" +"debug@npm:4, debug@npm:^4.1.0, debug@npm:^4.1.1, debug@npm:^4.3.4, debug@npm:^4.4.3": + version: 4.4.3 + resolution: "debug@npm:4.4.3" dependencies: ms: "npm:^2.1.3" peerDependenciesMeta: supports-color: optional: true - checksum: 10c0/db94f1a182bf886f57b4755f85b3a74c39b5114b9377b7ab375dc2cfa3454f09490cc6c30f829df3fc8042bc8b8995f6567ce5cd96f3bc3688bd24027197d9de + checksum: 10c0/d79136ec6c83ecbefd0f6a5593da6a9c91ec4d7ddc4b54c883d6e71ec9accb5f67a1a5e96d00a328196b5b5c86d365e98d8a3a70856aaf16b4e7b1985e67f5a6 languageName: node linkType: hard @@ -1812,10 +1800,10 @@ __metadata: languageName: node linkType: hard -"devtools-protocol@npm:0.0.1312386": - version: 0.0.1312386 - resolution: "devtools-protocol@npm:0.0.1312386" - checksum: 10c0/1073b2edcee76db094fdce97fe8869f3469866513e864379e04311a429b439ba51e54809fdffb09b67bf0c37b5ac5bfd2b0536ae217b7ea2cbe2e571fbed7e8e +"devtools-protocol@npm:0.0.1495869": + version: 0.0.1495869 + resolution: 
"devtools-protocol@npm:0.0.1495869" + checksum: 10c0/ef3f35ddd914f07112a4e9417cc6afbd82550976cdb30096710a3024db303d29d13a09c63cb7ef4e0f38e70986a90cb50b39f0f0bcf0fd30937e5a0cba3c521b languageName: node linkType: hard @@ -2585,7 +2573,7 @@ __metadata: chalk: "npm:^5.3.0" commander: "npm:^12.1.0" playwright: "npm:1.49.0" - puppeteer: "npm:^22.4.1" + puppeteer: "npm:^24.22.3" ts-node: "npm:^10.9.2" typescript: "npm:^5.4.2" languageName: unknown @@ -2787,7 +2775,7 @@ __metadata: languageName: node linkType: hard -"ieee754@npm:^1.1.13, ieee754@npm:^1.2.1": +"ieee754@npm:^1.2.1": version: 1.2.1 resolution: "ieee754@npm:1.2.1" checksum: 10c0/b0782ef5e0935b9f12883a2e2aa37baa75da6e66ce6515c168697b42160807d9330de9a32ec1ed73149aea02e0d822e572bca6f1e22bdcbd2149e13b050b17bb @@ -3436,7 +3424,7 @@ __metadata: languageName: node linkType: hard -"mitt@npm:3.0.1": +"mitt@npm:^3.0.1": version: 3.0.1 resolution: "mitt@npm:3.0.1" checksum: 10c0/3ab4fdecf3be8c5255536faa07064d05caa3dd332bd318ff02e04621f7b3069ca1de9106cfe8e7ced675abfc2bec2ce4c4ef321c4a1bb1fb29df8ae090741913 @@ -4055,7 +4043,7 @@ __metadata: languageName: node linkType: hard -"proxy-agent@npm:^6.4.0": +"proxy-agent@npm:^6.5.0": version: 6.5.0 resolution: "proxy-agent@npm:6.5.0" dependencies: @@ -4095,30 +4083,34 @@ __metadata: languageName: node linkType: hard -"puppeteer-core@npm:22.15.0": - version: 22.15.0 - resolution: "puppeteer-core@npm:22.15.0" +"puppeteer-core@npm:24.22.3": + version: 24.22.3 + resolution: "puppeteer-core@npm:24.22.3" dependencies: - "@puppeteer/browsers": "npm:2.3.0" - chromium-bidi: "npm:0.6.3" - debug: "npm:^4.3.6" - devtools-protocol: "npm:0.0.1312386" - ws: "npm:^8.18.0" - checksum: 10c0/6d041db5f654088857a39e592672fe8cce1e974a1547020d404d3bd5f0e1568eecb2de9b4626b6a48cbe15da1c6ee9d33962cb473dcb67ff08927f4d4ec1e461 + "@puppeteer/browsers": "npm:2.10.10" + chromium-bidi: "npm:9.1.0" + debug: "npm:^4.4.3" + devtools-protocol: "npm:0.0.1495869" + typed-query-selector: "npm:^2.12.0" + 
webdriver-bidi-protocol: "npm:0.2.11" + ws: "npm:^8.18.3" + checksum: 10c0/e24f1b42b75495830d0d91e167f7a6e3961b0c91dcc2aae8cf23ebd6c2eee69f0869f0ea14c79936f754ac6fd899a1c7a201e96d3f82220c9fb2058ca1b92fd4 languageName: node linkType: hard -"puppeteer@npm:^22.4.1": - version: 22.15.0 - resolution: "puppeteer@npm:22.15.0" +"puppeteer@npm:^24.22.3": + version: 24.22.3 + resolution: "puppeteer@npm:24.22.3" dependencies: - "@puppeteer/browsers": "npm:2.3.0" + "@puppeteer/browsers": "npm:2.10.10" + chromium-bidi: "npm:9.1.0" cosmiconfig: "npm:^9.0.0" - devtools-protocol: "npm:0.0.1312386" - puppeteer-core: "npm:22.15.0" + devtools-protocol: "npm:0.0.1495869" + puppeteer-core: "npm:24.22.3" + typed-query-selector: "npm:^2.12.0" bin: - puppeteer: lib/esm/puppeteer/node/cli.js - checksum: 10c0/c31ec024dd7722c32a681c3e2ae23751021abb3f4c39fbdd895859327e855ae2b89e5682fcdb789de7412314701d882bd37e8545e45cf0a97cd5df06449987b9 + puppeteer: lib/cjs/puppeteer/node/cli.js + checksum: 10c0/746c4ec0545a6abd26d3998d484b53a03edb552210c37c231a34268a98f36c4e663113041a725ee591ed02438dd271caa7ab97977e524772df53429474335903 languageName: node linkType: hard @@ -4445,12 +4437,12 @@ __metadata: languageName: node linkType: hard -"semver@npm:^7.3.4, semver@npm:^7.3.5, semver@npm:^7.6.3": - version: 7.7.1 - resolution: "semver@npm:7.7.1" +"semver@npm:^7.3.4, semver@npm:^7.3.5, semver@npm:^7.7.2": + version: 7.7.2 + resolution: "semver@npm:7.7.2" bin: semver: bin/semver.js - checksum: 10c0/fd603a6fb9c399c6054015433051bdbe7b99a940a8fb44b85c2b524c4004b023d7928d47cb22154f8d054ea7ee8597f586605e05b52047f048278e4ac56ae958 + checksum: 10c0/aca305edfbf2383c22571cb7714f48cadc7ac95371b4b52362fb8eeffdfbc0de0669368b82b2b15978f8848f01d7114da65697e56cd8c37b0dab8c58e543f9ea languageName: node linkType: hard @@ -4935,9 +4927,9 @@ __metadata: languageName: node linkType: hard -"tar-fs@npm:^3.0.6": - version: 3.0.8 - resolution: "tar-fs@npm:3.0.8" +"tar-fs@npm:^3.1.0": + version: 3.1.1 + resolution: 
"tar-fs@npm:3.1.1" dependencies: bare-fs: "npm:^4.0.1" bare-path: "npm:^3.0.0" @@ -4948,7 +4940,7 @@ __metadata: optional: true bare-path: optional: true - checksum: 10c0/b70bb2ad0490ab13b48edd10bd648bb54c52b681981cdcdc3aa4517e98ad94c94659ddca1925872ee658d781b9fcdd2b1c808050647f06b1bca157dd2fcae038 + checksum: 10c0/0c677d711c4aa41f94e1a712aa647022ba1910ff84430739e5d9e95a615e3ea1b7112dc93164fc8ce30dc715befcf9cfdc64da27d4e7958d73c59bda06aa0d8e languageName: node linkType: hard @@ -5040,13 +5032,6 @@ __metadata: languageName: node linkType: hard -"through@npm:^2.3.8": - version: 2.3.8 - resolution: "through@npm:2.3.8" - checksum: 10c0/4b09f3774099de0d4df26d95c5821a62faee32c7e96fb1f4ebd54a2d7c11c57fe88b0a0d49cf375de5fee5ae6bf4eb56dbbf29d07366864e2ee805349970d3cc - languageName: node - linkType: hard - "thunky@npm:^1.0.2": version: 1.1.0 resolution: "thunky@npm:1.1.0" @@ -5173,6 +5158,13 @@ __metadata: languageName: node linkType: hard +"typed-query-selector@npm:^2.12.0": + version: 2.12.0 + resolution: "typed-query-selector@npm:2.12.0" + checksum: 10c0/069509887ecfff824a470f5f93d300cc9223cb059a36c47ac685f2812c4c9470340e07615893765e4264cef1678507532fa78f642fd52f276b589f7f5d791f79 + languageName: node + linkType: hard + "typescript@npm:^5.4.2, typescript@npm:^5.6.3": version: 5.8.2 resolution: "typescript@npm:5.8.2" @@ -5193,16 +5185,6 @@ __metadata: languageName: node linkType: hard -"unbzip2-stream@npm:^1.4.3": - version: 1.4.3 - resolution: "unbzip2-stream@npm:1.4.3" - dependencies: - buffer: "npm:^5.2.1" - through: "npm:^2.3.8" - checksum: 10c0/2ea2048f3c9db3499316ccc1d95ff757017ccb6f46c812d7c42466247e3b863fb178864267482f7f178254214247779daf68e85f50bd7736c3c97ba2d58b910a - languageName: node - linkType: hard - "undici-types@npm:~6.19.2": version: 6.19.8 resolution: "undici-types@npm:6.19.8" @@ -5275,13 +5257,6 @@ __metadata: languageName: node linkType: hard -"urlpattern-polyfill@npm:10.0.0": - version: 10.0.0 - resolution: "urlpattern-polyfill@npm:10.0.0" - 
checksum: 10c0/43593f2a89bd54f2d5b5105ef4896ac5c5db66aef723759fbd15cd5eb1ea6cdae9d112e257eda9bbc3fb0cd90be6ac6e9689abe4ca69caa33114f42a27363531 - languageName: node - linkType: hard - "util-deprecate@npm:^1.0.1, util-deprecate@npm:~1.0.1": version: 1.0.2 resolution: "util-deprecate@npm:1.0.2" @@ -5345,6 +5320,13 @@ __metadata: languageName: node linkType: hard +"webdriver-bidi-protocol@npm:0.2.11": + version: 0.2.11 + resolution: "webdriver-bidi-protocol@npm:0.2.11" + checksum: 10c0/de03680e6c061801d730243eb8dead6d5cb1ea4eda63546528b52f25f9c7a3cc56709b0de689e155a3d070c3a97269d9b203aef010349ee5b0df9582929bd8bb + languageName: node + linkType: hard + "webpack-cli@npm:^6.0.1": version: 6.0.1 resolution: "webpack-cli@npm:6.0.1" @@ -5578,24 +5560,9 @@ __metadata: languageName: node linkType: hard -"ws@npm:8.17.1": - version: 8.17.1 - resolution: "ws@npm:8.17.1" - peerDependencies: - bufferutil: ^4.0.1 - utf-8-validate: ">=5.0.2" - peerDependenciesMeta: - bufferutil: - optional: true - utf-8-validate: - optional: true - checksum: 10c0/f4a49064afae4500be772abdc2211c8518f39e1c959640457dcee15d4488628620625c783902a52af2dd02f68558da2868fd06e6fd0e67ebcd09e6881b1b5bfe - languageName: node - linkType: hard - -"ws@npm:^8.18.0": - version: 8.18.1 - resolution: "ws@npm:8.18.1" +"ws@npm:^8.17.1": + version: 8.18.3 + resolution: "ws@npm:8.18.3" peerDependencies: bufferutil: ^4.0.1 utf-8-validate: ">=5.0.2" @@ -5604,7 +5571,7 @@ __metadata: optional: true utf-8-validate: optional: true - checksum: 10c0/e498965d6938c63058c4310ffb6967f07d4fa06789d3364829028af380d299fe05762961742971c764973dce3d1f6a2633fe8b2d9410c9b52e534b4b882a99fa + checksum: 10c0/eac918213de265ef7cb3d4ca348b891a51a520d839aa51cdb8ca93d4fa7ff9f6ccb339ccee89e4075324097f0a55157c89fa3f7147bde9d8d7e90335dc087b53 languageName: node linkType: hard @@ -5668,9 +5635,9 @@ __metadata: languageName: node linkType: hard -"zod@npm:3.23.8": - version: 3.23.8 - resolution: "zod@npm:3.23.8" - checksum: 
10c0/8f14c87d6b1b53c944c25ce7a28616896319d95bc46a9660fe441adc0ed0a81253b02b5abdaeffedbeb23bdd25a0bf1c29d2c12dd919aef6447652dd295e3e69 +"zod@npm:^3.24.1": + version: 3.25.76 + resolution: "zod@npm:3.25.76" + checksum: 10c0/5718ec35e3c40b600316c5b4c5e4976f7fee68151bc8f8d90ec18a469be9571f072e1bbaace10f1e85cf8892ea12d90821b200e980ab46916a6166a4260a983c languageName: node linkType: hard diff --git a/barretenberg/barretenberg.code-workspace b/barretenberg/barretenberg.code-workspace index cd08c7a31666..6fed8f539653 100644 --- a/barretenberg/barretenberg.code-workspace +++ b/barretenberg/barretenberg.code-workspace @@ -1,5 +1,5 @@ { - // Each "folder" can define a different project in the main repo, + // Each "folder" can define a different project in the main repo, // relative to `.code-workspace`. "folders": [ { @@ -70,7 +70,7 @@ // Clangd. Note that this setting may be overridden by user settings // to the default value "clangd". // - "clangd.path": "clangd-16", + "clangd.path": "clangd-20", // We should disable automatic inclusion of headers unless we decide to follow "WhyIWYU". 
"clangd.arguments": [ "-header-insertion=never" @@ -145,6 +145,6 @@ "cwd": "${command:cmake.buildDirectory}", "internalConsoleOptions": "openOnSessionStart", "console": "internalConsole", - } + } }, -} \ No newline at end of file +} diff --git a/barretenberg/bbup/bb-versions.json b/barretenberg/bbup/bb-versions.json index 09c75ba362e9..217808319450 100644 --- a/barretenberg/bbup/bb-versions.json +++ b/barretenberg/bbup/bb-versions.json @@ -1,4 +1,13 @@ { + "1.0.0-beta.13": "1.2.0", + "1.0.0-beta.12": "0.87.0", + "1.0.0-beta.11": "0.87.0", + "1.0.0-beta.10": "0.87.0", + "1.0.0-beta.9": "0.87.0", + "1.0.0-beta.8": "0.87.0", + "1.0.0-beta.7": "0.87.0", + "1.0.0-beta.6": "0.84.0", + "1.0.0-beta.5": "0.84.0", "1.0.0-beta.4": "0.84.0", "1.0.0-beta.3": "0.82.2", "1.0.0-beta.2": "0.82.2", diff --git a/barretenberg/bbup/bbup b/barretenberg/bbup/bbup index 55f7ed046ca9..29d81be28f41 100755 --- a/barretenberg/bbup/bbup +++ b/barretenberg/bbup/bbup @@ -48,7 +48,7 @@ get_bb_version_for_noir() { fi fi - local lookup_url="https://raw.githubusercontent.com/AztecProtocol/aztec-packages/master/barretenberg/bbup/bb-versions.json" + local lookup_url="https://raw.githubusercontent.com/AztecProtocol/aztec-packages/next/barretenberg/bbup/bb-versions.json" # Extract BB version from install script local bb_version=$(curl --fail -s "$lookup_url" | jq -r --arg version "$resolved_version" '.[$version]') diff --git a/barretenberg/bbup/bootstrap.sh b/barretenberg/bbup/bootstrap.sh index fd59fbc55f67..bec13b7f7029 100755 --- a/barretenberg/bbup/bootstrap.sh +++ b/barretenberg/bbup/bootstrap.sh @@ -19,7 +19,7 @@ function test_cmds { # This is not called in ci. It is just for a developer to run the tests. 
function test { echo_header "bbup test" - test_cmds | filter_test_cmds | parallelise + test_cmds | filter_test_cmds | parallelize } case "$cmd" in diff --git a/barretenberg/bbup/install b/barretenberg/bbup/install index 030c5f6c5c54..dfaef239fae0 100755 --- a/barretenberg/bbup/install +++ b/barretenberg/bbup/install @@ -12,7 +12,7 @@ ERROR="✗" BB_DIR="${HOME}/.bb" INSTALL_PATH="${BB_DIR}/bbup" -BBUP_URL="https://raw.githubusercontent.com/AztecProtocol/aztec-packages/master/barretenberg/bbup/bbup" +BBUP_URL="https://raw.githubusercontent.com/AztecProtocol/aztec-packages/next/barretenberg/bbup/bbup" # Create .bb directory if it doesn't exist mkdir -p "$BB_DIR" diff --git a/barretenberg/bootstrap.sh b/barretenberg/bootstrap.sh index 810c209dd464..ac48877abdb6 100755 --- a/barretenberg/bootstrap.sh +++ b/barretenberg/bootstrap.sh @@ -1,7 +1,6 @@ #!/usr/bin/env bash source $(git rev-parse --show-toplevel)/ci3/source - function bootstrap_all { # To run bb we need a crs. # Download ignition up front to ensure no race conditions at runtime. @@ -32,20 +31,6 @@ case "$cmd" in "release-preview") ./docs/bootstrap.sh release-preview ;; - bootstrap_e2e_hack) - echo "WARNING: This assumes your PR only changes barretenberg and the rest of the repository is unchanged from master." - echo "WARNING: This is only sound if you have not changed VK generation! (or noir-projects VKs will be incorrect)." - echo "WARNING: It builds up until yarn-project and allows end-to-end tests (not boxes/playground/release image etc)." - merge_base=$(git merge-base HEAD origin/master) - for project in noir barretenberg avm-transpiler noir-projects l1-contracts yarn-project ; do - if [ $project == barretenberg ]; then - ../$project/bootstrap.sh # i.e. 
this script - else - AZTEC_CACHE_COMMIT=$merge_base ../$project/bootstrap.sh - fi - done - ;; - *) echo "Unknown command: $cmd" exit 1 diff --git a/barretenberg/cpp/CLAUDE.md b/barretenberg/cpp/CLAUDE.md new file mode 100644 index 000000000000..c9653a5c5053 --- /dev/null +++ b/barretenberg/cpp/CLAUDE.md @@ -0,0 +1,83 @@ +succint aztec-packages cheat sheet. + +THE PROJECT ROOT IS AT TWO LEVELS ABOVE THIS FOLDER. Typically, the repository is at ~/aztec-packages. all advice is from the root. + +Run ./bootstrap.sh at the top-level to be sure the repo fully builds. +Bootstrap scripts can be called with relative paths e.g. ../barretenberg/bootstrap.sh +You can use DISABLE_AVM=1 to bootstrap things generally. + +# Working on modules: + +## barretenberg/ +The core proving system library. Focus development is in barretenberg/cpp. + +### cpp/ => cpp code for prover library +Bootstrap modes: +- `./bootstrap.sh` => full build, needed for other components +- `./bootstrap.sh build` => standard build +- `DISABLE_AVM=1 ./bootstrap.sh build_native` => quick build without AVM. Good for verifying compilation works. Needed to build ts/ +Development commands: +- cmake --preset build-no-avm + cd build-no-avm + ninja + NOTE: DO NOT add the -j flag, default is optimal. 
+ where test is based on what you're working on: + - `./bin/ultra_honk_tests` - Ultra Honk circuit tests + - `./bin/client_ivc_tests` - Client IVC tests + - `./bin/api_tests` - API/CLI tests + - `./bin/stdlib_*_tests` - Standard library tests + - `./bin/crypto_*_tests` - Cryptographic primitive tests + +### Barretenberg module components: +- **commitment_schemes/** - Polynomial commitment schemes (KZG, IPA) +- **crypto/** - Cryptographic primitives (hashes, merkle trees, fields) +- **ecc/** - Elliptic curve operations +- **flavor/** - Circuit proving system flavors (Ultra, Mega) +- **honk/** - The Honk proving system implementation +- **stdlib/** - Circuit-friendly implementations of primitives +- **ultra_honk/** - Ultra Honk prover/verifier +- **client_ivc/** - Client-side IVC (Incremental Verifiable Computation) +- **vm2/** - AVM implementation (not enabled, but might need to be fixed for compilation issues in root ./bootstrap.sh) +- **bbapi/** - BB API for external interaction. If changing here, we will also want to update the ts/ folder because bb.js consumes this. (first build ninja bb in build/) +- **dsl/** - ACIR definition in C++. This is dictated by the serialization in noir/, so refactor should generally not change the structure without confirming that the user is changing noir. + +### ts/ => typescript code for bb.js +Bootstrap modes: +- `./bootstrap.sh` => generate TypeScript bindings and build. See package.json for more fine-grained commands. +Other commands: +- `yarn build:esm` => the quickest way to rebuild, if only changes inside ts/ folder, and only testing yarn-project. + +## noir/ +### noir-repo/ => clone of noir programming language git repo +Bootstrap modes: +- `./bootstrap.sh` => standard build + +## avm-transpiler: +Transpiles Noir to AVM bytecode +Bootstrap modes: +- `./bootstrap.sh` => standard build + +## Integration testing: +The focus is on barretenberg/cpp development. 
Other components need to work with barretenberg changes: + +### yarn-project/end-to-end - E2E tests that verify the full stack +Run end-to-end tests from the root directory: +```bash +# Run specific e2e tests +yarn-project/end-to-end/scripts/run_test.sh simple e2e_block_building +# To run this you CANNOT USE DISABLE_AVM=1. Only run this if the user asks (e.g. 'run the prover full test') You first need to confirm with the user that they want to build without AVM. +yarn-project/end-to-end/scripts/run_test.sh simple e2e_prover/full + +### yarn-project IVC integration tests +Run IVC (Incremental Verifiable Computation) integration tests from the root: +```bash +# Run specific IVC tests +yarn-project/scripts/run_test.sh ivc-integration/src/native_client_ivc_integration.test.ts +yarn-project/scripts/run_test.sh ivc-integration/src/wasm_client_ivc_integration.test.ts +yarn-project/scripts/run_test.sh ivc-integration/src/browser_client_ivc_integration.test.ts + +# Run rollup IVC tests (with verbose logging) +BB_VERBOSE=1 yarn-project/scripts/run_test.sh ivc-integration/src/rollup_ivc_integration.test.ts +``` + +When making barretenberg changes, ensure these tests still pass. 
diff --git a/barretenberg/cpp/CMakeLists.txt b/barretenberg/cpp/CMakeLists.txt index 56801e4a39b1..202f1bbff7cc 100644 --- a/barretenberg/cpp/CMakeLists.txt +++ b/barretenberg/cpp/CMakeLists.txt @@ -29,7 +29,7 @@ option(COVERAGE "Enable collecting coverage from tests" OFF) option(ENABLE_ASAN "Address sanitizer for debugging tricky memory corruption" OFF) option(ENABLE_HEAVY_TESTS "Enable heavy tests when collecting coverage" OFF) # Note: Must do 'sudo apt-get install libdw-dev' or equivalent -option(CHECK_CIRCUIT_STACKTRACES "Enable (slow) stack traces for check circuit" OFF) +option(ENABLE_STACKTRACES "Enable stack traces on assertion" OFF) option(ENABLE_TRACY "Enable low-medium overhead profiling for memory and performance with tracy" OFF) option(ENABLE_PIC "Builds with position independent code" OFF) option(SYNTAX_ONLY "only check syntax (-fsyntax-only)" OFF) @@ -43,8 +43,8 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64" OR CMAKE_SYSTEM_PROCESSOR MATCHES "a set(RUN_HAVE_POSIX_REGEX 0) endif() -if(CHECK_CIRCUIT_STACKTRACES) - add_compile_options(-DCHECK_CIRCUIT_STACKTRACES) +if(ENABLE_STACKTRACES) + add_compile_options(-DCHECK_CIRCUIT_STACKTRACES -DSTACKTRACES) endif() if(ENABLE_TRACY OR ENABLE_TRACY_TIME_INSTRUMENTED) @@ -103,7 +103,7 @@ set(CMAKE_CXX_STANDARD_REQUIRED TRUE) set(CMAKE_CXX_EXTENSIONS ON) if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") - add_compile_options(-fbracket-depth=512) + add_compile_options(-fbracket-depth=1024) if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "14") message(WARNING "Clang <14 is not supported") endif() @@ -139,6 +139,8 @@ include(cmake/gtest.cmake) include(cmake/benchmark.cmake) include(cmake/module.cmake) include(cmake/msgpack.cmake) +include(cmake/nlohmann_json.cmake) +include(cmake/httplib.cmake) include(cmake/lmdb.cmake) include(cmake/libdeflate.cmake) @@ -148,6 +150,11 @@ if (NOT WASM) else() set(DISABLE_AZTEC_VM ON) endif() + +if(AVM_TRANSPILER_LIB) + include(cmake/avm-transpiler.cmake) +endif() + if(DISABLE_AZTEC_VM) 
add_definitions(-DDISABLE_AZTEC_VM=1) endif() @@ -161,7 +168,7 @@ if(DISABLE_AZTEC_VM) endif() add_subdirectory(src) if (ENABLE_ASAN AND NOT(FUZZING)) - find_program(LLVM_SYMBOLIZER_PATH NAMES llvm-symbolizer-16) + find_program(LLVM_SYMBOLIZER_PATH NAMES llvm-symbolizer-20) if (NOT(LLVM_SYMBOLIZER_PATH)) message(WARNING "LLVM symbolizer not found, so ASAN output will be limited") else() diff --git a/barretenberg/cpp/CMakePresets.json b/barretenberg/cpp/CMakePresets.json index 2c2631225f64..c30f1c99e064 100644 --- a/barretenberg/cpp/CMakePresets.json +++ b/barretenberg/cpp/CMakePresets.json @@ -18,6 +18,7 @@ "CMAKE_EXPORT_COMPILE_COMMANDS": "ON" }, "cacheVariables": { + "CMAKE_BUILD_TYPE": "Release", "TARGET_ARCH": "skylake" } }, @@ -44,113 +45,82 @@ "description": "Build with Clang installed via Homebrew", "inherits": "default", "environment": { - "CC": "$env{BREW_PREFIX}/opt/llvm@18/bin/clang", - "CXX": "$env{BREW_PREFIX}/opt/llvm@18/bin/clang++" + "CC": "$env{BREW_PREFIX}/opt/llvm@20/bin/clang", + "CXX": "$env{BREW_PREFIX}/opt/llvm@20/bin/clang++" } }, { - "name": "darwin-arm64", - "displayName": "Apple arm64 Cross-compile", + "name": "clang20", + "displayName": "Build with Clang-20", + "description": "Build with globally installed Clang-20", "inherits": "default", - "binaryDir": "build-darwin-arm64", - "cacheVariables": { - "CMAKE_BUILD_TYPE": "Release", - "CMAKE_TOOLCHAIN_FILE": "/opt/osxcross/toolchain.cmake", - "CMAKE_EXE_LINKER_FLAGS": "-fuse-ld=/opt/osxcross/bin/arm64-apple-darwin23-ld" - }, + "binaryDir": "build", "environment": { - "CXXFLAGS": "-Wno-deprecated-declarations", - "OSXCROSS_HOST": "arm64-apple-darwin23", - "OSXCROSS_TARGET_DIR": "/opt/osxcross", - "OSXCROSS_TARGET": "darwin23", - "OSXCROSS_SDK": "/opt/osxcross/SDK/MacOSX14.0.sdk" - } - }, - { - "name": "darwin-amd64", - "displayName": "Apple amd64 Cross-compile", - "inherits": "darwin-arm64", - "binaryDir": "build-darwin-amd64", - "cacheVariables": { - "CMAKE_EXE_LINKER_FLAGS": 
"-fuse-ld=/opt/osxcross/bin/x86_64-apple-darwin23-ld" + "CC": "clang-20", + "CXX": "clang++-20" }, - "environment": { - "CXXFLAGS": "-Wno-deprecated-declarations", - "OSXCROSS_HOST": "x86_64-apple-darwin23" - } - }, - { - "name": "clang18", - "displayName": "Build with Clang-18", - "description": "Build with globally installed Clang-18", - "inherits": "default", - "environment": { - "CC": "clang-18", - "CXX": "clang++-18" + "cacheVariables": { + "AVM_TRANSPILER_LIB": "${sourceDir}/../../avm-transpiler/target/release/libavm_transpiler.a" } }, { - "name": "clang16", - "displayName": "Build with Clang-16", - "description": "Build with globally installed Clang-16", + "name": "clang20-assert", + "displayName": "Build with Clang-20", + "description": "Build with globally installed Clang-20", "inherits": "default", + "binaryDir": "build", "environment": { - "CC": "clang-16", - "CXX": "clang++-16" + "CC": "clang-20", + "CXX": "clang++-20" + }, + "cacheVariables": { + "CMAKE_BUILD_TYPE": "RelWithAssert" } }, { - "name": "clang16-no-exceptions", - "displayName": "Build with Clang-16", - "description": "Build with globally installed Clang-16", + "name": "clang20-no-exceptions", + "displayName": "Build with Clang-20", + "description": "Build with globally installed Clang-20", "inherits": "default", "environment": { - "CC": "clang-16", - "CXX": "clang++-16", + "CC": "clang-20", + "CXX": "clang++-20", "CMAKE_CXX_FLAGS": "-DBB_NO_EXCEPTIONS" } }, { - "name": "clang16-pic", + "name": "clang20-pic", "displayName": "Release build with Position Independent Code", - "description": "Build with globally installed Clang-16 using Position Independent Code", - "inherits": "clang16", + "description": "Build with globally installed Clang-20 using Position Independent Code", + "inherits": "clang20", "binaryDir": "build-pic", "cacheVariables": { "ENABLE_PIC": "ON" } }, { - "name": "clang16-pic-assert", - "displayName": "Release build with Position Independent Code and asserts", - 
"description": "Build with globally installed Clang-16 using Position Independent Code and asserts", - "inherits": "clang16-pic", - "cacheVariables": { - "CMAKE_BUILD_TYPE": "RelWithAssert" - } - }, - { - "name": "clang16-dbg", - "displayName": "Debugging build with Clang-16", - "description": "Build with globally installed Clang-16 in debug mode", - "inherits": "clang16", + "name": "debug", + "displayName": "Debugging build with Clang-20", + "description": "Build with globally installed Clang-20 in debug mode", + "inherits": "clang20", "binaryDir": "build-debug", "environment": { - "CMAKE_BUILD_TYPE": "Debug", "CFLAGS": "-gdwarf-4", "CXXFLAGS": "-gdwarf-4", "LDFLAGS": "-gdwarf-4" }, "cacheVariables": { + "CMAKE_BUILD_TYPE": "Debug", "ENABLE_ASAN": "OFF", - "DISABLE_ASM": "ON" + "DISABLE_ASM": "ON", + "ENABLE_STACKTRACES": "ON" } }, { "name": "tracy-memory", "displayName": "Release build with tracy with memory tracking", "description": "Release build with tracy with memory tracking", - "inherits": "clang16-assert", + "inherits": "clang20", "binaryDir": "build-tracy-memory", "cacheVariables": { "ENABLE_TRACY": "ON", @@ -168,7 +138,7 @@ "displayName": "Build for tracy time profiling via instrumentation", "description": "Build for tracy time profiling via instrumentation", "binaryDir": "build-tracy-time-instrumented", - "inherits": "clang16-assert", + "inherits": "clang20", "cacheVariables": { "ENABLE_TRACY_TIME_INSTRUMENTED": "ON" } @@ -178,14 +148,14 @@ "displayName": "Build for tracy time profiling via sampling", "description": "Build for tracy time profiling via sampling", "binaryDir": "build-tracy-time-sampled", - "inherits": "clang16-assert", + "inherits": "clang20", "environment": { - "CMAKE_BUILD_TYPE": "RelWithDebInfo", "CFLAGS": "-g -fno-omit-frame-pointer", "CXXFLAGS": "-g -fno-omit-frame-pointer", "LDFLAGS": "-g -fno-omit-frame-pointer -rdynamic" }, "cacheVariables": { + "CMAKE_BUILD_TYPE": "RelWithDebInfo", "ENABLE_TRACY": "ON" } }, @@ -193,7 +163,7 @@ 
"name": "tracy-gates", "displayName": "Release build with tracy - but hacked for gate tracking", "description": "Release build with tracy - but hacker for gate tracking", - "inherits": "clang16", + "inherits": "clang20", "binaryDir": "build-tracy-gates", "cacheVariables": { "ENABLE_TRACY": "ON", @@ -201,92 +171,94 @@ } }, { - "name": "clang16-dbg-fast", - "displayName": "Optimized debug build with Clang-16", - "description": "Build with globally installed Clang-16 in optimized debug mode", - "inherits": "clang16-dbg", + "name": "debug-fast", + "displayName": "Optimized debug build with Clang-20", + "description": "Build with globally installed Clang-20 in optimized debug mode", + "inherits": "debug", "binaryDir": "build-debug-fast", "environment": { - "CMAKE_BUILD_TYPE": "Debug", "CFLAGS": "-O2 -gdwarf", "CXXFLAGS": "-O2 -gdwarf-4", "LDFLAGS": "-O2 -gdwarf-4" - } - }, - { - "name": "clang16-no-avm", - "displayName": "Optimized build with Clang-16 (no AVM)", - "description": "Build with globally installed Clang-16 excluding the Aztec VM", - "inherits": "clang16", - "binaryDir": "build-no-avm", + }, "cacheVariables": { - "DISABLE_AZTEC_VM": "ON" + "ENABLE_STACKTRACES": "ON" } }, { - "name": "clang16-dbg-no-avm", - "displayName": "Optimized debug build with Clang-16 (no AVM)", - "description": "Build with globally installed Clang-16 in debug mode excluding the Aztec VM", - "inherits": "clang16-dbg", - "binaryDir": "build-debug-no-avm", + "name": "debug-fast-notraces", + "displayName": "Optimized debug build with Clang-20", + "description": "Build with globally installed Clang-20 in optimized debug mode (no stack traces)", + "inherits": "debug", + "binaryDir": "build-debug-fast", + "environment": { + "CFLAGS": "-O2 -gdwarf", + "CXXFLAGS": "-O2 -gdwarf-4", + "LDFLAGS": "-O2 -gdwarf-4" + }, "cacheVariables": { - "DISABLE_AZTEC_VM": "ON" + "DISABLE_ASM": "OFF", + "ENABLE_STACKTRACES": "OFF" } }, { - "name": "clang16-dbg-fast-circuit-check-traces", - "displayName": 
"Optimized debug build with Clang-16 with stack traces for failing circuit checks", - "description": "Build with globally installed Clang-16 in optimized debug mode with stack traces for failing circuit checks", - "inherits": "clang16-dbg-fast", - "binaryDir": "build-debug-fast-circuit-check-traces", + "name": "clang20-no-avm", + "displayName": "Optimized build with Clang-20 (no AVM)", + "description": "Build with globally installed Clang-20 excluding the Aztec VM", + "inherits": "clang20", + "binaryDir": "build-no-avm", "cacheVariables": { - "CHECK_CIRCUIT_STACKTRACES": "ON" + "DISABLE_AZTEC_VM": "ON", + "AVM_TRANSPILER_LIB": "" } }, { - "name": "clang18-assert", - "binaryDir": "build", - "displayName": "Build with Clang-18 using RelWithAssert", - "description": "Build with globally installed Clang-18 in release with ASSERTs mode", - "inherits": "clang18", - "environment": { - "CMAKE_BUILD_TYPE": "RelWithAssert" + "name": "debug-fast-no-avm", + "displayName": "Optimized debug build with Clang-20 (no AVM)", + "description": "Build with globally installed Clang-20 in debug mode excluding the Aztec VM", + "inherits": "debug-fast", + "binaryDir": "build-debug-fast-no-avm", + "cacheVariables": { + "DISABLE_AZTEC_VM": "ON", + "ENABLE_STACKTRACES": "ON", + "AVM_TRANSPILER_LIB": "" } }, { - "name": "clang16-assert", - "binaryDir": "build", - "displayName": "Build with Clang-16 using RelWithAssert", - "description": "Build with globally installed Clang-16 in release with ASSERTs mode", - "inherits": "clang16", - "environment": { - "CMAKE_BUILD_TYPE": "RelWithAssert" + "name": "debug-no-avm", + "displayName": "Debug build with Clang-20 (no AVM)", + "description": "Build with globally installed Clang-20 in debug mode excluding the Aztec VM", + "inherits": "debug", + "binaryDir": "build-debug-no-avm", + "cacheVariables": { + "DISABLE_AZTEC_VM": "ON", + "ENABLE_STACKTRACES": "ON", + "AVM_TRANSPILER_LIB": "" } }, { "name": "asan-fast", - "displayName": "Debugging build with 
address sanitizer on Clang-16 in optimized debug mode", - "description": "Build with address sanitizer on clang16 in optimized debug mode", - "inherits": "clang16-dbg-fast", + "displayName": "Debugging build with address sanitizer on Clang-20 in optimized debug mode", + "description": "Build with address sanitizer on clang20 in optimized debug mode", + "inherits": "debug-fast", "binaryDir": "build-asan-fast", "cacheVariables": { "ENABLE_ASAN": "ON", "DISABLE_AZTEC_VM": "ON", - "DISABLE_ASM": "ON" + "DISABLE_ASM": "ON", + "ENABLE_STACKTRACES": "OFF" } }, { "name": "asan", - "displayName": "Debugging build with address sanitizer on Clang-16", - "description": "Build with address sanitizer on clang16 with debugging information", - "inherits": "clang16-dbg", + "displayName": "Debugging build with address sanitizer on Clang-20", + "description": "Build with address sanitizer on clang20 with debugging information", + "inherits": "debug", "binaryDir": "build-asan", - "environment": { - "CMAKE_BUILD_TYPE": "Debug" - }, "cacheVariables": { "ENABLE_ASAN": "ON", - "DISABLE_ASM": "ON" + "DISABLE_ASM": "ON", + "ENABLE_STACKTRACES": "OFF" } }, { @@ -301,46 +273,23 @@ "CXXFLAGS": "-Wno-missing-field-initializers" } }, - { - "name": "gcc10", - "displayName": "Build with GCC-10", - "description": "Build with globally installed GCC-10", - "inherits": "default", - "environment": { - "CC": "gcc-10", - "CXX": "g++-10", - "CXXFLAGS": "-Wno-missing-field-initializers" - } - }, - { - "name": "gcc13", - "displayName": "Build with GCC-13", - "description": "Build with globally installed GCC-13", - "inherits": "default", - "environment": { - "CC": "gcc-13", - "CXX": "g++-13", - "CXXFLAGS": "-Wno-missing-field-initializers" - } - }, { "name": "bench", "displayName": "Build benchmarks", "description": "Build default preset but with a special benchmark directory", - "inherits": "clang16", + "inherits": "clang20", "binaryDir": "build-bench" }, { "name": "fuzzing", "displayName": "Build with 
fuzzing", "description": "Build default preset but with fuzzing enabled", - "inherits": "clang16", + "inherits": "clang20", "binaryDir": "build-fuzzing", "cacheVariables": { "FUZZING": "ON", - "DISABLE_AZTEC_VM": "ON" - }, - "environment": { + "DISABLE_AZTEC_VM": "ON", + "AVM_TRANSPILER_LIB": "", "CMAKE_BUILD_TYPE": "RelWithAssert" } }, @@ -348,14 +297,13 @@ "name": "fuzzing-noasm", "displayName": "Build with fuzzing and ASM disabled", "description": "Build default preset but with fuzzing enabled and ASM disabled", - "inherits": "clang16", + "inherits": "clang20", "binaryDir": "build-fuzzing-noasm", "cacheVariables": { "FUZZING": "ON", "DISABLE_AZTEC_VM": "ON", - "DISABLE_ASM": "ON" - }, - "environment": { + "AVM_TRANSPILER_LIB": "", + "DISABLE_ASM": "ON", "CMAKE_BUILD_TYPE": "RelWithAssert" } }, @@ -363,16 +311,15 @@ "name": "fuzzing-asan", "displayName": "Build with fuzzing", "description": "Build default preset but with fuzzing and asan enabled", - "inherits": "clang16-dbg", + "inherits": "debug", "binaryDir": "build-fuzzing-asan", "cacheVariables": { "FUZZING": "ON", "FUZZING_SHOW_INFORMATION": "ON", "DISABLE_AZTEC_VM": "ON", + "AVM_TRANSPILER_LIB": "", "ENABLE_ASAN": "ON", - "DISABLE_ASM": "ON" - }, - "environment": { + "DISABLE_ASM": "ON", "CMAKE_BUILD_TYPE": "RelWithAssert" } }, @@ -380,33 +327,35 @@ "name": "fuzzing-coverage", "displayName": "Build with coverage flags", "description": "Build default preset but with fuzzing and coverage enabled", - "inherits": "clang16", + "inherits": "clang20", "binaryDir": "build-fuzzing-cov", "cacheVariables": { "FUZZING": "ON", "DISABLE_ASM": "ON", - "DISABLE_AZTEC_VM": "ON" + "AVM_TRANSPILER_LIB": "", + "DISABLE_AZTEC_VM": "ON", + "CMAKE_BUILD_TYPE": "RelWithAssert" }, "environment": { - "CXXFLAGS": "-fprofile-instr-generate -fcoverage-mapping", - "CMAKE_BUILD_TYPE": "RelWithAssert" + "CXXFLAGS": "-fprofile-instr-generate -fcoverage-mapping" } }, { "name": "smt-verification", - "displayName": "Build with smt 
verificaiton", + "displayName": "Build with smt verification", "description": "Build default preset but with smt library included", - "inherits": "clang16", + "inherits": "clang20", "binaryDir": "build-smt", "cacheVariables": { + "AVM_TRANSPILER_LIB": "", "SMT": "ON" } }, { "name": "tsan", - "displayName": "Debugging build with thread sanitizer on Clang-16", - "description": "Build with thread sanitizer on clang16 with debugging information", - "inherits": "clang16-dbg", + "displayName": "Debugging build with thread sanitizer on Clang-20", + "description": "Build with thread sanitizer on clang20 with debugging information", + "inherits": "debug", "binaryDir": "build-tsan", "cacheVariables": { "HAVE_STD_REGEX": "ON" @@ -419,9 +368,9 @@ }, { "name": "ubsan", - "displayName": "Debugging build with undefined behaviour sanitizer on Clang-16", - "description": "Build with undefined behaviour sanitizer on clang16 with debugging information", - "inherits": "clang16-dbg", + "displayName": "Debugging build with undefined behaviour sanitizer on Clang-20", + "description": "Build with undefined behaviour sanitizer on clang20 with debugging information", + "inherits": "debug", "binaryDir": "build-ubsan", "generator": "Unix Makefiles", "environment": { @@ -432,9 +381,9 @@ }, { "name": "msan", - "displayName": "Debugging build with memory sanitizer on Clang-16", - "description": "Build with thread sanitizer on clang16 with debugging information", - "inherits": "clang16-dbg", + "displayName": "Debugging build with memory sanitizer on Clang-20", + "description": "Build with thread sanitizer on clang20 with debugging information", + "inherits": "debug", "binaryDir": "build-msan", "environment": { "CFLAGS": "-fsanitize=memory", @@ -448,48 +397,27 @@ } }, { - "name": "op-count", - "displayName": "Release build with operation counts", - "description": "Build with op counting", - "inherits": "clang16-assert", - "binaryDir": "build-op-count", - "environment": { - "CXXFLAGS": 
"-DBB_USE_OP_COUNT -DBB_USE_OP_COUNT_TRACK_ONLY" - } - }, - { - "name": "op-count-time", - "displayName": "Release build with time and clock counts", - "description": "Build with op counting", - "inherits": "clang16-assert", - "binaryDir": "build-op-count-time", - "cacheVariables": { - "DISABLE_AZTEC_VM": "ON" - }, - "environment": { - "CXXFLAGS": "-DBB_USE_OP_COUNT -DBB_USE_OP_COUNT_TIME_ONLY" - } - }, - { - "name": "clang16-coverage", + "name": "clang20-coverage", "displayName": "Build with coverage", - "description": "Build clang16 preset but with coverage enabled", - "inherits": "clang16", + "description": "Build clang20 preset but with coverage enabled", + "inherits": "clang20", "binaryDir": "build-coverage", "cacheVariables": { "COVERAGE": "ON", "DISABLE_ASM": "ON", + "AVM_TRANSPILER_LIB": "", "DISABLE_AZTEC_VM": "ON" } }, { "name": "gperftools", - "displayName": "Debugging build with gperftools on Clang-16", + "displayName": "Debugging build with gperftools on Clang-20", "description": "Build with gperf", - "inherits": "clang16", + "inherits": "clang20", "binaryDir": "build-gperftools", "cacheVariables": { "CMAKE_BUILD_TYPE": "RelWithDebInfo", + "AVM_TRANSPILER_LIB": "", "CMAKE_EXE_LINKER_FLAGS": "-ltcmalloc", "CXXFLAGS": "-fno-omit-frame-pointer" } @@ -528,10 +456,8 @@ "description": "Build for pthread enabled WASM", "inherits": "wasm", "binaryDir": "build-wasm-threads", - "environment": { - "CMAKE_BUILD_TYPE": "Release" - }, "cacheVariables": { + "CMAKE_BUILD_TYPE": "Release", "MULTITHREADING": "ON" } }, @@ -541,29 +467,17 @@ "binaryDir": "build-wasm-threads-dbg", "description": "Build with wasi-sdk to create debug wasm", "inherits": "wasm", - "environment": { - "CMAKE_BUILD_TYPE": "Debug" - }, "cacheVariables": { + "CMAKE_BUILD_TYPE": "Debug", "MULTITHREADING": "ON" } }, - { - "name": "wasm-threads-assert", - "displayName": "Build for WASM with multithreading and asserts", - "binaryDir": "build-wasm-threads-assert", - "description": "Build with wasi-sdk 
with asserts", - "inherits": "wasm-threads", - "environment": { - "CMAKE_BUILD_TYPE": "RelWithAssert" - } - }, { "name": "xray", "displayName": "Build with multi-threaded XRay Profiling", "description": "Build with Clang and enable multi-threaded LLVM XRay for profiling", "generator": "Unix Makefiles", - "inherits": "clang16", + "inherits": "clang20", "environment": { "CFLAGS": "-fxray-instrument", "CXXFLAGS": "-fxray-instrument -fxray-instruction-threshold=500 -DXRAY=1", @@ -572,26 +486,109 @@ "binaryDir": "build-xray" }, { - "name": "xray-verbose", - "displayName": "Build with detailed XRay Profiling", - "description": "Build with Clang and enable detailed LLVM XRay for profiling", - "inherits": "xray", + "name": "zig-base", + "displayName": "Zig (base)", + "generator": "Ninja", + "binaryDir": "${sourceDir}/build-${presetName}", + "environment": { + "CC": "zig cc", + "CXX": "zig c++" + }, + "cacheVariables": { + "ENABLE_PIC": "ON", + "CMAKE_BUILD_TYPE": "Release", + "DISABLE_AZTEC_VM": "ON", + "CMAKE_AR": "${sourceDir}/scripts/zig-ar.sh", + "CMAKE_RANLIB": "${sourceDir}/scripts/zig-ranlib.sh" + } + }, + { + "name": "zig-amd64-macos", + "displayName": "Build for macOS amd64 with Zig", + "description": "Cross-compile for intel macOS using Zig", + "binaryDir": "build-zig-amd64-macos", + "generator": "Ninja", + "inherits": "zig-base", + "environment": { + "CC": "zig cc -target x86_64-macos -mcpu=baseline", + "CXX": "zig c++ -target x86_64-macos -mcpu=baseline" + }, + "cacheVariables": { + "CMAKE_SYSTEM_NAME": "Darwin", + "CMAKE_SYSTEM_PROCESSOR": "x86_64", + "AVM_TRANSPILER_LIB": "${sourceDir}/../../avm-transpiler/target/x86_64-apple-darwin/release/libavm_transpiler.a", + "HAVE_STD_REGEX": "ON" + } + }, + { + "name": "zig-arm64-macos", + "displayName": "Build for macOS arm64 with Zig", + "description": "Cross-compile for macOS arm64 (Apple Silicon) using Zig", + "binaryDir": "build-zig-arm64-macos", + "generator": "Ninja", + "inherits": "zig-base", + "environment": { 
+ "CC": "zig cc -target aarch64-macos -mcpu=apple_a14", + "CXX": "zig c++ -target aarch64-macos -mcpu=apple_a14" + }, + "cacheVariables": { + "CMAKE_SYSTEM_NAME": "Darwin", + "CMAKE_SYSTEM_PROCESSOR": "aarch64", + "AVM_TRANSPILER_LIB": "${sourceDir}/../../avm-transpiler/target/aarch64-apple-darwin/release/libavm_transpiler.a", + "HAVE_STD_REGEX": "ON" + } + }, + { + "name": "zig-node-amd64-linux", + "inherits": "zig-base", + "environment": { + "CC": "zig cc -target x86_64-linux-gnu", + "CXX": "zig c++ -target x86_64-linux-gnu", + "LDFLAGS": "-s" + }, + "cacheVariables": { + "CMAKE_SYSTEM_NAME": "Linux", + "TARGET_ARCH": "skylake" + } + }, + { + "name": "zig-node-arm64-linux", + "inherits": "zig-base", "environment": { - "CFLAGS": "-fxray-instrument -fxray-instruction-threshold=100 -finline-max-stacksize=150 -DXRAY=1", - "CXXFLAGS": "-fxray-instrument -fxray-instruction-threshold=100 -finline-max-stacksize=150 -DXRAY=1", - "LDFLAGS": "-fxray-instrument -fxray-instruction-threshold=100 -finline-max-stacksize=150 -DXRAY=1" + "CC": "zig cc -target aarch64-linux-gnu", + "CXX": "zig c++ -target aarch64-linux-gnu", + "LDFLAGS": "-s" }, - "binaryDir": "build-xray-verbose" + "cacheVariables": { + "CMAKE_SYSTEM_NAME": "Linux", + "TARGET_ARCH": "generic" + } }, { - "name": "xray-1thread", - "displayName": "Build with single-threaded XRay Profiling", - "description": "Build with Clang and enable single-threaded LLVM XRay for profiling", - "inherits": "xray", + "name": "zig-node-amd64-macos", + "inherits": "zig-base", + "environment": { + "CC": "zig cc -target x86_64-macos", + "CXX": "zig c++ -target x86_64-macos", + "LDFLAGS": "-Wl,-undefined,dynamic_lookup" + }, "cacheVariables": { - "MULTITHREADING": "OFF" + "CMAKE_SYSTEM_NAME": "Darwin", + "TARGET_ARCH": "sandybridge" + } + }, + { + "name": "zig-node-arm64-macos", + "inherits": "zig-base", + "environment": { + "CC": "zig cc -target aarch64-macos -mcpu=apple_a14", + "CXX": "zig c++ -target aarch64-macos -mcpu=apple_a14", + 
"LDFLAGS": "-Wl,-undefined,dynamic_lookup" }, - "binaryDir": "build-xray-1thread" + "cacheVariables": { + "CMAKE_SYSTEM_NAME": "Darwin", + "CMAKE_SYSTEM_PROCESSOR": "aarch64" + } } ], "buildPresets": [ @@ -607,49 +604,29 @@ "configurePreset": "homebrew" }, { - "name": "clang18", + "name": "clang20", "inherits": "default", - "configurePreset": "clang18" + "configurePreset": "clang20" }, { - "name": "clang16", + "name": "debug", "inherits": "default", - "configurePreset": "clang16" + "configurePreset": "debug" }, { - "name": "op-count-time", + "name": "clang20-no-avm", "inherits": "default", - "configurePreset": "op-count-time" + "configurePreset": "clang20-no-avm" }, { - "name": "op-count", + "name": "debug-fast-no-avm", "inherits": "default", - "configurePreset": "op-count" + "configurePreset": "debug-fast-no-avm" }, { - "name": "darwin-arm64", + "name": "debug-no-avm", "inherits": "default", - "configurePreset": "darwin-arm64" - }, - { - "name": "darwin-amd64", - "inherits": "default", - "configurePreset": "darwin-amd64" - }, - { - "name": "clang16-dbg", - "inherits": "default", - "configurePreset": "clang16-dbg" - }, - { - "name": "clang16-no-avm", - "inherits": "default", - "configurePreset": "clang16-no-avm" - }, - { - "name": "clang16-dbg-no-avm", - "inherits": "default", - "configurePreset": "clang16-dbg-no-avm" + "configurePreset": "debug-no-avm" }, { "name": "tracy-memory", @@ -667,14 +644,9 @@ "configurePreset": "tracy-time-sampled" }, { - "name": "clang16-pic", - "inherits": "default", - "configurePreset": "clang16-pic" - }, - { - "name": "clang16-pic-assert", + "name": "clang20-pic", "inherits": "default", - "configurePreset": "clang16-pic-assert" + "configurePreset": "clang20-pic" }, { "name": "default-pic", @@ -687,24 +659,14 @@ "configurePreset": "tracy-gates" }, { - "name": "clang16-dbg-fast", + "name": "debug-fast", "inherits": "default", - "configurePreset": "clang16-dbg-fast" - }, - { - "name": "clang16-dbg-fast-circuit-check-traces", - 
"inherits": "clang16-dbg-fast", - "configurePreset": "clang16-dbg-fast-circuit-check-traces" + "configurePreset": "debug-fast" }, { - "name": "clang18-assert", + "name": "debug-fast-notraces", "inherits": "default", - "configurePreset": "clang18-assert" - }, - { - "name": "clang16-assert", - "inherits": "default", - "configurePreset": "clang16-assert" + "configurePreset": "debug-fast-notraces" }, { "name": "asan-fast", @@ -721,44 +683,34 @@ "inherits": "default", "configurePreset": "gcc" }, - { - "name": "gcc10", - "inherits": "default", - "configurePreset": "gcc10" - }, - { - "name": "gcc13", - "inherits": "default", - "configurePreset": "gcc13" - }, { "name": "bench", - "inherits": "clang16", + "inherits": "clang20", "configurePreset": "bench" }, { "name": "fuzzing", - "inherits": "clang16", + "inherits": "clang20", "configurePreset": "fuzzing" }, { "name": "fuzzing-asan", - "inherits": "clang16-dbg", + "inherits": "debug", "configurePreset": "fuzzing-asan" }, { "name": "fuzzing-coverage", - "inherits": "clang16", + "inherits": "clang20", "configurePreset": "fuzzing-coverage" }, { "name": "gperftools", - "inherits": "clang16", + "inherits": "clang20", "configurePreset": "gperftools" }, { "name": "smt-verification", - "inherits": "clang16", + "inherits": "clang20", "configurePreset": "smt-verification" }, { @@ -777,9 +729,9 @@ "configurePreset": "ubsan" }, { - "name": "clang16-coverage", - "inherits": "clang16-dbg-fast", - "configurePreset": "clang16-coverage" + "name": "clang20-coverage", + "inherits": "debug-fast", + "configurePreset": "clang20-coverage" }, { "name": "wasm", @@ -799,14 +751,7 @@ "configurePreset": "wasm-threads-dbg", "inheritConfigureEnvironment": true, "jobs": 0, - "targets": ["barretenberg.wasm", "bb_cli_bench"] - }, - { - "name": "wasm-threads-assert", - "configurePreset": "wasm-threads-assert", - "inheritConfigureEnvironment": true, - "jobs": 0, - "targets": ["barretenberg.wasm", "bb_cli_bench"] + "targets": ["barretenberg.wasm", "bb"] }, { 
"name": "wasm-threads", @@ -821,14 +766,38 @@ "inherits": "default" }, { - "name": "xray-verbose", - "configurePreset": "xray-verbose", - "inherits": "default" + "name": "zig-node-amd64-linux", + "configurePreset": "zig-node-amd64-linux", + "inheritConfigureEnvironment": true, + "targets": ["nodejs_module"] }, { - "name": "xray-1thread", - "configurePreset": "xray-1thread", - "inherits": "default" + "name": "zig-node-arm64-linux", + "configurePreset": "zig-node-arm64-linux", + "inheritConfigureEnvironment": true, + "targets": ["nodejs_module"] + }, + { + "name": "zig-node-amd64-macos", + "configurePreset": "zig-node-amd64-macos", + "inheritConfigureEnvironment": true, + "targets": ["nodejs_module"] + }, + { + "name": "zig-node-arm64-macos", + "configurePreset": "zig-node-arm64-macos", + "inheritConfigureEnvironment": true, + "targets": ["nodejs_module"] + }, + { + "name": "zig-arm64-macos", + "configurePreset": "zig-arm64-macos", + "inheritConfigureEnvironment": true + }, + { + "name": "zig-amd64-macos", + "configurePreset": "zig-amd64-macos", + "inheritConfigureEnvironment": true } ], "testPresets": [ @@ -843,14 +812,14 @@ "configurePreset": "homebrew" }, { - "name": "clang16", + "name": "clang20", "inherits": "default", - "configurePreset": "clang16" + "configurePreset": "clang20" }, { - "name": "clang16-dbg", + "name": "debug", "inherits": "default", - "configurePreset": "clang16-dbg" + "configurePreset": "debug" }, { "name": "asan", @@ -862,40 +831,30 @@ "inherits": "default", "configurePreset": "gcc" }, - { - "name": "gcc10", - "inherits": "default", - "configurePreset": "gcc10" - }, - { - "name": "gcc13", - "inherits": "default", - "configurePreset": "gcc13" - }, { "name": "bench", - "inherits": "clang16", + "inherits": "clang20", "configurePreset": "bench" }, { "name": "fuzzing", - "inherits": "clang16", + "inherits": "clang20", "configurePreset": "fuzzing" }, { "name": "fuzzing-asan", - "inherits": "clang16-dbg", + "inherits": "debug", "configurePreset": 
"fuzzing-asan" }, { "name": "smt-verification", - "inherits": "clang16", + "inherits": "clang20", "configurePreset": "smt-verification" }, { - "name": "clang16-coverage", + "name": "clang20-coverage", "inherits": "default", - "configurePreset": "clang16-coverage" + "configurePreset": "clang20-coverage" }, { "name": "wasm", diff --git a/barretenberg/cpp/bootstrap.sh b/barretenberg/cpp/bootstrap.sh index 7c9c4dab4287..adea0e0622c1 100755 --- a/barretenberg/cpp/bootstrap.sh +++ b/barretenberg/cpp/bootstrap.sh @@ -4,22 +4,31 @@ source $(git rev-parse --show-toplevel)/ci3/source_bootstrap cmd=${1:-} [ -n "$cmd" ] && shift -export native_preset=${NATIVE_PRESET:-clang16-assert} -export pic_preset=${PIC_PRESET:-clang16-pic-assert} +export native_preset=${NATIVE_PRESET:-clang20} +export pic_preset=${PIC_PRESET:-clang20-pic} export hash=$(cache_content_hash .rebuild_patterns) -if [[ $(arch) == "arm64" && "$CI" -eq 1 ]]; then - # Enable AVM for release builds (when REF_NAME is a valid semver), disable for CI/PR builds - if ! semver check "${REF_NAME:-}"; then - export DISABLE_AZTEC_VM=1 - fi -fi - if [ "${DISABLE_AZTEC_VM:-0}" -eq 1 ]; then # Make sure the different envs don't read from each other's caches. export hash="$hash-no-avm" fi +function ensure_zig { + if command -v zig &>/dev/null; then + return + fi + local arch=$(uname -m) + local zig_version=0.15.1 + local bin_path=/opt/zig-${arch}-linux-${zig_version} + if [ -f $bin_path/zig ]; then + export PATH="$bin_path:$PATH" + return + fi + echo "Installing zig $zig_version..." + curl -sL https://ziglang.org/download/$zig_version/zig-${arch}-linux-$zig_version.tar.xz | sudo tar -xJ -C /opt + export PATH="$bin_path:$PATH" +} + # Injects version number into a given bb binary. # Means we don't actually need to rebuild bb to release a new version if code hasn't changed. 
function inject_version { @@ -48,7 +57,11 @@ function build_preset() { local preset=$1 shift # DISABLE_AZTEC_VM is set to 1 in CI for arm64, or in dev usage if you export DISABLE_AZTEC_VM=1 - cmake --fresh --preset "$preset" ${DISABLE_AZTEC_VM:+-DDISABLE_AZTEC_VM=$DISABLE_AZTEC_VM} + local cmake_args=() + if [ "${DISABLE_AZTEC_VM:-0}" -eq 1 ]; then + cmake_args+=(-DDISABLE_AZTEC_VM=1 -DAVM_TRANSPILER_LIB="") + fi + cmake --fresh --preset "$preset" "${cmake_args[@]}" cmake --build --preset "$preset" "$@" } @@ -76,28 +89,33 @@ function build_asan_fast { function build_nodejs_module { set -eu + ensure_zig (cd src/barretenberg/nodejs_module && yarn --frozen-lockfile --prefer-offline) if ! cache_download barretenberg-native-nodejs-module-$hash.zst; then - build_preset $pic_preset --target nodejs_module - cache_upload barretenberg-native-nodejs-module-$hash.zst build-pic/lib/nodejs_module.node + parallel --line-buffered --tag --halt now,fail=1 build_preset ::: \ + zig-node-amd64-linux \ + zig-node-arm64-linux \ + zig-node-amd64-macos \ + zig-node-arm64-macos + cache_upload barretenberg-native-nodejs-module-$hash.zst build-zig-*-*/lib/nodejs_module.node fi } -function build_darwin { +function build_darwin_arm64 { set -eu - local arch=${1:-$(arch)} - if ! cache_download barretenberg-darwin-$hash.zst; then - # Download sdk. - local osx_sdk="MacOSX14.0.sdk" - if ! [ -d "/opt/osxcross/SDK/$osx_sdk" ]; then - echo "Downloading $osx_sdk..." - local osx_sdk_url="https://github.com/joseluisq/macosx-sdks/releases/download/14.0/${osx_sdk}.tar.xz" - curl -sSL "$osx_sdk_url" | sudo tar -xJ -C /opt/osxcross/SDK - sudo rm -rf /opt/osxcross/SDK/$osx_sdk/System - fi + ensure_zig + if ! 
cache_download barretenberg-arm64-macos-$hash.zst; then + build_preset zig-arm64-macos --target bb + cache_upload barretenberg-arm64-macos-$hash.zst build-zig-arm64-macos/bin + fi +} - build_preset darwin-$arch --target bb - cache_upload barretenberg-darwin-$hash.zst build-darwin-$arch/bin +function build_darwin_amd64 { + set -eu + ensure_zig + if ! cache_download barretenberg-amd64-macos-$hash.zst; then + build_preset zig-amd64-macos --target bb + cache_upload barretenberg-amd64-macos-$hash.zst build-zig-amd64-macos/bin fi } @@ -147,6 +165,31 @@ function build_fuzzing_syntax_check_only { cache_upload barretenberg-fuzzing-$hash.zst build-fuzzing/syntax-check-success.flag } +# Do basic tests that the smt preset still compiles and runs +function build_smt_verification { + set -eu + if cache_download barretenberg-smt-$hash.zst; then + return + fi + + if ! dpkg -l python3-pip python3-venv m4 bison >/dev/null 2>&1; then + sudo apt update && sudo apt install -y python3-pip python3-venv m4 bison + fi + cmake --preset smt-verification + + cvc5_cmake_hash=$(cache_content_hash ^barretenberg/cpp/src/barretenberg/smt_verification/CMakeLists.txt) + if cache_download barretenberg-cvc5-$cvc5_cmake_hash.zst; then + # Restore machine-dependent paths after downloading cache + find build-smt/_deps/cvc5 -type f -name "*.cmake" -exec sed -i "s|/workspace|$(pwd)|g" {} \; + else + cmake --build build-smt --target cvc5 + cache_upload barretenberg-cvc5-$cvc5_cmake_hash.zst build-smt/_deps/cvc5 + fi + + cmake --build build-smt --target smt_verification_tests + cache_upload barretenberg-smt-$hash.zst build-smt +} + function build_release { local arch=$(arch) rm -rf build-release @@ -156,19 +199,41 @@ function build_release { inject_version build-release/bb tar -czf build-release/barretenberg-$arch-linux.tar.gz -C build-release --remove-files bb - # Only release wasms built on amd64. + # Only release wasms and macOS builds on amd64. 
if [ "$arch" == "amd64" ]; then tar -czf build-release/barretenberg-wasm.tar.gz -C build-wasm/bin barretenberg.wasm tar -czf build-release/barretenberg-debug-wasm.tar.gz -C build-wasm/bin barretenberg-debug.wasm tar -czf build-release/barretenberg-threads-wasm.tar.gz -C build-wasm-threads/bin barretenberg.wasm tar -czf build-release/barretenberg-threads-debug-wasm.tar.gz -C build-wasm-threads/bin barretenberg-debug.wasm + + # Download ldid for code signing + if [ ! -f build/ldid ]; then + echo "Downloading ldid for macOS code signing..." + curl -sL https://github.com/ProcursusTeam/ldid/releases/download/v2.1.5-procursus7/ldid_linux_x86_64 -o build/ldid + chmod +x build/ldid + fi + + if semver check "$REF_NAME" && [[ "$(arch)" == "amd64" ]]; then + # Package arm64-macos + cp build-zig-arm64-macos/bin/bb build-release/bb + inject_version build-release/bb + build/ldid -S build-release/bb + tar -czf build-release/barretenberg-arm64-darwin.tar.gz -C build-release --remove-files bb + + # Package amd64-macos + cp build-zig-amd64-macos/bin/bb build-release/bb + inject_version build-release/bb + build/ldid -S build-release/bb + tar -czf build-release/barretenberg-amd64-darwin.tar.gz -C build-release --remove-files bb + fi fi } -export -f build_preset build_native build_asan_fast build_darwin build_nodejs_module build_wasm build_wasm_threads build_gcc_syntax_check_only build_fuzzing_syntax_check_only +export -f ensure_zig build_preset build_native build_asan_fast build_darwin_amd64 build_darwin_arm64 build_nodejs_module build_wasm build_wasm_threads build_gcc_syntax_check_only build_fuzzing_syntax_check_only build_smt_verification function build { echo_header "bb cpp build" + ensure_zig builds=( build_native build_nodejs_module @@ -176,10 +241,12 @@ function build { build_wasm_threads ) if [ "$(arch)" == "amd64" ] && [ "$CI" -eq 1 ]; then - builds+=(build_gcc_syntax_check_only build_fuzzing_syntax_check_only build_asan_fast) + builds+=(build_gcc_syntax_check_only 
build_fuzzing_syntax_check_only build_asan_fast build_smt_verification) fi - if [ "$CI_FULL" -eq 1 ]; then - builds+=(build_darwin) + if semver check "$REF_NAME" && [[ "$(arch)" == "amd64" ]]; then + # macOS builds require the avm-transpiler linked. + # We build them using zig cross-compilation. + builds+=(build_darwin_arm64 build_darwin_amd64) fi parallel --line-buffered --tag --halt now,fail=1 denoise {} ::: ${builds[@]} build_release @@ -225,35 +292,40 @@ function test_cmds { echo -e "$prefix barretenberg/cpp/build-asan-fast/bin/$bin_name --gtest_filter=$filter" done fi + + # Run the SMT compatibility tests + if [ "$(arch)" == "amd64" ] && [ "$CI" -eq 1 ]; then + local prefix="$hash:CPUS=4:MEM=8g" + echo -e "$prefix barretenberg/cpp/build-smt/bin/smt_verification_tests" + fi + echo "$hash barretenberg/cpp/scripts/test_civc_standalone_vks_havent_changed.sh" } # This is not called in ci. It is just for a developer to run the tests. function test { echo_header "bb test" - test_cmds | filter_test_cmds | parallelise + test_cmds | filter_test_cmds | parallelize } function build_bench { set -eu if ! cache_download barretenberg-benchmarks-$hash.zst; then # Run builds in parallel with different targets per preset - # bb_cli_bench is later used in yarn-project. 
parallel --line-buffered denoise ::: \ - "build_preset $native_preset --target ultra_honk_bench --target client_ivc_bench --target bb_cli_bench --target honk_solidity_proof_gen" \ - "build_preset wasm-threads --target ultra_honk_bench --target client_ivc_bench --target bb_cli_bench" + "build_preset $native_preset --target ultra_honk_bench --target client_ivc_bench --target bb --target honk_solidity_proof_gen" \ + "build_preset wasm-threads --target ultra_honk_bench --target client_ivc_bench --target bb" cache_upload barretenberg-benchmarks-$hash.zst \ - {build,build-wasm-threads}/bin/{ultra_honk_bench,client_ivc_bench,bb_cli_bench} + {build,build-wasm-threads}/bin/{ultra_honk_bench,client_ivc_bench,bb} fi } function bench_cmds { prefix="$hash:CPUS=8" echo "$prefix barretenberg/cpp/scripts/run_bench.sh native bb-micro-bench/native/ultra_honk build/bin/ultra_honk_bench construct_proof_ultrahonk_power_of_2/20$" - echo "$prefix barretenberg/cpp/scripts/run_bench.sh native bb-micro-bench/native/client_ivc build/bin/client_ivc_bench ClientIVCBench/Full/6$" - echo "$prefix barretenberg/cpp/scripts/run_bench.sh native bb-micro-bench/native/client_ivc_17_in_20 build/bin/client_ivc_bench ClientIVCBench/Ambient_17_in_20/6$" + echo "$prefix barretenberg/cpp/scripts/run_bench.sh native bb-micro-bench/native/client_ivc build/bin/client_ivc_bench ClientIVCBench/Full/5$" echo "$prefix barretenberg/cpp/scripts/run_bench.sh wasm bb-micro-bench/wasm/ultra_honk build-wasm-threads/bin/ultra_honk_bench construct_proof_ultrahonk_power_of_2/20$" - echo "$prefix barretenberg/cpp/scripts/run_bench.sh wasm bb-micro-bench/wasm/client_ivc build-wasm-threads/bin/client_ivc_bench ClientIVCBench/Full/6$" + echo "$prefix barretenberg/cpp/scripts/run_bench.sh wasm bb-micro-bench/wasm/client_ivc build-wasm-threads/bin/client_ivc_bench ClientIVCBench/Full/5$" prefix="$hash:CPUS=1" echo "$prefix barretenberg/cpp/scripts/run_bench.sh native bb-micro-bench/native/client_ivc_verify 
build/bin/client_ivc_bench VerificationOnly$" } @@ -262,7 +334,7 @@ function bench_cmds { function bench { echo_header "bb bench" rm -rf bench-out && mkdir -p bench-out - bench_cmds | STRICT_SCHEDULING=1 parallelise + bench_cmds | STRICT_SCHEDULING=1 parallelize } # Upload assets to release. @@ -291,7 +363,7 @@ case "$cmd" in bench_ivc) # Intended only for dev usage. For CI usage, we run yarn-project/end-to-end/bootstrap.sh bench. # Sample usage (CI=1 required for bench results to be visible; exclude NO_WASM=1 to run wasm benchmarks): - # CI=1 NO_WASM=1 NATIVE_PRESET=op-count-time ./barretenberg/cpp/bootstrap.sh bench_ivc transfer_0_recursions+sponsored_fpc + # CI=1 NO_WASM=1 ./barretenberg/cpp/bootstrap.sh bench_ivc transfer_0_recursions+sponsored_fpc git fetch origin next flow_filter="${1:-}" # optional string-match filter for flow names @@ -299,10 +371,10 @@ case "$cmd" in # Build both native and wasm benchmark binaries builds=( - "build_preset $native_preset --target bb_cli_bench --target bb" + "build_preset $native_preset --target bb" ) if [[ "${NO_WASM:-}" != "1" ]]; then - builds+=("build_preset wasm-threads --target bb_cli_bench") + builds+=("build_preset wasm-threads --target bb") fi parallel --line-buffered --tag -v denoise ::: "${builds[@]}" @@ -310,12 +382,13 @@ case "$cmd" in export AZTEC_CACHE_COMMIT=$commit_hash # TODO currently does nothing! to reinstate in cache_download export FORCE_CACHE_DOWNLOAD=${FORCE_CACHE_DOWNLOAD:-1} - USE_CIRCUITS_CACHE=1 BOOTSTRAP_AFTER=barretenberg BOOSTRAP_TO=yarn-project ../../bootstrap.sh + # make sure that disabling the aztec VM does not interfere with cache results from CI. + DISABLE_AZTEC_VM="" BOOTSTRAP_AFTER=barretenberg BOOSTRAP_TO=yarn-project ../../bootstrap.sh rm -rf bench-out # Recreation of logic from bench. 
- ../../yarn-project/end-to-end/bootstrap.sh build_bench + DISABLE_AZTEC_VM="" ../../yarn-project/end-to-end/bootstrap.sh build_bench # Extract and filter benchmark commands by flow name and wasm/no-wasm function ivc_bench_cmds { @@ -330,12 +403,12 @@ echo "Running commands:" ivc_bench_cmds "$flow_filter" - ivc_bench_cmds "$flow_filter" | STRICT_SCHEDULING=1 parallelise + ivc_bench_cmds "$flow_filter" | STRICT_SCHEDULING=1 parallelize ;; "hash") echo $hash ;; - test|test_cmds|bench|bench_cmds|build_bench|release|build_native|build_nodejs_module|build_asan_fast|build_wasm|build_wasm_threads|build_gcc_syntax_check_only|build_fuzzing_syntax_check_only|build_darwin|build_release|inject_version) + test|test_cmds|bench|bench_cmds|build_bench|release|build_native|build_nodejs_module|build_asan_fast|build_darwin_arm64|build_darwin_amd64|build_wasm|build_wasm_threads|build_gcc_syntax_check_only|build_fuzzing_syntax_check_only|build_release|build_smt_verification|inject_version) $cmd "$@" ;; *) diff --git a/barretenberg/cpp/cmake/arch.cmake b/barretenberg/cpp/cmake/arch.cmake index afe367478dbd..71b6dbb599ff 100644 --- a/barretenberg/cpp/cmake/arch.cmake +++ b/barretenberg/cpp/cmake/arch.cmake @@ -5,7 +5,7 @@ if(WASM) add_compile_options(-fno-exceptions -fno-slp-vectorize) endif() -if(NOT WASM AND NOT ARM) +if(NOT WASM AND NOT ARM AND TARGET_ARCH) message(STATUS "Target architecture: ${TARGET_ARCH}") add_compile_options(-march=${TARGET_ARCH}) endif() diff --git a/barretenberg/cpp/cmake/avm-transpiler.cmake b/barretenberg/cpp/cmake/avm-transpiler.cmake new file mode 100644 index 000000000000..16038f8d0b92 --- /dev/null +++ b/barretenberg/cpp/cmake/avm-transpiler.cmake @@ -0,0 +1,28 @@ +# avm-transpiler static library configuration +# AVM_TRANSPILER_LIB should be set by the CMake preset to point to the appropriate library + +if(NOT AVM_TRANSPILER_LIB) + message(FATAL_ERROR "AVM_TRANSPILER_LIB is not set.
Set it in your CMake preset to the path of libavm_transpiler.a") +endif() + +# Set the path to avm-transpiler relative to barretenberg +set(AVM_TRANSPILER_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../../avm-transpiler") +set(AVM_TRANSPILER_INCLUDE "${AVM_TRANSPILER_DIR}") + +# Check if the library exists +if(NOT EXISTS ${AVM_TRANSPILER_LIB}) + message(FATAL_ERROR "avm-transpiler library not found at ${AVM_TRANSPILER_LIB}\nPlease run './bootstrap.sh' in ${AVM_TRANSPILER_DIR} to build libraries") +endif() + +# Create imported library target +add_library(avm_transpiler STATIC IMPORTED) +set_target_properties(avm_transpiler PROPERTIES + IMPORTED_LOCATION ${AVM_TRANSPILER_LIB} + INTERFACE_INCLUDE_DIRECTORIES ${AVM_TRANSPILER_INCLUDE} +) + +# Define ENABLE_AVM_TRANSPILER globally when transpiler is available +add_definitions(-DENABLE_AVM_TRANSPILER) + +message(STATUS "avm-transpiler library: ${AVM_TRANSPILER_LIB}") +message(STATUS "avm-transpiler include: ${AVM_TRANSPILER_INCLUDE}") diff --git a/barretenberg/cpp/cmake/backward-cpp.cmake b/barretenberg/cpp/cmake/backward-cpp.cmake index b0d70b5e2cba..f7981a5162ac 100644 --- a/barretenberg/cpp/cmake/backward-cpp.cmake +++ b/barretenberg/cpp/cmake/backward-cpp.cmake @@ -1,4 +1,4 @@ -if(CHECK_CIRCUIT_STACKTRACES) +if(ENABLE_STACKTRACES) include(FetchContent) # Also requires one of: libbfd (gnu binutils), libdwarf, libdw (elfutils) @@ -8,4 +8,4 @@ if(CHECK_CIRCUIT_STACKTRACES) SYSTEM # optional, the Backward include directory will be treated as system directory ) FetchContent_MakeAvailable(backward) -endif() \ No newline at end of file +endif() diff --git a/barretenberg/cpp/cmake/httplib.cmake b/barretenberg/cpp/cmake/httplib.cmake new file mode 100644 index 000000000000..4b81f6344492 --- /dev/null +++ b/barretenberg/cpp/cmake/httplib.cmake @@ -0,0 +1,24 @@ +include(FetchContent) + +set(HTTPLIB_INCLUDE "${CMAKE_BINARY_DIR}/_deps/httplib-src") + +FetchContent_Declare( + httplib + GIT_REPOSITORY 
https://github.com/yhirose/cpp-httplib.git + GIT_TAG v0.15.3 +) + +# Disable SSL/TLS support to avoid OpenSSL dependency +set(HTTPLIB_REQUIRE_OPENSSL OFF CACHE BOOL "") +set(HTTPLIB_USE_OPENSSL_IF_AVAILABLE OFF CACHE BOOL "") +set(HTTPLIB_USE_ZLIB_IF_AVAILABLE OFF CACHE BOOL "") +set(HTTPLIB_USE_BROTLI_IF_AVAILABLE OFF CACHE BOOL "") + +FetchContent_GetProperties(httplib) +if(NOT httplib_POPULATED) + FetchContent_Populate(httplib) +endif() + +# Create interface library for httplib +add_library(httplib_headers INTERFACE) +target_include_directories(httplib_headers SYSTEM INTERFACE ${httplib_SOURCE_DIR}) diff --git a/barretenberg/cpp/cmake/libdeflate.cmake b/barretenberg/cpp/cmake/libdeflate.cmake index a9d19d62e7ba..4f1b66c05fca 100644 --- a/barretenberg/cpp/cmake/libdeflate.cmake +++ b/barretenberg/cpp/cmake/libdeflate.cmake @@ -7,11 +7,15 @@ set(LIBDEFLATE_INCLUDE "${CMAKE_BINARY_DIR}/_deps/libdeflate-src/") set(LIBDEFLATE_BUILD_SHARED_LIB OFF CACHE BOOL "Don't build shared libdeflate library") set(LIBDEFLATE_BUILD_GZIP OFF CACHE BOOL "Don't build libdeflate gzip program") +# required for macos cross build +add_definitions(-DLIBDEFLATE_ASSEMBLER_DOES_NOT_SUPPORT_SHA3) + FetchContent_Declare( libdeflate GIT_REPOSITORY https://github.com/ebiggers/libdeflate.git - GIT_TAG b03254d978d7af21a7512dee8fdc3367bc15c656 + GIT_TAG 96836d7d9d10e3e0d53e6edb54eb908514e336c4 ) # Download and populate libdeflate FetchContent_MakeAvailable(libdeflate) + diff --git a/barretenberg/cpp/cmake/lmdb.cmake b/barretenberg/cpp/cmake/lmdb.cmake index c2ffefeb2693..b267d308bc0a 100644 --- a/barretenberg/cpp/cmake/lmdb.cmake +++ b/barretenberg/cpp/cmake/lmdb.cmake @@ -13,7 +13,7 @@ ExternalProject_Add( GIT_TAG ddd0a773e2f44d38e4e31ec9ed81af81f4e4ccbb BUILD_IN_SOURCE YES CONFIGURE_COMMAND "" # No configure step - BUILD_COMMAND make -C libraries/liblmdb -e XCFLAGS=-fPIC liblmdb.a + BUILD_COMMAND ${CMAKE_COMMAND} -E env CC=${CMAKE_C_COMPILER}${CMAKE_C_COMPILER_ARG1} AR=${CMAKE_AR} make -e -C 
libraries/liblmdb XCFLAGS=-fPIC liblmdb.a INSTALL_COMMAND "" UPDATE_COMMAND "" # No update step BUILD_BYPRODUCTS ${LMDB_LIB} diff --git a/barretenberg/cpp/cmake/module.cmake b/barretenberg/cpp/cmake/module.cmake index aab11d6afcf9..91f15ab75a99 100644 --- a/barretenberg/cpp/cmake/module.cmake +++ b/barretenberg/cpp/cmake/module.cmake @@ -26,7 +26,7 @@ target_sources( function(barretenberg_module MODULE_NAME) file(GLOB_RECURSE SOURCE_FILES *.cpp) file(GLOB_RECURSE HEADER_FILES *.hpp *.tcc) - list(FILTER SOURCE_FILES EXCLUDE REGEX ".*\.(fuzzer|test|bench).cpp$") + list(FILTER SOURCE_FILES EXCLUDE REGEX ".*\\.(fuzzer|test|bench)\\.cpp$") target_sources( barretenberg_headers @@ -57,7 +57,7 @@ function(barretenberg_module MODULE_NAME) ${TBB_IMPORTED_TARGETS} ) - if(CHECK_CIRCUIT_STACKTRACES) + if(ENABLE_STACKTRACES) target_link_libraries( ${MODULE_NAME}_objects PUBLIC @@ -108,7 +108,7 @@ function(barretenberg_module MODULE_NAME) ) list(APPEND exe_targets ${MODULE_NAME}_tests) - if(CHECK_CIRCUIT_STACKTRACES) + if(ENABLE_STACKTRACES) target_link_libraries( ${MODULE_NAME}_test_objects PUBLIC @@ -198,7 +198,21 @@ function(barretenberg_module MODULE_NAME) ${MODULE_NAME}_${FUZZER_NAME_STEM}_fuzzer PRIVATE ${MODULE_LINK_NAME} + ${ARGN} ) + + if(ENABLE_STACKTRACES) + target_link_libraries( + ${MODULE_NAME}_${FUZZER_NAME_STEM}_fuzzer + PUBLIC + Backward::Interface + ) + target_link_options( + ${MODULE_NAME}_${FUZZER_NAME_STEM}_fuzzer + PRIVATE + -ldw -lelf + ) + endif() endforeach() endif() @@ -236,7 +250,7 @@ function(barretenberg_module MODULE_NAME) ${TRACY_LIBS} ${TBB_IMPORTED_TARGETS} ) - if(CHECK_CIRCUIT_STACKTRACES) + if(ENABLE_STACKTRACES) target_link_libraries( ${BENCHMARK_NAME}_bench_objects PUBLIC diff --git a/barretenberg/cpp/cmake/nlohmann_json.cmake b/barretenberg/cpp/cmake/nlohmann_json.cmake new file mode 100644 index 000000000000..ee04cb4c010a --- /dev/null +++ b/barretenberg/cpp/cmake/nlohmann_json.cmake @@ -0,0 +1,22 @@ +include(FetchContent) + 
+set(NLOHMANN_JSON_INCLUDE "${CMAKE_BINARY_DIR}/_deps/nlohmann_json-src/include") + +FetchContent_Declare( + nlohmann_json + GIT_REPOSITORY https://github.com/nlohmann/json.git + GIT_TAG v3.11.3 + FIND_PACKAGE_ARGS +) + +set(JSON_BuildTests OFF CACHE INTERNAL "") +set(JSON_Install OFF CACHE INTERNAL "") + +FetchContent_MakeAvailable(nlohmann_json) + +if(NOT nlohmann_json_FOUND) + # FetchContent_MakeAvailable calls FetchContent_Populate if `find_package` is unsuccessful + # so these variables will be available if we reach this case + set_property(DIRECTORY ${nlohmann_json_SOURCE_DIR} PROPERTY EXCLUDE_FROM_ALL) + set_property(DIRECTORY ${nlohmann_json_BINARY_DIR} PROPERTY EXCLUDE_FROM_ALL) +endif() diff --git a/barretenberg/cpp/docs/Doxyfile b/barretenberg/cpp/docs/Doxyfile index 2bdf7d08b706..8f2402c50c2f 100644 --- a/barretenberg/cpp/docs/Doxyfile +++ b/barretenberg/cpp/docs/Doxyfile @@ -610,7 +610,7 @@ INTERNAL_DOCS = NO # Possible values are: SYSTEM, NO and YES. # The default value is: SYSTEM. -CASE_SENSE_NAMES = YES +CASE_SENSE_NAMES = NO # If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with # their full class and namespace scopes in the documentation. 
If set to YES, the diff --git a/barretenberg/cpp/docs/Fuzzing.md b/barretenberg/cpp/docs/Fuzzing.md index 95bc96949cd8..adc5b48cad1c 100644 --- a/barretenberg/cpp/docs/Fuzzing.md +++ b/barretenberg/cpp/docs/Fuzzing.md @@ -46,6 +46,7 @@ The `run.sh` script supports several configuration options: To build with standard clang: ```bash +sudo apt-get install libclang-rt-18-dev cmake --preset fuzzing cmake --build --preset fuzzing ``` @@ -125,8 +126,8 @@ Also, both bigfield and safeuint fuzzer containt the SHOW_INFORMATION preprocess Build with coverage instrumentation: ```bash -cmake --preset clang16-coverage -DFUZZING=ON -cmake --build --preset clang16-coverage +cmake --preset clang20-coverage -DFUZZING=ON +cmake --build --preset clang20-coverage ``` Then run the fuzzer on the corpus and generate the HTML coverage reports: diff --git a/barretenberg/cpp/format.sh b/barretenberg/cpp/format.sh index f657d2a2f514..e1836e48165f 100755 --- a/barretenberg/cpp/format.sh +++ b/barretenberg/cpp/format.sh @@ -4,27 +4,27 @@ set -e if [ "$1" == "staged" ]; then echo Formatting barretenberg staged files... for FILE in $(git diff-index --diff-filter=d --relative --cached --name-only HEAD | grep -e '\.\(cpp\|hpp\|tcc\)$'); do - clang-format-16 -i $FILE + clang-format-20 -i $FILE sed -i.bak 's/\r$//' $FILE && rm ${FILE}.bak git add $FILE done elif [ "$1" == "changed" ]; then echo Formatting barretenberg changed files... 
for FILE in $(git diff-index --diff-filter=d --relative --name-only HEAD | grep -e '\.\(cpp\|hpp\|tcc\)$'); do - clang-format-16 -i $FILE + clang-format-20 -i $FILE sed -i.bak 's/\r$//' $FILE && rm ${FILE}.bak done elif [ "$1" == "check" ]; then find ./src -iname *.hpp -o -iname *.cpp -o -iname *.tcc | grep -v src/msgpack-c | grep -v bb/deps | \ - parallel -N10 clang-format-16 --dry-run --Werror + parallel -N10 clang-format-20 --dry-run --Werror elif [ -n "$1" ]; then for FILE in $(git diff-index --relative --name-only $1 | grep -e '\.\(cpp\|hpp\|tcc\)$'); do - clang-format-16 -i $FILE + clang-format-20 -i $FILE sed -i.bak 's/\r$//' $FILE && rm ${FILE}.bak done else for FILE in $(find ./src -iname *.hpp -o -iname *.cpp -o -iname *.tcc | grep -v src/msgpack-c); do - clang-format-16 -i $FILE + clang-format-20 -i $FILE sed -i.bak 's/\r$//' $FILE && rm ${FILE}.bak done fi diff --git a/barretenberg/cpp/pil/vm2/alu.pil b/barretenberg/cpp/pil/vm2/alu.pil index 0508b6e8c857..e95b42e3675d 100644 --- a/barretenberg/cpp/pil/vm2/alu.pil +++ b/barretenberg/cpp/pil/vm2/alu.pil @@ -14,6 +14,7 @@ pol commit sel_op_add; pol commit sel_op_sub; pol commit sel_op_mul; pol commit sel_op_div; +pol commit sel_op_fdiv; pol commit sel_op_eq; pol commit sel_op_lt; pol commit sel_op_lte; @@ -38,7 +39,7 @@ sel = 0; pol commit cf; // Generic helper column -// Current use: EQ (inverse of a-b) & DIV (remainder) +// Current use: EQ (inverse of a-b), DIV (remainder), and SHL (2**ib) pol commit helper1; // maximum bits the number can hold (i.e. 
8 for a u8): @@ -70,6 +71,7 @@ op_id = sel_op_add * constants.AVM_EXEC_OP_ID_ALU_ADD + sel_op_sub * constants.AVM_EXEC_OP_ID_ALU_SUB + sel_op_mul * constants.AVM_EXEC_OP_ID_ALU_MUL + sel_op_div * constants.AVM_EXEC_OP_ID_ALU_DIV + + sel_op_fdiv * constants.AVM_EXEC_OP_ID_ALU_FDIV + sel_op_eq * constants.AVM_EXEC_OP_ID_ALU_EQ + sel_op_lt * constants.AVM_EXEC_OP_ID_ALU_LT + sel_op_lte * constants.AVM_EXEC_OP_ID_ALU_LTE @@ -93,7 +95,7 @@ execution.sel_execute_alu { // IS_FF CHECKING -pol CHECK_TAG_FF = sel_op_div + sel_op_lt + sel_op_lte + sel_op_not; +pol CHECK_TAG_FF = sel_op_div + sel_op_fdiv + sel_op_lt + sel_op_lte + sel_op_not + sel_shift_ops; // We prove that sel_is_ff == 1 <==> ia_tag == MEM_TAG_FF pol TAG_FF_DIFF = ia_tag - constants.MEM_TAG_FF; pol commit tag_ff_diff_inv; @@ -111,27 +113,26 @@ CHECK_TAG_U128 * (TAG_U128_DIFF * (sel_is_u128 * (1 - tag_u128_diff_inv) + tag_u // TAG CHECKING -// Will become e.g. sel_op_add * ia_tag + (comparison ops) * MEM_TAG_U1 + .... -pol EXPECTED_C_TAG = (sel_op_add + sel_op_sub + sel_op_mul + sel_op_div + sel_op_truncate) * ia_tag + (sel_op_eq + sel_op_lt + sel_op_lte) * constants.MEM_TAG_U1; +pol EXPECTED_C_TAG = (sel_op_add + sel_op_sub + sel_op_mul + sel_op_div + sel_op_truncate + sel_shift_ops) * ia_tag + (sel_op_eq + sel_op_lt + sel_op_lte) * constants.MEM_TAG_U1 + sel_op_fdiv * constants.MEM_TAG_FF; // The tag of c is generated by the opcode and is never wrong. // Gating with (1 - sel_tag_err) is necessary because when an error occurs, we have to set the tag to 0, // which might not be equal to EXPECTED_C_TAG. #[C_TAG_CHECK] -(1 - sel_tag_err) * (EXPECTED_C_TAG - ic_tag) = 0; +(1 - sel_err) * (EXPECTED_C_TAG - ic_tag) = 0; pol commit sel_tag_err; sel_tag_err * (1 - sel_tag_err) = 0; // Tag errors currently have cases: -// 1. Input tagged as a field for NOT or DIV operations +// 1. Input tagged as a field for NOT, DIV, or shift operations or non-field for FDIV operation // 2. 
Mismatched tags for inputs a and b for all opcodes apart from TRUNC // 1 is handled by checking FF_TAG_ERR in TAG_ERR_CHECK and 2 is handled in AB_TAGS_CHECK. -pol FF_TAG_ERR = (sel_op_div + sel_op_not) * sel_is_ff; +pol FF_TAG_ERR = (sel_op_div + sel_op_not + sel_shift_ops) * sel_is_ff + sel_op_fdiv * IS_NOT_FF; pol commit sel_ab_tag_mismatch; sel_ab_tag_mismatch * (1 - sel_ab_tag_mismatch) = 0; -// TODO(MW): It's technically possible to have BOTH cases be true if we perform a DIV with FF ib and integer ia, +// TODO(MW): It's technically possible to have BOTH cases be true if we perform a DIV or shift with FF ib and integer ia, // so for now I take sel_ab_tag_mismatch * FF_TAG_ERR. #[TAG_ERR_CHECK] sel_tag_err = sel_ab_tag_mismatch + FF_TAG_ERR - sel_ab_tag_mismatch * FF_TAG_ERR; @@ -149,6 +150,52 @@ CHECK_AB_TAGS * ( (ia_tag - ib_tag) * ( AB_TAGS_EQ * (1 - ab_tags_diff_inv) + ab #[TAG_MAX_BITS_VALUE] sel { ia_tag, max_bits, max_value } in precomputed.sel_tag_parameters { precomputed.clk, precomputed.tag_max_bits, precomputed.tag_max_value }; +// BIT DECOMPOSITION + +// We use the below to prove correct decomposition of limbs. Currently used by MUL, DIV, SHL, and SHR. +pol commit sel_decompose_a; +// #[OP_ID_CHECK] ensures selectors are mutually exclusive: +sel_decompose_a = sel_mul_div_u128 + sel_shift_ops * IS_NOT_FF; +// Currently, sel_decompose_b would just equal sel_mul_div_u128, so no need for another column. +pol commit a_lo, a_hi, b_lo, b_hi; +pol TWO_POW_64 = 2 ** 64; + +// Reusing columns for decomposition (#[OP_ID_CHECK] ensures selectors are mutually exclusive): +pol DECOMPOSED_A = ((sel_mul_u128 + sel_shift_ops_no_overflow) * ia) + (sel_shift_ops - sel_shift_ops_no_overflow) * (ib - max_bits) + (sel_is_u128 * sel_op_div * (1 - sel_tag_err) * ic); +pol DECOMPOSED_B = ib; +// For MUL and DIV, we decompose into 64 bit limbs. For shifts, we have one limb of b bits and one limb of max_bits - b bits. 
+pol LIMB_SIZE = sel_mul_div_u128 * TWO_POW_64 + sel_shift_ops * two_pow_shift_lo_bits; + +#[A_DECOMPOSITION] +sel_decompose_a * (DECOMPOSED_A - (a_lo + LIMB_SIZE * a_hi)) = 0; +#[B_DECOMPOSITION] +sel_mul_div_u128 * (DECOMPOSED_B - (b_lo + LIMB_SIZE * b_hi)) = 0; + +// Note: the only current use for decomposition of b has 64 bit limbs, so no need for b_lo/hi_bits. +pol commit a_lo_bits, a_hi_bits; +// TODO: Once lookups support expression in tuple, we can inline constant_64 into the lookup. +// Note: only currently used for MUL/DIV u128, so gated by sel_mul_div_u128: +pol commit constant_64; +sel_mul_div_u128 * (64 - constant_64) = 0; + +#[A_LO_BITS] +a_lo_bits - sel_mul_div_u128 * constant_64 - sel_shift_ops * shift_lo_bits = 0; + +#[A_HI_BITS] +a_hi_bits - sel_mul_div_u128 * constant_64 - sel_shift_ops * SHIFT_HI_BITS = 0; + +#[RANGE_CHECK_DECOMPOSITION_A_LO] +sel_decompose_a { a_lo, a_lo_bits } in range_check.sel_alu { range_check.value, range_check.rng_chk_bits }; + +#[RANGE_CHECK_DECOMPOSITION_A_HI] +sel_decompose_a { a_hi, a_hi_bits } in range_check.sel_alu { range_check.value, range_check.rng_chk_bits }; + +#[RANGE_CHECK_DECOMPOSITION_B_LO] +sel_mul_div_u128 { b_lo, constant_64 } in range_check.sel_alu { range_check.value, range_check.rng_chk_bits }; + +#[RANGE_CHECK_DECOMPOSITION_B_HI] +sel_mul_div_u128 { b_hi, constant_64 } in range_check.sel_alu { range_check.value, range_check.rng_chk_bits }; + // ADD @@ -193,21 +240,6 @@ sel_mul_u128 = sel_is_u128 * sel_op_mul; // a * b_l + a_l * b_h * 2^64 = (cf * 2^64 + c_hi) * 2^128 + c // => no need for a_h in final relation -pol commit a_lo; -pol commit a_hi; -pol commit b_lo; -pol commit b_hi; -pol TWO_POW_64 = 2 ** 64; - -// Reusing columns for decomposition (#[OP_ID_CHECK] ensures selectors are mutually exclusive): -pol DECOMPOSED_A = (sel_mul_u128 * ia) + (sel_is_u128 * sel_op_div * (1 - sel_tag_err) * ic); -pol DECOMPOSED_B = ib; - -#[A_DECOMPOSITION] -sel_mul_div_u128 * (DECOMPOSED_A - (a_lo + TWO_POW_64 * 
a_hi)) = 0; -#[B_DECOMPOSITION] -sel_mul_div_u128 * (DECOMPOSED_B - (b_lo + TWO_POW_64 * b_hi)) = 0; - #[ALU_MUL_U128] sel_mul_u128 * (1 - sel_tag_err) * ( @@ -216,67 +248,23 @@ sel_mul_u128 * (1 - sel_tag_err) - (max_value + 1) * (cf * TWO_POW_64 + c_hi) // c_hi * 2^128 + (cf ? 2^192 : 0) ) = 0; -// TODO: Once lookups support expression in tuple, we can inline constant_64 into the lookup. -// Note: only used for MUL/DIV u128, so gated by sel_mul_div_u128 -pol commit constant_64; -sel_mul_div_u128 * (64 - constant_64) = 0; - -#[RANGE_CHECK_MUL_U128_A_LO] -sel_mul_div_u128 { a_lo, constant_64 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; - -#[RANGE_CHECK_MUL_U128_A_HI] -sel_mul_div_u128 { a_hi, constant_64 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; - -#[RANGE_CHECK_MUL_U128_B_LO] -sel_mul_div_u128 { b_lo, constant_64 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; - -#[RANGE_CHECK_MUL_U128_B_HI] -sel_mul_div_u128 { b_hi, constant_64 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; - // No need to range_check c_hi for cases other than u128 because we know a and b's size from the tags and have looked // up max_value. i.e. we cannot provide a malicious c, c_hi such that a + b - c_hi * 2^n = c passes for n < 128. // No need to range_check c_lo = ic because the memory write will ensure ic <= max_value. 
#[RANGE_CHECK_MUL_U128_C_HI] -sel_mul_u128 { c_hi, constant_64 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_mul_u128 { c_hi, constant_64 } in range_check.sel_alu { range_check.value, range_check.rng_chk_bits }; // DIV sel_op_div * (1 - sel_op_div) = 0; -pol commit sel_div_0_err; -sel_div_0_err * (1 - sel_div_0_err) = 0; - -// We prove that sel_div_0_err == 1 <==> sel_op_div == 1 && b == 0: -// sel_div_0_err == 1 <==> b == 0: ib * (sel_div_0_err * (1 - b_inv) + b_inv) + sel_div_0_err - 1 = 0 -// sel_div_0_err == 1 => sel_op_div == 1: sel_div_0_err * ( sel_div_0_err - sel_op_div ) = 0 - -// Proof: -// ==>: -// Assume sel_div_0_err == 1. Then #[DIV_0_ERR] becomes: sel_op_div * ib + 1 - sel_op_div = 0 -// We note that sel_op_div cannot be zero, otherwise we get 1 == 0. -// Since sel_op_div is boolean, sel_op_div == 1 and ib == 0 -// <== : -// Assume sel_op_div == 1 && ib == 0, we get for #[DIV_0_ERR]: sel_div_0_err - 1 + sel_div_0_err * (sel_div_0_err - 1) = 0 -// sel_div_0_err being boolean the second additive term is zero and therefore sel_div_0_err - 1 == 0. -pol commit b_inv; -#[DIV_0_ERR] -sel_op_div * (ib * (sel_div_0_err * (1 - b_inv) + b_inv) + sel_div_0_err - 1) + sel_div_0_err * ( sel_div_0_err - sel_op_div ) = 0; - // We need to show that remainder < b (remainder stored in helper1) // TODO(MW): Reuse #[INT_GT] by setting lt_ops_input_a = b, lt_ops_input_b = helper1, lt_ops_result_c = 1 (gated by div_0_err). // The below only exists to gate this lookup for now: pol commit sel_div_no_0_err; sel_div_no_0_err = sel_op_div * (1 - sel_div_0_err); #[GT_DIV_REMAINDER] -sel_div_no_0_err { ib, helper1, sel_op_div } in gt.sel { gt.input_a, gt.input_b, gt.res }; - -// DIV - non u128 - -// Show a - remainder = b * c -// Note: Since a, b, c, and remainder are under 64 bits, we are protected against a - remainder underflowing, -// i.e. 
b * c (< 128bits) cannot equal underflowed a - remainder (> 253 bits) -#[ALU_DIV_NON_U128] -sel_op_div * IS_NOT_U128 * (1 - sel_err) * (ib * ic - (ia - helper1)) = 0; +sel_div_no_0_err { ib, helper1, sel_op_div } in gt.sel_alu { gt.input_a, gt.input_b, gt.res }; // DIV - u128 @@ -311,6 +299,46 @@ sel_is_u128 * sel_op_div * (1 - sel_err) * a_hi * b_hi = 0; #[ALU_DIV_U128] sel_is_u128 * sel_op_div * (1 - sel_err) * (ic * b_lo + a_lo * b_hi * TWO_POW_64 - (ia - helper1)) = 0; +// FDIV + +sel_op_fdiv * (1 - sel_op_fdiv) = 0; + +// For FDIV, we reuse the same main relation for non-u128 DIV (ALU_FDIV_DIV_NON_U128) but constrain that there is no remainder, stored in helper1. +// => The relation shows that b * c = a for FF + +// DIV & FDIV - shared ops +// #[OP_ID_CHECK] ensures selectors are mutually exclusive: +pol DIV_OPS = sel_op_div + sel_op_fdiv; + +pol commit sel_div_0_err; +sel_div_0_err * (1 - sel_div_0_err) = 0; + +// We prove that sel_div_0_err == 1 <==> DIV_OPS == 1 && b == 0: +// sel_div_0_err == 1 <==> b == 0: ib * (sel_div_0_err * (1 - b_inv) + b_inv) + sel_div_0_err - 1 = 0 +// sel_div_0_err == 1 => DIV_OPS == 1: sel_div_0_err * ( sel_div_0_err - DIV_OPS ) = 0 + +// Proof: +// ==>: +// Assume sel_div_0_err == 1. Then #[DIV_0_ERR] becomes: DIV_OPS * ib + 1 - DIV_OPS = 0 +// We note that DIV_OPS cannot be zero, otherwise we get 1 == 0. +// Since DIV_OPS is boolean, DIV_OPS == 1 and ib == 0 +// <== : +// Assume DIV_OPS == 1 && ib == 0, we get for #[DIV_0_ERR]: sel_div_0_err - 1 + sel_div_0_err * (sel_div_0_err - 1) = 0 +// sel_div_0_err being boolean the second additive term is zero and therefore sel_div_0_err - 1 == 0. 
+pol commit b_inv; +#[DIV_0_ERR] +DIV_OPS * (ib * (sel_div_0_err * (1 - b_inv) + b_inv) + sel_div_0_err - 1) + sel_div_0_err * (sel_div_0_err - DIV_OPS) = 0; + +// DIV & FDIV - non u128 + +pol DIV_OPS_NON_U128 = (1 - sel_err) * (sel_op_fdiv + sel_op_div * IS_NOT_U128); + +// Show a - remainder = b * c +// Note: Since a, b, c, and remainder are under 64 bits (or remainder == 0 for FF), we are protected against a - remainder underflowing, +// i.e. b * c (< 128bits) cannot equal underflowed a - remainder (> 253 bits) +#[ALU_FDIV_DIV_NON_U128] +DIV_OPS_NON_U128 * (ib * ic - (ia - sel_op_div * helper1)) = 0; + // EQ sel_op_eq * (1 - sel_op_eq) = 0; @@ -367,7 +395,7 @@ in ff_gt.sel_gt { ff_gt.a, ff_gt.b, ff_gt.result }; #[INT_GT] sel_int_lt_ops { lt_ops_input_a, lt_ops_input_b, lt_ops_result_c } -in gt.sel { gt.input_a, gt.input_b, gt.res }; +in gt.sel_alu { gt.input_a, gt.input_b, gt.res }; // NOT // Input is sent to ia, ia_tag and output is sent to ib, ib_tag. @@ -378,6 +406,87 @@ sel_op_not * (1 - sel_op_not) = 0; #[NOT_OP_MAIN] sel_op_not * (1 - sel_tag_err) * (ia + ib - max_value) = 0; +// SHIFTS - Taken from vm1: +// Given (1) an input a, within the range [0, 2**128-1], +// (2) a value s, the amount of bits to shift a by (stored in ib), +// (3) and a memory tag, mem_tag that supports a maximum of t bits (stored in max_bits). +// Split input a into Big Endian hi and lo limbs, (we re-use the a_hi and a_lo columns we used for the MUL/DIV u128 operators) +// a_hi and a_lo, and the number of bits represented by the memory tag, t. +// If we are shifting by more than the bit length represented by the memory tag, the result is trivially zero. 
+ +// SHL + +// === Steps when performing SHL +// (1) Prove the correct decomposition: a_hi * 2**(t-s) + a_lo = a ---> see #[A_DECOMPOSITION] +// (2) Range check a_hi < 2**s && a_lo < 2**(t-s) ---> see #[RANGE_CHECK_DECOMPOSITION_A_LO/HI] +// (3) Return a_lo * 2**s ---> see #[ALU_SHL] +// +// <-- s bits --> | <-- (t-s) bits --> +// ------------------|------------------- +// | a_hi | a_lo | --> a +// -------------------------------------- +// +// Use of helper1 for SHL: +// We have: s (=ib), t (=max_bits), 2**(t-s) (=two_pow_shift_lo_bits), and 2**t (=max_value + 1) +// We want: 2**s (=2**ib), ideally without another precomputed.power_of_2 lookup +// Injecting 2**s (=helper1), we can check that 2**t == 2**(t-s) * 2**s: +#[SHL_TWO_POW_SHIFT] +sel_op_shl * sel_shift_ops_no_overflow * (1 - sel_tag_err) * (max_value + 1 - two_pow_shift_lo_bits * helper1) = 0; + +#[ALU_SHL] +sel_op_shl * (1 - sel_tag_err) * (ic - sel_shift_ops_no_overflow * a_lo * helper1 ) = 0; + +// SHR + +// === Steps when performing SHR +// (1) Prove the correct decomposition: a_hi * 2**s + a_lo = a ---> see #[A_DECOMPOSITION] +// (2) Range check a_hi < 2**(t-s) && a_lo < 2**s ---> see #[RANGE_CHECK_DECOMPOSITION_A_LO/HI] +// (3) Return a_hi ---> see #[ALU_SHR] +// +// <--(t-s) bits --> | <-- s bits --> +// -------------------|------------------- +// | a_hi | a_lo | --> a +// --------------------------------------- + +#[ALU_SHR] +sel_op_shr * (1 - sel_tag_err) * (ic - sel_shift_ops_no_overflow * a_hi) = 0; + +// SHL & SHR - Shared relations: + +pol commit sel_shift_ops; +// sel_op_shl || sel_op_shr: +sel_shift_ops = sel_op_shl + sel_op_shr; + +pol commit sel_shift_ops_no_overflow; +// sel_shift_ops_no_overflow = 1 ==> sel_shift_ops = 1: +sel_shift_ops_no_overflow * (1 - sel_shift_ops) = 0; +// (sel_op_shl || sel_op_shr) & b < max_bits: see below* for constraining. 
+pol SHIFT_OVERFLOW = sel_shift_ops * (1 - sel_shift_ops_no_overflow); + +// The bit size of the lo limb used by the shift: +pol commit shift_lo_bits; +pol commit two_pow_shift_lo_bits; + +// *For SHL and SHR, when the shift (b) > max_bits we want SHIFT_OVERFLOW == 1 and c == 0: +// SHL: a_lo_bits = max_bits - b -> will underflow +// SHR: a_hi_bits = max_bits - b -> will underflow +// so instead set a_lo = b - max_bits and shift_lo_bits = max_bits for both SHL and SHR (see DECOMPOSED_A) and reuse the range check +// RANGE_CHECK_DECOMPOSITION_A_LO to prove that b > max_bits <==> SHIFT_OVERFLOW = 1 <==> c = 0. +// Note: sel_decompose_a is gated by IS_NOT_FF, so no gating for the FF tag error case is required below. + +#[SHIFTS_LO_BITS] +shift_lo_bits + - sel_shift_ops_no_overflow * (sel_op_shl * (max_bits - ib) + sel_op_shr * ib) + - SHIFT_OVERFLOW * max_bits + = 0; + +// Set shift_hi_bits = max_bits in the overflow case, so RANGE_CHECK_DECOMPOSITION_A_HI passes. Since we set c == 0 in this case, +// we don't need to constrain that a_hi is within a certain limb size. +pol SHIFT_HI_BITS = max_bits - sel_shift_ops_no_overflow * shift_lo_bits; + +#[SHIFTS_TWO_POW] +sel_shift_ops_no_overflow { shift_lo_bits, two_pow_shift_lo_bits } in precomputed.sel_range_8 { precomputed.clk, precomputed.power_of_2 }; + // TRUNCATE (ALU part for opcodes CAST and SET) // Input of truncation value is sent to ia, destination tag in ia_tag and output is computed as ic. // We have one dispatching lookup from execution specific to CAST and another one for SET, as @@ -404,9 +513,9 @@ sel_op_truncate * (1 - sel_op_truncate) = 0; // Note that we enforce the output tag through this dispatching lookup by having ia_tag matching both mem_tag_reg[1] and rop[2]. 
#[EXEC_DISPATCHING_CAST] execution.sel_execute_cast { - execution.register[0], execution.rop[2], execution.subtrace_operation_id, execution.register[1], execution.mem_tag_reg[1] + execution.register[0], execution.rop[2], execution.subtrace_operation_id, execution.register[1], execution.mem_tag_reg[1], execution.sel_opcode_error } in sel_op_truncate { - ia, ia_tag, op_id, ic, ia_tag + ia, ia_tag, op_id, ic, ia_tag, /*sel_opcode_error=*/ precomputed.zero }; // SET DISPATCHING @@ -425,9 +534,9 @@ execution.sel_execute_cast { // Note that we enforce the output tag through this dispatching lookup by having ia_tag matching both mem_tag_reg[0] and rop[1]. #[EXEC_DISPATCHING_SET] execution.sel_execute_set { - execution.rop[2], execution.rop[1], execution.subtrace_operation_id, execution.register[0], execution.mem_tag_reg[0] + execution.rop[2], execution.rop[1], execution.subtrace_operation_id, execution.register[0], execution.mem_tag_reg[0], execution.sel_opcode_error } in sel_op_truncate { - ia, ia_tag, op_id, ic, ic_tag + ia, ia_tag, op_id, ic, ic_tag, /*sel_opcode_error=*/ precomputed.zero }; // 3 cases for truncation: @@ -489,4 +598,4 @@ mid_bits = sel_trunc_non_trivial * (128 - max_bits); // is supported by our range_check gadget. // No need to range_check ic because the memory write will ensure ic <= max_value. #[RANGE_CHECK_TRUNC_MID] -sel_trunc_non_trivial { mid, mid_bits } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_trunc_non_trivial { mid, mid_bits } in range_check.sel_alu { range_check.value, range_check.rng_chk_bits }; diff --git a/barretenberg/cpp/pil/vm2/bitwise.pil b/barretenberg/cpp/pil/vm2/bitwise.pil index c8f0f98bb54c..221c495bda34 100644 --- a/barretenberg/cpp/pil/vm2/bitwise.pil +++ b/barretenberg/cpp/pil/vm2/bitwise.pil @@ -48,12 +48,19 @@ sel * (1 - sel) = 0; // No relations will be checked if this identity is satisfied. #[skippable_if] -sel + last = 0; // They are both boolean so it corresponds to sel == 0 AND last == 0. 
+sel = 0; pol commit start; // Identifies when we want to capture the output to the main trace. // Must be constrained as a boolean as any selector used in a lookup/permutation. start * (1 - start) = 0; +// This is used to decouple generation of inverses of lookups into this trace. +pol commit start_keccak; +pol commit start_sha256; +start_keccak * (1 - start_keccak) = 0; +// If any of the above selectors is 1, then start must be 1. +(start_keccak + start_sha256) * (1 - start) = 0; + // To support dynamically sized memory operands we use a counter against a lookup // This decrementing counter goes from [TAG_LEN, 0] where TAG_LEN is the number of bytes in the // corresponding integer. i.e. TAG_LEN is between 1 (U1/U8) and 16 (U128). diff --git a/barretenberg/cpp/pil/vm2/bytecode/address_derivation.pil b/barretenberg/cpp/pil/vm2/bytecode/address_derivation.pil index 310dd5c08368..2edfa36c967c 100644 --- a/barretenberg/cpp/pil/vm2/bytecode/address_derivation.pil +++ b/barretenberg/cpp/pil/vm2/bytecode/address_derivation.pil @@ -39,9 +39,19 @@ namespace address_derivation; pol commit partial_address_domain_separator; sel * (partial_address_domain_separator - constants.GENERATOR_INDEX__PARTIAL_ADDRESS) = 0; + // TODO: We need these temporarily while we dont allow for aliases in the lookup tuple + pol commit const_two; + sel * (const_two - 2) = 0; + pol commit const_three; + sel * (const_three - 3) = 0; + pol commit const_four; + sel * (const_four - 4) = 0; + pol commit const_five; + sel * (const_five - 5) = 0; + #[SALTED_INITIALIZATION_HASH_POSEIDON2_0] - sel { partial_address_domain_separator, salt, init_hash, salted_init_hash } - in poseidon2_hash.start { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + sel { partial_address_domain_separator, salt, init_hash, salted_init_hash, const_two } + in poseidon2_hash.start { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output, 
poseidon2_hash.num_perm_rounds_rem }; #[SALTED_INITIALIZATION_HASH_POSEIDON2_1] sel { deployer_addr, precomputed.zero, precomputed.zero, salted_init_hash } @@ -53,8 +63,8 @@ namespace address_derivation; pol commit partial_address; #[PARTIAL_ADDRESS_POSEIDON2] - sel { partial_address_domain_separator, class_id, salted_init_hash, partial_address } - in poseidon2_hash.end { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + sel { sel /* =1 */, partial_address_domain_separator, class_id, salted_init_hash, partial_address } + in poseidon2_hash.end { poseidon2_hash.start, poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; // Hash the public keys @@ -68,20 +78,20 @@ namespace address_derivation; // Remove all the 0s for is_infinite when removed from public_keys.nr // https://github.com/AztecProtocol/aztec-packages/issues/7529 #[PUBLIC_KEYS_HASH_POSEIDON2_0] - sel { public_keys_hash_domain_separator, nullifier_key_x, nullifier_key_y, public_keys_hash } - in poseidon2_hash.start { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + sel { public_keys_hash_domain_separator, nullifier_key_x, nullifier_key_y, public_keys_hash, const_five } + in poseidon2_hash.start { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output, poseidon2_hash.num_perm_rounds_rem }; #[PUBLIC_KEYS_HASH_POSEIDON2_1] - sel { precomputed.zero, incoming_viewing_key_x, incoming_viewing_key_y, public_keys_hash } - in poseidon2_hash.sel { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + sel { precomputed.zero, incoming_viewing_key_x, incoming_viewing_key_y, public_keys_hash, const_four } + in poseidon2_hash.sel { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output, poseidon2_hash.num_perm_rounds_rem }; #[PUBLIC_KEYS_HASH_POSEIDON2_2] - sel { 
precomputed.zero, outgoing_viewing_key_x, outgoing_viewing_key_y, public_keys_hash } - in poseidon2_hash.sel { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + sel { precomputed.zero, outgoing_viewing_key_x, outgoing_viewing_key_y, public_keys_hash, const_three } + in poseidon2_hash.sel { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output, poseidon2_hash.num_perm_rounds_rem }; #[PUBLIC_KEYS_HASH_POSEIDON2_3] - sel { precomputed.zero, tagging_key_x, tagging_key_y, public_keys_hash } - in poseidon2_hash.sel { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + sel { precomputed.zero, tagging_key_x, tagging_key_y, public_keys_hash, const_two } + in poseidon2_hash.sel { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output, poseidon2_hash.num_perm_rounds_rem }; #[PUBLIC_KEYS_HASH_POSEIDON2_4] sel { precomputed.zero, precomputed.zero, precomputed.zero, public_keys_hash } @@ -97,8 +107,8 @@ namespace address_derivation; sel * (preaddress_domain_separator - constants.GENERATOR_INDEX__CONTRACT_ADDRESS_V1) = 0; #[PREADDRESS_POSEIDON2] - sel { preaddress_domain_separator, public_keys_hash, partial_address, preaddress } - in poseidon2_hash.end { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + sel { sel /* =1 */, preaddress_domain_separator, public_keys_hash, partial_address, preaddress } + in poseidon2_hash.end { poseidon2_hash.start, poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; // Derive preaddress public key diff --git a/barretenberg/cpp/pil/vm2/bytecode/bc_decomposition.pil b/barretenberg/cpp/pil/vm2/bytecode/bc_decomposition.pil index faf38fcf0b82..d629be2309a4 100644 --- a/barretenberg/cpp/pil/vm2/bytecode/bc_decomposition.pil +++ b/barretenberg/cpp/pil/vm2/bytecode/bc_decomposition.pil @@ 
-59,11 +59,11 @@ pol commit bytes_rem_min_one_inv; sel * ((bytes_remaining - 1) * (last_of_contract * (1 - bytes_rem_min_one_inv) + bytes_rem_min_one_inv) + last_of_contract - 1) = 0; // The following alias is a boolean because last_of_contract == 1 ==> sel == 1 ==> first_row == 0 (sel' is defined) -pol FIRST_OR_LAST_CONTRACT = precomputed.first_row + last_of_contract; +pol LATCH_CONDITION = precomputed.first_row + last_of_contract; // Initialization of pc per bytecode to zero #[BC_DEC_PC_ZERO_INITIALIZATION] -FIRST_OR_LAST_CONTRACT * pc' = 0; +LATCH_CONDITION * pc' = 0; // pc evolution (increment within bytecode) #[BC_DEC_PC_INCREMENT] @@ -77,7 +77,7 @@ sel * (1 - last_of_contract) * (bytes_remaining' - bytes_remaining + 1) = 0; // for a given instruction (pair [pc, bytecode_id]). Otherwise, instruction fetching might // be misled to copy the wrong instruction from this subtrace. #[BC_DEC_ID_CONSTANT] -(1 - FIRST_OR_LAST_CONTRACT) * (id' - id) = 0; +(1 - LATCH_CONDITION) * (id' - id) = 0; // This constrains that the bytes are in the range 0 to 255. #[BYTES_ARE_BYTES] @@ -132,107 +132,130 @@ pol commit bytes_pc_plus_1, bytes_pc_plus_2, bytes_pc_plus_3, bytes_pc_plus_4, b // 39 | 1 | 1 | 0xD | 0x0 | 0x0 | ... pol commit bytes_to_read; -pol commit sel_overflow_correction_needed; -sel_overflow_correction_needed * (1 - sel_overflow_correction_needed) = 0; +pol commit sel_windows_gt_remaining; +sel_windows_gt_remaining * (1 - sel_windows_gt_remaining) = 0; // We need to constrain bytes_to_read = min(WINDOW_SIZE, bytes_remaining) which is equal to // bytes_remaining if bytes_remaining <= WINDOW_SIZE and WINDOW_SIZE otherwise. -// Absolute value of WINDOW_SIZE - bytes_remaining -pol commit abs_diff; -// Remark: The factor sel in relation below is only required to use the skippable mechanism. 
-// sel_overflow_correction_needed = 1 if bytes_remaining < WINDOW_SIZE and -// sel_overflow_correction_needed = 0 if bytes_remaining > WINDOW_SIZE -#[BC_DEC_ABS_DIFF] -sel * (2 * sel_overflow_correction_needed * (WINDOW_SIZE - bytes_remaining) - WINDOW_SIZE + bytes_remaining - abs_diff) = 0; +pol commit is_windows_eq_remaining; +pol commit windows_min_remaining_inv; +is_windows_eq_remaining * (1 - is_windows_eq_remaining) = 0; -// We prove that the abs_diff is positive (and therefore sel_overflow_correction_needed correct) over the integers -// using a range check over 24 bits. We know that WINDOWS_SIZE fits into 16 bits and bytes_remaining cannot be larger -// than the trace size 2^21 (and bytecode hashing/validation could not pass). This provides guarantee that -// abs_diff cannot be the result of an underflow. This would be only possible for bytes_remaining being very close -// to the field order. +// bytes_remaining == WINDOW_SIZE <==> is_windows_eq_remaining == 1 +#[IS_WINDOWS_EQ_REMAINING] +sel * ((WINDOW_SIZE - bytes_remaining) * (is_windows_eq_remaining * (1 - windows_min_remaining_inv) + windows_min_remaining_inv) + is_windows_eq_remaining - 1) = 0; -pol commit abs_diff_lo; -pol commit abs_diff_hi; -abs_diff = abs_diff_hi * 2**16 + abs_diff_lo; -#[ABS_DIFF_LO_IS_U16] -sel { abs_diff_lo } in precomputed.sel_range_16 { precomputed.clk }; +// We want to constrain sel_windows_gt_remaining as follows: +// sel_windows_gt_remaining = 1 if bytes_remaining < WINDOW_SIZE and +// sel_windows_gt_remaining = 0 if bytes_remaining >= WINDOW_SIZE +// +// Instead of using range-checks/GT gadget to show an inequality, we leverage on the fact that bytes_remaining +// is a decrementing counter. +// So the pattern for sel_windows_gt_remaining bottom-up is: 1, 1, 1, ..., 1, 0, 0, 0, ... +// where first 0 entry is at the row where bytes_remaining == WINDOW_SIZE. 
+// +// We initialize sel_windows_gt_remaining = 1 on last row of the bytecode +// and propagate the boolean value to the above row whenever bytes_remaining != WINDOW_SIZE. +// If we encounter the row where bytes_remaining == WINDOW_SIZE, we swap the boolean value. +// Whenever the bytecode is smaller than WINDOW_SIZE, we exclusively propagate the value 1. + +// Set sel_windows_gt_remaining = 1 on the last row of the bytecode. +#[SEL_WINDOWS_GT_REMAINING_INIT] +last_of_contract * (1 - sel_windows_gt_remaining) = 0; -#[ABS_DIFF_HI_IS_U8] -sel { abs_diff_hi } in precomputed.sel_range_8 { precomputed.clk }; +// If we reach is_windows_eq_remaining == 1, we pass from 1 to 0. (Decrement a single time.) +// sel_windows_gt_remaining = sel_windows_gt_remaining' - is_windows_eq_remaining +#[SEL_WINDOWS_GT_REMAINING_PROPAGATION] +(1 - LATCH_CONDITION) * (sel_windows_gt_remaining' - is_windows_eq_remaining - sel_windows_gt_remaining) = 0; -#[BC_DEC_OVERFLOW_CORRECTION_VALUE] -sel * ((1 - sel_overflow_correction_needed) * (bytes_to_read - WINDOW_SIZE) + sel_overflow_correction_needed * (bytes_to_read - bytes_remaining)) = 0; +#[SET_BYTES_TO_READ] +sel * ((1 - sel_windows_gt_remaining) * (bytes_to_read - WINDOW_SIZE) + sel_windows_gt_remaining * (bytes_to_read - bytes_remaining)) = 0; // Constrain shifted columns. -// We need to guard with (1 - FIRST_OR_LAST_CONTRACT) because otherwise we would need to copy value +// We need to guard with (1 - LATCH_CONDITION) because otherwise we would need to copy value // from another bytecode to satisfy the relations. The issue is then that it would pollute // the value packed_field and would lead computing the wrong hash in bytecode hashing. 
-// By writing the following relations in the form bytes_pc_plus_i = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_{i-1}'
-// rather than (1 - FIRST_OR_LAST_CONTRACT) * (bytes_pc_plus_i - bytes_pc_plus_{i-1}') = 0
+// By writing the following relations in the form bytes_pc_plus_i = (1 - LATCH_CONDITION) * bytes_pc_plus_{i-1}'
+// rather than (1 - LATCH_CONDITION) * (bytes_pc_plus_i - bytes_pc_plus_{i-1}') = 0
// we not only help witness generation to be satisfied but enforce the propagation
// of zero values (instead of garbage) which effectively prevents a malicious prover from
// adding garbage values beyond the end of the bytecode. This is not strictly necessary
// but this property comes at no additional cost.
-bytes_pc_plus_1 = (1 - FIRST_OR_LAST_CONTRACT) * bytes';
-bytes_pc_plus_2 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_1';
-bytes_pc_plus_3 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_2';
-bytes_pc_plus_4 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_3';
-bytes_pc_plus_5 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_4';
-bytes_pc_plus_6 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_5';
-bytes_pc_plus_7 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_6';
-bytes_pc_plus_8 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_7';
-bytes_pc_plus_9 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_8';
-bytes_pc_plus_10 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_9';
-bytes_pc_plus_11 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_10';
-bytes_pc_plus_12 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_11';
-bytes_pc_plus_13 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_12';
-bytes_pc_plus_14 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_13';
-bytes_pc_plus_15 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_14';
-bytes_pc_plus_16 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_15';
-bytes_pc_plus_17 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_16';
-bytes_pc_plus_18 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_17';
-bytes_pc_plus_19 = (1 - FIRST_OR_LAST_CONTRACT) * 
bytes_pc_plus_18'; -bytes_pc_plus_20 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_19'; -bytes_pc_plus_21 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_20'; -bytes_pc_plus_22 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_21'; -bytes_pc_plus_23 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_22'; -bytes_pc_plus_24 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_23'; -bytes_pc_plus_25 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_24'; -bytes_pc_plus_26 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_25'; -bytes_pc_plus_27 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_26'; -bytes_pc_plus_28 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_27'; -bytes_pc_plus_29 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_28'; -bytes_pc_plus_30 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_29'; -bytes_pc_plus_31 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_30'; -bytes_pc_plus_32 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_31'; -bytes_pc_plus_33 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_32'; -bytes_pc_plus_34 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_33'; -bytes_pc_plus_35 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_34'; -bytes_pc_plus_36 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_35'; +bytes_pc_plus_1 = (1 - LATCH_CONDITION) * bytes'; +bytes_pc_plus_2 = (1 - LATCH_CONDITION) * bytes_pc_plus_1'; +bytes_pc_plus_3 = (1 - LATCH_CONDITION) * bytes_pc_plus_2'; +bytes_pc_plus_4 = (1 - LATCH_CONDITION) * bytes_pc_plus_3'; +bytes_pc_plus_5 = (1 - LATCH_CONDITION) * bytes_pc_plus_4'; +bytes_pc_plus_6 = (1 - LATCH_CONDITION) * bytes_pc_plus_5'; +bytes_pc_plus_7 = (1 - LATCH_CONDITION) * bytes_pc_plus_6'; +bytes_pc_plus_8 = (1 - LATCH_CONDITION) * bytes_pc_plus_7'; +bytes_pc_plus_9 = (1 - LATCH_CONDITION) * bytes_pc_plus_8'; +bytes_pc_plus_10 = (1 - LATCH_CONDITION) * bytes_pc_plus_9'; +bytes_pc_plus_11 = (1 - LATCH_CONDITION) * bytes_pc_plus_10'; +bytes_pc_plus_12 = (1 - LATCH_CONDITION) * bytes_pc_plus_11'; +bytes_pc_plus_13 = (1 - LATCH_CONDITION) * bytes_pc_plus_12'; 
+bytes_pc_plus_14 = (1 - LATCH_CONDITION) * bytes_pc_plus_13'; +bytes_pc_plus_15 = (1 - LATCH_CONDITION) * bytes_pc_plus_14'; +bytes_pc_plus_16 = (1 - LATCH_CONDITION) * bytes_pc_plus_15'; +bytes_pc_plus_17 = (1 - LATCH_CONDITION) * bytes_pc_plus_16'; +bytes_pc_plus_18 = (1 - LATCH_CONDITION) * bytes_pc_plus_17'; +bytes_pc_plus_19 = (1 - LATCH_CONDITION) * bytes_pc_plus_18'; +bytes_pc_plus_20 = (1 - LATCH_CONDITION) * bytes_pc_plus_19'; +bytes_pc_plus_21 = (1 - LATCH_CONDITION) * bytes_pc_plus_20'; +bytes_pc_plus_22 = (1 - LATCH_CONDITION) * bytes_pc_plus_21'; +bytes_pc_plus_23 = (1 - LATCH_CONDITION) * bytes_pc_plus_22'; +bytes_pc_plus_24 = (1 - LATCH_CONDITION) * bytes_pc_plus_23'; +bytes_pc_plus_25 = (1 - LATCH_CONDITION) * bytes_pc_plus_24'; +bytes_pc_plus_26 = (1 - LATCH_CONDITION) * bytes_pc_plus_25'; +bytes_pc_plus_27 = (1 - LATCH_CONDITION) * bytes_pc_plus_26'; +bytes_pc_plus_28 = (1 - LATCH_CONDITION) * bytes_pc_plus_27'; +bytes_pc_plus_29 = (1 - LATCH_CONDITION) * bytes_pc_plus_28'; +bytes_pc_plus_30 = (1 - LATCH_CONDITION) * bytes_pc_plus_29'; +bytes_pc_plus_31 = (1 - LATCH_CONDITION) * bytes_pc_plus_30'; +bytes_pc_plus_32 = (1 - LATCH_CONDITION) * bytes_pc_plus_31'; +bytes_pc_plus_33 = (1 - LATCH_CONDITION) * bytes_pc_plus_32'; +bytes_pc_plus_34 = (1 - LATCH_CONDITION) * bytes_pc_plus_33'; +bytes_pc_plus_35 = (1 - LATCH_CONDITION) * bytes_pc_plus_34'; +bytes_pc_plus_36 = (1 - LATCH_CONDITION) * bytes_pc_plus_35'; // For bytecode hashing, we need to re-pack 31 bytes at some PCs into a field. // We will have a selector for the PCs that are packed. This only needs to happen // for PCs 0, 31, 62, ... -// The "sequencer" can choose which PCs to pack, but the bytecode hashing trace -// will use this selector (as 1) in the lookup tuple. Therefore, if the sequencer -// does not choose at least the minimum amount of rows, the lookup will fail. -// NOTE: The bytecode hashing trace will constrain itself that every 31st pc is packed. 
- -// TODO: We need to ensure that there is a single row per pair (pc, bytecode_id) in -// this subtrace (otherwise, a malicious prover might copy wrong values). -// This is ensured by #[SEL_TOGGLED_AT_PACKED] and #[BC_DEC_PC_INCREMENT] relations but -// still have to implement the unicity on bytecode_id. The current plan is to -// add a permutation between this sub-trace and bc_retrieval.pil of the form: -// last_of_contract {id} is bc_retrieval.sel_XXX {bc_retrieval.bytecode_id} pol commit sel_packed; sel_packed * (1 - sel_packed) = 0; +// Permutation selectors (bc_hashing.pil). +pol commit sel_packed_read[3]; +sel_packed_read[0] * (1 - sel_packed_read[0]) = 0; +sel_packed_read[1] * (1 - sel_packed_read[1]) = 0; +sel_packed_read[2] * (1 - sel_packed_read[2]) = 0; + +#[PACKED_ROW_NEEDS_PERM_SELECTOR] +sel_packed = sel_packed_read[0] + sel_packed_read[1] + sel_packed_read[2]; + #[SEL_TOGGLED_AT_PACKED] sel_packed * (1 - sel) = 0; +// Each first row of a new bytecode id should be packed (#17428): +#[SEL_PACKED_INIT] +LATCH_CONDITION * (sel' - sel_packed') = 0; + +// NOTE: The bytecode hashing trace constrains that every 31st pc is packed, however +// we must duplicate this check here as we do not constrain uniqueness of bytecode ids: +pol commit next_packed_pc; +// next_packed_pc == pc <==> sel_packed == 1 +pol commit next_packed_pc_min_pc_inv; +#[PC_IS_PACKED] +sel * ((next_packed_pc - pc) * (sel_packed * (1 - next_packed_pc_min_pc_inv) + next_packed_pc_min_pc_inv) + sel_packed - 1) = 0; + +// If we reach sel_packed == 1, we increment next_packed_pc by 31. #[SEL_PACKED_INIT] and #[BC_DEC_PC_ZERO_INITIALIZATION] ensure +// we start a new bytecode with sel_packed = 1 and pc = 0, and the below sets next_packed_pc' += 31. +#[NEXT_PACKED_PC_PROPAGATION] +(1 - LATCH_CONDITION) * (next_packed_pc' - (next_packed_pc + sel_packed * 31)) = 0; + // Important: this "field" is not 32 bytes (or 254 bits) long. It is 31 bytes long. 
pol commit packed_field; diff --git a/barretenberg/cpp/pil/vm2/bytecode/bc_hashing.pil b/barretenberg/cpp/pil/vm2/bytecode/bc_hashing.pil index 7e7ef233f192..01d14d3c7ef1 100644 --- a/barretenberg/cpp/pil/vm2/bytecode/bc_hashing.pil +++ b/barretenberg/cpp/pil/vm2/bytecode/bc_hashing.pil @@ -3,6 +3,7 @@ include "bc_decomposition.pil"; include "../precomputed.pil"; include "../poseidon2_hash.pil"; +include "../constants_gen.pil"; // ########### // Design idea @@ -15,15 +16,16 @@ include "../poseidon2_hash.pil"; // Finally, we copy through a lookup/permutation based on the selector these field elements // to here (bc_hashing.pil) and then proceed to hashing. -// pc_index | bytecode_id | packed_field | incremental_hash | output_hash | latch | -// -----------+-------------+--------------+------------------+--------------------+-------- -// 0 | 0 | 0xabc | len | H(0xabc,len) = 0x2 | 0 -// 31 | 0 | 0x123 | 0x2 | H(0x123,0x2) = 0x3 | 0 -// 62 | 0 | 0x62d | 0x3 | H(0x62d,0x3) = end | 1 -// 0 | 1 | 0x4ab | len | H(0x4ab,len) = 0x5 | 0 -// 31 | 1 | 0x21f | 0x5 | H(0x21f,0x5) = 0x6 | 0 -// 62 | 2 | 0x12a | 0x6 | H(0x12a,0x6) = 0x9 | 0 -// 93 | 2 | 0x982 | 0x9 | H(0x982,0x9) = end | 1 +// bc_hash_0 = H([sep, 0x111, 0x222, 0x333, 0x444, 0x555, 0x666, 0x777]) +// bc_hash_1 = H([sep, 0xaaa, 0xbbb, 0xccc]) +// p_f = packed_fields: +// pc_index | pc_index_1 | pc_index_2 | bytecode_id | p_f_0 | p_f_1 | p_f_2 | output_hash | latch | START | rounds_rem | padding | +// -----------+-------------+-------------+-------------+---------+---------+---------+---------------+-------+-------+------------+---------+ +// 0 | 0 | 31 | 0 | sep | 0x111 | 0x222 | bc_hash_0 | 0 | 1 | 3 | 0 | +// 62 | 93 | 124 | 0 | 0x333 | 0x444 | 0x555 | bc_hash_0 | 0 | 0 | 2 | 0 | +// 155 | 186 | 217 | 0 | 0x666 | 0x777 | 0 | bc_hash_0 | 1 | 0 | 1 | 1 | +// 0 | 0 | 31 | 1 | sep | 0xaaa | 0xbbb | bc_hash_1 | 0 | 1 | 2 | 0 | +// 62 | 93 | 124 | 1 | 0xccc | 0 | 0 | bc_hash_1 | 1 | 0 | 1 | 2 | // Notes @@ -33,10 +35,16 
@@ include "../poseidon2_hash.pil"; // Therefore, a lot of the security assumptions we make here are dependent on the security assumption of the poseidon2 hash (i.e. it is intractable to // find a meaningful collision etc). +// Note that the poseidon2_hash circuit propagates the final output_hash ('output') to each row (not just at 'end') and constrains that they are the same. +// This ensures security by chaining each round to the next and asserting correctness of the hash at the final permutation round. + // To use the example of the latch variable. A prover can place the latch in any row and that corresponding value will be used to test the correctness of the // bytecode hash. We assume it is impossible, given a set of inputs (i0, i1, ..., iN) for the prover to find a subsequence within this input such that // Hash(inputs) == Hash(subsequence). +// This trace, like bc_decomposition, relies on the chaining and inc/decrementing of values per row, so the final output_hash can only be considered correct +// once we have processed all values and checked the final state. This means lookups into this trace should always be gated by latch = 1. + namespace bc_hashing; pol commit sel; @@ -66,68 +74,196 @@ namespace bc_hashing; // to be activated which is impossible on first row.), LATCH_CONDITION is a boolean. pol LATCH_CONDITION = latch + precomputed.first_row; - // The start of a new bytecode id and new set of hashing runs. 
Needs to be a committed column as it is used in the lookup + // The start of a new bytecode id and new set of hashing runs: pol commit start; start * (1 - start) = 0; - // If the current row is a latch or the first row, the next row should be a start (if it's active) + // Needs to be a committed column as it is used in the lookup: + pol commit sel_not_start; + sel_not_start = sel * (1 - start); + + // If the current row is a latch or the first row, the next row should be a start (if it's active): #[START_AFTER_LATCH] sel' * (start' - LATCH_CONDITION) = 0; // Used as part of the lookup into bytecode decomposition - pol commit pc_index; - // The PC increments by 31 each row as long as the row is not latched, in which case the next pc is zero + pol commit pc_index, pc_index_1, pc_index_2; + // The PC increments by 31*3=93 each row as long as: + // a) the row is not latched, in which case the next pc is zero + // b) we are not at start, in which case we only read 2 fields, and => only increment pc by 31*2=62 #[PC_INCREMENTS] - sel * (pc_index' - ((1 - LATCH_CONDITION) * (31 + pc_index))) = 0; + (sel + precomputed.first_row) * (pc_index' - ((1 - LATCH_CONDITION) * (62 + pc_index + sel_not_start * 31))) = 0; + // At start, we need packed_fields[0] = sep: + #[PC_INCREMENTS_1] + sel * ( pc_index_1 - start * pc_index - sel_not_start * ( pc_index + 31 )) = 0; + #[PC_INCREMENTS_2] + sel * ( pc_index_2 - (pc_index_1 + 31) ) = 0; pol commit bytecode_id; - #[ID_CONSISTENCY] + #[ID_PROPAGATION] (1 - LATCH_CONDITION) * (bytecode_id' - bytecode_id) = 0; - pol commit packed_field; - #[GET_PACKED_FIELD] - sel { pc_index, bytecode_id, packed_field } - in - bc_decomposition.sel_packed { bc_decomposition.pc, bc_decomposition.id, bc_decomposition.packed_field }; + // We need 3 packed fields to use as inputs for each poseidon round + pol commit packed_fields_0, packed_fields_1, packed_fields_2; + + // At the start of a new bytecode hash, the initial field has to be the separator, and we 
skip the lookup: + #[START_IS_SEPARATOR] + start * (packed_fields_0 - constants.GENERATOR_INDEX__PUBLIC_BYTECODE) = 0; + + #[GET_PACKED_FIELD_0] + sel_not_start { pc_index, bytecode_id, packed_fields_0 } + is + bc_decomposition.sel_packed_read[0] { bc_decomposition.pc, bc_decomposition.id, bc_decomposition.packed_field }; + + #[GET_PACKED_FIELD_1] + sel_not_padding_1 { pc_index_1, bytecode_id, packed_fields_1 } + is + bc_decomposition.sel_packed_read[1] { bc_decomposition.pc, bc_decomposition.id, bc_decomposition.packed_field }; + + #[GET_PACKED_FIELD_2] + sel_not_padding_2 { pc_index_2, bytecode_id, packed_fields_2 } + is + bc_decomposition.sel_packed_read[2] { bc_decomposition.pc, bc_decomposition.id, bc_decomposition.packed_field }; + + // Padding + + // We lookup the poseidon inputs in chunks of 3 (to match the pos. perm.), so if the total number of fields hashed is not + // a multiple of 3, we have some padding field values (=0). These will fail the lookups into bc_decomposition. + // Note: packed_fields[0] must never be a padding value and padding can only occur at the last row (i.e. latch = 1). + + // Needs to be committed columns as they are used in the lookups + pol commit sel_not_padding_1, sel_not_padding_2; + sel_not_padding_1 * (1 - sel_not_padding_1) = 0; + sel_not_padding_2 * (1 - sel_not_padding_2) = 0; + + // TODO: Instead of two bools, change to value = 0 (no padding) OR 1 (one field padding) OR 2 (two fields padding)? + // The annoyance is that we need many committed sels to use in/gate lookups. 
+
+ // PADDING_1 == 1 <==> packed_fields[1] is a padding value ==> (see #[PADDING_CONSISTENCY]) PADDING_2 == 1
+ pol PADDING_1 = sel * (1 - sel_not_padding_1);
+ // PADDING_2 == 1 <==> packed_fields[2] is a padding value <==> we have any padded values
+ pol PADDING_2 = sel * (1 - sel_not_padding_2);
-
- // This tracks the incremental bytecode hash after the i-th input
- // The first incremental hash of each new bytecode_id is the length of the bytecode
- pol commit incremental_hash;
+ // If packed_fields[1] is a padding value, packed_fields[2] must also be a padding value:
+ // padding_1 == 1 ==> padding_2 == 1
+ #[PADDING_CONSISTENCY]
+ PADDING_1 * sel_not_padding_2 = 0;
+ // padding_2 == 1 ==> latch == 1
+ #[PADDING_END]
+ PADDING_2 * (1 - latch) = 0;
+ // padding_1 == 1 ==> packed_fields[1] == 0
+ #[PADDED_BY_ZERO_1]
+ PADDING_1 * packed_fields_1 = 0;
+ // padding_2 == 1 ==> packed_fields[2] == 0
+ #[PADDED_BY_ZERO_2]
+ PADDING_2 * packed_fields_2 = 0;
-
- // At the start of a new bytecode hash, the initial incremental hash has to be the length of the bytecode
- // Note the looked up PC = 0 (enforced by the PC_INCREMENTS relation), i.e. the initial incremental hash value == bytecode length
- #[IV_IS_LEN]
- start { pc_index, bytecode_id, incremental_hash }
- in
- bc_decomposition.sel_packed { bc_decomposition.pc, bc_decomposition.id, bc_decomposition.bytes_remaining };
+ // Value of bc_decomposition.pc at the final field (can be 0 if not at the final row i.e. latch = 0). Needs to be a committed column as it is used in the lookup
+ pol commit pc_at_final_field;
+
+ // The decomp trace relies on decrementing counters. Instead of using range checks to show that we are at the last field (<==> bc_decomposition.bytes_remaining
+ // is within expected), we can check that at the claimed final field (i.e. final occurrence of sel_packed = 1 in bc_decomp), we have less than 31 bytes remaining.
+ + // In decomp: sel_windows_gt_remaining = 1 if bytes_remaining < WINDOW_SIZE + // Currently, WINDOW_SIZE = 37 and we encode 31 bytes into each packed field => if sel_windows_gt_remaining = 1 && bytes_pc_plus_31 -> bytes_pc_plus_36 are empty + // then we have no further fields to hash. + + // Example: bytecode of 80 bytes => hash 3 fields + 1 for sep => 2 rows, final row has padding of 2: + // pc_index = 62, packed_fields_0 = final field, packed_fields_1 = padding = 0, packed_fields_2 = padding = 0 + // We want to show packed_fields_0 is the last field of the bytecode (=> our padding is correct). In bc_decomposition at pc_index = 62: + // sel_packed = 1, packed_field = final field, bytes_remaining = 18 + // To avoid using gt/range check to show bytes_remaining < 32, we can instead lookup that sel_windows_gt_remaining = 1 (<==> bytes_remaining < 37) and + // bytes_pc_plus_31 -> bytes_pc_plus_36 are empty. + + // NOTE: This relies on the hardcoded WINDOW_SIZE = 37 and WILL BREAK if this ever changes! + // ASIDE NOTE: A simpler solution would be to lookup that at pc_index - 6 we have sel_windows_gt_remaining = 1 in bc_decomposition, but this would underflow + // if we have a bytecode of under 2 fields (pc_index = 0). 
+ + #[CHECK_FINAL_BYTES_REMAINING] + latch { + pc_at_final_field, + bytecode_id, + sel /* =1 */, + precomputed.zero /* =0 */, + precomputed.zero /* =0 */, + precomputed.zero /* =0 */, + precomputed.zero /* =0 */, + precomputed.zero /* =0 */, + precomputed.zero /* =0 */ + } in bc_decomposition.sel_packed { + bc_decomposition.pc, + bc_decomposition.id, + bc_decomposition.sel_windows_gt_remaining, + bc_decomposition.bytes_pc_plus_31, + bc_decomposition.bytes_pc_plus_32, + bc_decomposition.bytes_pc_plus_33, + bc_decomposition.bytes_pc_plus_34, + bc_decomposition.bytes_pc_plus_35, + bc_decomposition.bytes_pc_plus_36 + }; + + // padding_1 == 1 & padding_2 == 1 <==> pc_at_final_field = pc_index <==> final_bytes_remaining <= 31 + // padding_1 == 0 & padding_2 == 1 <==> pc_at_final_field = pc_index_1 <==> 31 < final_bytes_remaining <= 62 + // padding_1 == 0 & padding_2 == 0 <==> pc_at_final_field = pc_index_2 <==> 62 < bytes_remaining <= 93 + // TODO: Technically when we have no padding (PADDING_2 = 0) we don't need to check the below because the decomp lookups cover us. + // However, this does constrain that when latch == 1, we are definitely at the last field of bc_decomp, I'm not sure whether this is + // required but kept in just in case. + #[PADDING_CORRECTNESS] + pc_at_final_field - latch * ( + PADDING_1 * pc_index + // #[PADDING_CONSISTENCY] constrains that PADDING_1 = 1 ==> PADDING_2 = 1 ==> two padding fields and pc_at_final_field = pc_index + (PADDING_2 - PADDING_1) * pc_index_1 + // one padding field and pc_at_final_field = pc_index_1 + sel_not_padding_2 * pc_index_2 // no padding ==> pc_at_final_field = pc_index_2 + ) = 0; + + // The length of the hashed bytecode in fields, including the prepended separator. We use it to look up into poseidon_2 to ensure that + // the hashed IV matches our bytecode length. + // Note: this is constrained at the final row (latch) by linking it to the pc_at_final_field (see above lookups/relations). 
+ pol commit input_len; + + // Minus one for the separator field not present in bc_decomposition, and one to include the final field (pc_at_final_field marks the beginning of the last field). + // Note: this shouldn't underflow as we don't handle empty bytecode. + #[BYTECODE_LENGTH_FIELDS] + latch * (31 * (input_len - 2) - pc_at_final_field) = 0; + + // The number of rounds (rows) remaining to completely hash the bytecode. + // Like input_len, its correctness is constrained at the final row (latch), where (due to #[CHECK_FINAL_BYTES_REMAINING] and #[PADDING_CORRECTNESS]) + // we know we have reached the end of bytecode processed by bc_decomposition and we must have decremented uniformly until this point. + // We use it to ensure the ordering of poseidon_2 rounds is correct (i.e. a malicious prover cannot swap the order of poseidon rounds). + // (Note that we don't need to constrain this vs input_len because a. input_len is checked vs the incrementing pc values at latch and b. + // the poseidon_2 trace we lookup into links these values anyway) + pol commit rounds_rem; + + // The rounds remaining decrement each row as long as the row is not latched, otherwise rounds_rem == 1 + #[ROUNDS_DECREMENT] + sel * ((1 - LATCH_CONDITION) * (rounds_rem' - rounds_rem + 1) + latch * (rounds_rem - 1)) = 0; - // Start Hashing, Poseidon2(packed_field, running_hash) pol commit output_hash; - // TODO(dbanks12): re-enable once C++ and PIL use standard poseidon2 hashing for bytecode commitments. - //#[POSEIDON2_HASH] - //sel { packed_field, incremental_hash, output_hash } - //in poseidon2_hash.sel { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.output }; - // The output hash has to be incremental_hash of the next row (unless it's latched) - #[CHAIN_OUTPUT_TO_INCR] - (1 - LATCH_CONDITION) * (incremental_hash' - output_hash) = 0; + // Since we constrain the id the same for each row (#[ID_PROPAGATION]) and id == output_hash below, there is no need for #[HASH_PROPAGATION]. 
+ // Note that we must constrain the below at *each row* to ensure that poseidon rows from different hashing traces (i.e. mismatched output_hash, + // matching tuple of inputs and length) are not inserted maliciously: + #[HASH_IS_ID] + sel * (bytecode_id - output_hash) = 0; + + #[POSEIDON2_HASH] + sel { start, latch, packed_fields_0, packed_fields_1, packed_fields_2, input_len, rounds_rem, output_hash } + in poseidon2_hash.sel { poseidon2_hash.start, poseidon2_hash.end, poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.input_len, poseidon2_hash.num_perm_rounds_rem, poseidon2_hash.output }; // ######################################################################################### // Proof Sketch // ######################################################################################### - // We want to show that the output_hash at a row with latch == 1 correctly enforces that it - // is the result of hashing the bytes of a given bytecode identified by bytecode_id. - // Thanks to #[TRACE_CONTINUITY] and #[SEL_TOGGLED_AT_LATCH], we have the guarantee that - // the rows above the final output_hash are activated. If they are activated, then - // bytecode_id is maintained and pc_index decrements by 31 when we move to the top. - // From #[START_AFTER_LATCH], we have the guarantee that we cannot meet a row with - // latch == 1 before we meet start == 1 when we go up. This shows that bytecode_id, - // pc_index, and incremental_hash evolution did not deviate from the happy path. When - // we reach a row with start == 1 (we know we must reach one thanks to #[START_AFTER_LATCH] - // enforces it on the second row.), then #[IV_IS_LEN] implies that pc_index and incremental_hash - // are correctly initialized. Note also that thanks #[TRACE_CONTINUITY] and #[GET_PACKED_FIELD] - // we retrieved packed_field at the right pc_index from bc_decomposition sub-trace. 
+ // We want to show that the output_hash at a row with latch == 1 correctly enforces that it is the result of hashing
+ // the bytes of a given bytecode identified by bytecode_id. Thanks to #[TRACE_CONTINUITY] and
+ // #[SEL_TOGGLED_AT_LATCH], we have the guarantee that the rows above the final latch are activated.
+ // If they are activated, then bytecode_id is maintained and pc_index decrements by 31 * 3 = 93
+ // when we move to the top. From #[START_AFTER_LATCH], we have the guarantee that we cannot meet
+ // a row with latch == 1 before we meet start == 1 when we go up. This shows that bytecode_id,
+ // pc_index, and packed_fields evolution did not deviate from the happy path.
+ // When we reach a row with start == 1 (we know we must reach one since #[START_AFTER_LATCH]
+ // enforces it on the second row), then #[START_IS_SEPARATOR] implies that pc_index and packed_fields
+ // are correctly initialized. Note also that thanks to #[TRACE_CONTINUITY] and #[GET_PACKED_FIELD_i]
+ // we retrieved packed_field_i at the right pc_index_i from bc_decomposition sub-trace.
// We remark that before reaching another latch, a prover might add additional rows without
// latch on top of the start or even add a row with start == 1. This does not have any security
// impact as what matters is the guarantee to have a correct initialization at start. What is
diff --git a/barretenberg/cpp/pil/vm2/bytecode/bc_retrieval.pil b/barretenberg/cpp/pil/vm2/bytecode/bc_retrieval.pil
index b1f65d3a69a1..d55833561060 100644
--- a/barretenberg/cpp/pil/vm2/bytecode/bc_retrieval.pil
+++ b/barretenberg/cpp/pil/vm2/bytecode/bc_retrieval.pil
@@ -1,5 +1,4 @@
include "contract_instance_retrieval.pil";
-include "bc_hashing.pil";

include "../constants_gen.pil";

@@ -34,6 +33,7 @@ sel = 0;

// This id is the bytecode commitment itself
// The bytecode_id should equal the public_bytecode_commitment.
+// This is constrained in bc_hashing, which is looked up via instr_fetching -> bc_decomposition.
pol commit bytecode_id; pol commit error; // some error occurred. pol commit address; // contract address. @@ -61,10 +61,15 @@ pol commit private_function_root; // These should be looked up and constrained by the caller. pol commit public_data_tree_root; pol commit nullifier_tree_root; +pol commit prev_retrieved_bytecodes_tree_root; +pol commit prev_retrieved_bytecodes_tree_size; + +// next state +pol commit next_retrieved_bytecodes_tree_root; +pol commit next_retrieved_bytecodes_tree_size; pol commit instance_exists; -// The only error that can happen is if the nullifier does not exist. -error = sel * (1 - instance_exists); +instance_exists * (1 - instance_exists) = 0; #[CONTRACT_INSTANCE_RETRIEVAL] sel { @@ -81,11 +86,46 @@ sel { contract_instance_retrieval.nullifier_tree_root }; +pol commit no_remaining_bytecodes; +no_remaining_bytecodes * (1 - no_remaining_bytecodes) = 0; + +// The tree size is 1 (AVM_RETRIEVED_BYTECODES_TREE_INITIAL_SIZE) + retrieved_bytecodes_count +pol REMAINING_BYTECODES = constants.MAX_PUBLIC_CALLS_TO_UNIQUE_CONTRACT_CLASS_IDS + constants.AVM_RETRIEVED_BYTECODES_TREE_INITIAL_SIZE - prev_retrieved_bytecodes_tree_size; + +pol commit remaining_bytecodes_inv; + +#[NO_REMAINING_BYTECODES] +sel * (REMAINING_BYTECODES * (no_remaining_bytecodes * (1 - remaining_bytecodes_inv) + remaining_bytecodes_inv) - 1 + no_remaining_bytecodes) = 0; + +pol commit is_new_class; + +#[IS_NEW_CLASS_CHECK] +instance_exists { + current_class_id, + is_new_class, + prev_retrieved_bytecodes_tree_root +} in retrieved_bytecodes_tree_check.sel { + retrieved_bytecodes_tree_check.class_id, + retrieved_bytecodes_tree_check.leaf_not_exists, + retrieved_bytecodes_tree_check.root +}; + +// Enforce is_new_class == 0 if the instance does not exist. +sel * (1 - instance_exists) * is_new_class = 0; + +pol TOO_MANY_BYTECODES = no_remaining_bytecodes * is_new_class; + +// We error if instance doesn't exist or if we have too many bytecodes. 
+sel * (instance_exists * (1 - TOO_MANY_BYTECODES) - (1 - error)) = 0; + +pol commit should_retrieve; +should_retrieve = sel * (1 - error); + // Observe the following also connects the current_class_id of the instance to the class members. // Note: only need to derive the class id if the instance exists. // TODO: Probably some latch is also needed. #[CLASS_ID_DERIVATION] -instance_exists { +should_retrieve { current_class_id, artifact_hash, private_function_root, @@ -97,24 +137,37 @@ instance_exists { class_id_derivation.public_bytecode_commitment }; -// TODO(dbanks12): re-enable once C++ and PIL use standard poseidon2 hashing for bytecode commitments. -// Note: only need to hash the bytecode if the instance exists. Otherwise there is nothing to hash! -//#[BYTECODE_HASH_IS_CORRECT] -//instance_exists { bytecode_id, /*public_bytecode_commitment=*/ bytecode_id } in bc_hashing.latch { bc_hashing.bytecode_id, bc_hashing.output_hash }; +#[RETRIEVED_BYTECODES_INSERTION] +should_retrieve { + current_class_id, + should_retrieve, // 1 + prev_retrieved_bytecodes_tree_root, + prev_retrieved_bytecodes_tree_size, + next_retrieved_bytecodes_tree_root, + next_retrieved_bytecodes_tree_size +} in retrieved_bytecodes_tree_check.sel { + retrieved_bytecodes_tree_check.class_id, + retrieved_bytecodes_tree_check.write, + retrieved_bytecodes_tree_check.root, + retrieved_bytecodes_tree_check.tree_size_before_write, + retrieved_bytecodes_tree_check.write_root, + retrieved_bytecodes_tree_check.tree_size_after_write +}; -// If class ID derivation is disabled (instance does not exist), force all members to 0. +// If instance does not exist, force current_class_id to 0. 
#[CURRENT_CLASS_ID_IS_ZERO_IF_INSTANCE_DOES_NOT_EXIST] sel * (1 - instance_exists) * (current_class_id - 0) = 0; -#[ARTIFACT_HASH_IS_ZERO_IF_INSTANCE_DOES_NOT_EXIST] -sel * (1 - instance_exists) * (artifact_hash - 0) = 0; -#[PRIVATE_FUNCTION_ROOT_IS_ZERO_IF_INSTANCE_DOES_NOT_EXIST] -sel * (1 - instance_exists) * (private_function_root - 0) = 0; -#[BYTECODE_ID_IS_ZERO_IF_INSTANCE_DOES_NOT_EXIST] -sel * (1 - instance_exists) * (bytecode_id - 0) = 0; - -// Note: we don't need to silo and check the class id because the deployer contract guarrantees +// If class ID derivation is disabled (error), force other members to 0. +#[ARTIFACT_HASH_IS_ZERO_IF_ERROR] +error * (artifact_hash - 0) = 0; +#[PRIVATE_FUNCTION_ROOT_IS_ZERO_IF_ERROR] +error * (private_function_root - 0) = 0; +#[BYTECODE_ID_IS_ZERO_IF_ERROR] +error * (bytecode_id - 0) = 0; + +// On error, constrain next root and size to be the same as the previous ones. +error * (next_retrieved_bytecodes_tree_root - prev_retrieved_bytecodes_tree_root) = 0; +error * (next_retrieved_bytecodes_tree_size - prev_retrieved_bytecodes_tree_size) = 0; + +// Note: we don't need to silo and check the class id because the deployer contract guarantees // that if a contract instance exists, the class has been registered. 
- -// TODO: To ensure bytecode_id unicity inside of bc_decomposition.pil, we will have to introduce -// a permutation of the form: sel_XXX {bytecode_id} is bc_decomposition.last_of_contract {bc_decomposition.id} -// sel_XXX will have to be picked so that it selects a bytecode_id iff it has an entry in bc_decomposition diff --git a/barretenberg/cpp/pil/vm2/bytecode/class_id_derivation.pil b/barretenberg/cpp/pil/vm2/bytecode/class_id_derivation.pil index f3c417d3d7b3..bdee331ad6ef 100644 --- a/barretenberg/cpp/pil/vm2/bytecode/class_id_derivation.pil +++ b/barretenberg/cpp/pil/vm2/bytecode/class_id_derivation.pil @@ -15,18 +15,21 @@ namespace class_id_derivation; // Members of Contract Class Id, looked up with bc_retrieval pol commit artifact_hash; pol commit private_function_root; - pol commit public_bytecode_commitment; // This is constrained via bc_retrieval's lookup to bc_hashing + pol commit public_bytecode_commitment; // This is constrained via executions's lookup to instr_fetching, which looks up bc_decomposition <-> bc_hashing // The result of // H(GENERATOR_INDEX__CONTRACT_LEAF, artifact_hash, private_function_root, public_bytecode_commitment) pol commit class_id; - // TODO: We need this temporarily while we dont allow for aliases in the lookup tuple, there must be a better way + // TODO: We need these temporarily while we dont allow for aliases in the lookup tuple, there must be a better way pol commit temp_constant_for_lookup; sel * (temp_constant_for_lookup - constants.GENERATOR_INDEX__CONTRACT_LEAF) = 0; + pol commit const_two; + sel * (const_two - 2) = 0; + // Since the inputs to poseidon2 have to be chunks of 3, we need two lookups if we want to do this in a single row #[CLASS_ID_POSEIDON2_0] - sel { temp_constant_for_lookup, artifact_hash, private_function_root, class_id } - in poseidon2_hash.start { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + sel { temp_constant_for_lookup, artifact_hash, 
private_function_root, class_id, const_two } + in poseidon2_hash.start { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output, poseidon2_hash.num_perm_rounds_rem }; #[CLASS_ID_POSEIDON2_1] sel { public_bytecode_commitment, precomputed.zero, precomputed.zero, class_id } diff --git a/barretenberg/cpp/pil/vm2/bytecode/contract_instance_retrieval.pil b/barretenberg/cpp/pil/vm2/bytecode/contract_instance_retrieval.pil index 6c985dab9a13..aa741bf2ad07 100644 --- a/barretenberg/cpp/pil/vm2/bytecode/contract_instance_retrieval.pil +++ b/barretenberg/cpp/pil/vm2/bytecode/contract_instance_retrieval.pil @@ -1,6 +1,6 @@ -include "bc_hashing.pil"; include "address_derivation.pil"; include "update_check.pil"; +include "../ff_gt.pil"; /** * Contract Instance Retrieval gadget. @@ -69,6 +69,9 @@ namespace contract_instance_retrieval; // end I/O //////////////////////////////////////////////////////////////////////////// + // The address that is checked in address derivation. 
+ pol commit derived_address; + // public keys (all hinted) pol commit nullifier_key_x; pol commit nullifier_key_y; @@ -83,9 +86,50 @@ namespace contract_instance_retrieval; pol commit deployer_protocol_contract_address; sel * (constants.CONTRACT_INSTANCE_REGISTRY_CONTRACT_ADDRESS - deployer_protocol_contract_address) = 0; + // Indicates if the instance belongs to a protocol contract + pol commit is_protocol_contract; + + // Canonical Addresses can be in the range of 1 <= address <= MAX_PROTOCOL_CONTRACTS + pol commit max_protocol_contracts; + max_protocol_contracts = sel * constants.MAX_PROTOCOL_CONTRACTS; + + pol commit address_sub_one; + address_sub_one = sel * (address - 1); + #[CHECK_PROTOCOL_ADDRESS_RANGE] + sel { max_protocol_contracts, address_sub_one, is_protocol_contract } + in + ff_gt.sel { ff_gt.a, ff_gt.b, ff_gt.result}; + + // Constrain exists and derived address for protocol contracts + + // Compute the protocol contract derived address index in the PI column as `base_offset + contract_index = base_offset + protocol_address - 1` + pol commit derived_address_pi_index; + is_protocol_contract * (constants.AVM_PUBLIC_INPUTS_PROTOCOL_CONTRACTS_ROW_IDX + address_sub_one - derived_address_pi_index) = 0; + + #[READ_DERIVED_ADDRESS_FROM_PUBLIC_INPUTS] + is_protocol_contract { + derived_address_pi_index, + derived_address + }in public_inputs.sel { + precomputed.clk, + public_inputs.cols[0] + }; + + pol commit protocol_contract_derived_address_inv; + pol NOT_EXISTS = 1 - exists; + + // If the protocol contract derived address is zero, the protocol contract does not exist. Otherwise, it exists. 
+ is_protocol_contract * (derived_address * (NOT_EXISTS * (1 - protocol_contract_derived_address_inv) + protocol_contract_derived_address_inv) - 1 + NOT_EXISTS) = 0; + + // Constrain exists and derived address for non-protocol contracts via nullifier check + + pol commit should_check_nullifier; + // Protocol contracts do not have an address nullifier in the nullifier tree. + should_check_nullifier = sel * (1 - is_protocol_contract); + // Nullifier existence check (deployment nullifier read) #[DEPLOYMENT_NULLIFIER_READ] - sel { + should_check_nullifier { exists, // does the contract address nullifier exist? gates later lookups.... address, // the deployment nullifier nullifier_tree_root, @@ -99,6 +143,10 @@ namespace contract_instance_retrieval; nullifier_check.should_silo }; + // For protocol contracts we retrieve the derived address from the protocol contract trace, else use the input address + #[UNCHANGED_ADDRESS_NON_PROTOCOL] + sel * (1 - is_protocol_contract) * (derived_address - address) = 0; + // Force members to 0 if the instance doesn't exist #[INSTANCE_MEMBER_SALT_IS_ZERO_IF_DNE] sel * (1 - exists) * salt = 0; // technically not needed since salt is hinted, but good for consistency @@ -111,11 +159,10 @@ namespace contract_instance_retrieval; #[INSTANCE_MEMBER_INIT_HASH_IS_ZERO_IF_DNE] sel * (1 - exists) * init_hash = 0; - // Address derivation lookup (only when nullifier exists) - // If the address nullifier doesn't exist, there is no need to derive the address from its parts! + // Address derivation lookup (only if the nullifier exists or for protocol contract instances) #[ADDRESS_DERIVATION] exists { - address, + derived_address, salt, // hinted deployer_addr, original_class_id, @@ -145,9 +192,11 @@ namespace contract_instance_retrieval; }; // Enforce that the class id provided is the _current_ one (only when nullifier exists) - // If the address nullifier doesn't exist, there is no need to check! 
+ // If the address nullifier doesn't exist (which excludes protocol contract instances), there is no need to check! + pol commit should_check_for_update; + should_check_for_update = should_check_nullifier * exists; #[UPDATE_CHECK] - exists { + should_check_for_update { address, current_class_id, original_class_id, diff --git a/barretenberg/cpp/pil/vm2/bytecode/instr_fetching.pil b/barretenberg/cpp/pil/vm2/bytecode/instr_fetching.pil index 2343bcd212fd..9736b9944627 100644 --- a/barretenberg/cpp/pil/vm2/bytecode/instr_fetching.pil +++ b/barretenberg/cpp/pil/vm2/bytecode/instr_fetching.pil @@ -288,7 +288,7 @@ op3 = (1 - PARSING_ERROR_EXCEPT_TAG_ERROR) * (sel_op_dc_0 * (bd7 * 2**8 + bd8 * #[OP4_BYTES_DECOMPOSITION] op4 = (1 - PARSING_ERROR_EXCEPT_TAG_ERROR) * (sel_op_dc_0 * (bd9 * 2**8 + bd10 * 2**0) + sel_op_dc_5 * (bd8 * 2**8 + bd9 * 2**0)); #[OP5_BYTES_DECOMPOSITION] -op5 = (1 - PARSING_ERROR_EXCEPT_TAG_ERROR) * (sel_op_dc_0 * (bd11 * 2**8 + bd12 * 2**0)); +op5 = (1 - PARSING_ERROR_EXCEPT_TAG_ERROR) * (sel_op_dc_0 * (bd11 * 2**8 + bd12 * 2**0) + sel_op_dc_5 * (bd10 * 2**8 + bd11 * 2**0)); #[OP6_BYTES_DECOMPOSITION] op6 = (1 - PARSING_ERROR_EXCEPT_TAG_ERROR) * (sel_op_dc_1 * (bd13 * 2**8 + bd14 * 2**0)); #[OP7_BYTES_DECOMPOSITION] diff --git a/barretenberg/cpp/pil/vm2/bytecode/update_check.pil b/barretenberg/cpp/pil/vm2/bytecode/update_check.pil index d638a01f9f33..c007aea2a40f 100644 --- a/barretenberg/cpp/pil/vm2/bytecode/update_check.pil +++ b/barretenberg/cpp/pil/vm2/bytecode/update_check.pil @@ -61,8 +61,8 @@ namespace update_check; pol commit delayed_public_mutable_slot; #[DELAYED_PUBLIC_MUTABLE_SLOT_POSEIDON2] - sel { updated_class_ids_slot, address, precomputed.zero, delayed_public_mutable_slot } - in poseidon2_hash.end { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + sel { sel /* =1 */, updated_class_ids_slot, address, precomputed.zero, delayed_public_mutable_slot } + in poseidon2_hash.end { 
poseidon2_hash.start, poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; pol commit delayed_public_mutable_hash_slot; sel * (delayed_public_mutable_slot + constants.UPDATES_DELAYED_PUBLIC_MUTABLE_VALUES_LEN - delayed_public_mutable_hash_slot) = 0; @@ -108,8 +108,8 @@ namespace update_check; pol commit update_preimage_post_class_id; #[UPDATE_HASH_POSEIDON2] - hash_not_zero { update_preimage_metadata, update_preimage_pre_class_id, update_preimage_post_class_id, update_hash } - in poseidon2_hash.end { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + hash_not_zero { sel /* =1 */, update_preimage_metadata, update_preimage_pre_class_id, update_preimage_post_class_id, update_hash } + in poseidon2_hash.end { poseidon2_hash.start, poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; // ======== TIMESTAMP OF CHANGE READ ======== @@ -138,19 +138,14 @@ namespace update_check; update_hi_metadata * TWO_POW_32 + timestamp_of_change - update_preimage_metadata = 0; // ======== CLASS ID CHECK ======== - // We are going to use the standard way to compare integers. - // We request the boolean as a hint, and then we constrain it by subtracting in one direction or the other depending on it. - // If the boolean hint is incorrect, then the subtraction will overflow the field and the range constraint will fail. 
- pol commit timestamp_is_lt_timestamp_of_change; - timestamp_is_lt_timestamp_of_change * (1 - timestamp_is_lt_timestamp_of_change) = 0; - pol commit timestamp_of_change_subtraction; - pol TIMESTAMP_LT_TIMESTAMP_OF_CHANGE = timestamp_of_change - 1 - timestamp; - pol TIMESTAMP_GTE_TIMESTAMP_OF_CHANGE = timestamp - timestamp_of_change; - (TIMESTAMP_LT_TIMESTAMP_OF_CHANGE - TIMESTAMP_GTE_TIMESTAMP_OF_CHANGE) * timestamp_is_lt_timestamp_of_change + TIMESTAMP_GTE_TIMESTAMP_OF_CHANGE - timestamp_of_change_subtraction = 0; - - #[TIMESTAMP_OF_CHANGE_CMP_RANGE] - hash_not_zero { timestamp_of_change_subtraction, timestamp_of_change_bit_size } - in range_check.sel { range_check.value, range_check.rng_chk_bits }; + // We are going to use the gt gadget to compare `timestamp` and `timestamp_of_change` and derive + // the boolean value of `timestamp_is_lt_timestamp_of_change`. + pol commit timestamp_is_lt_timestamp_of_change; // Boolean value enforced by gt gadget (under condition hash_not_zero == 1). + + // Equivalent to "timestamp_of_change > timestamp" + #[TIMESTAMP_IS_LT_TIMESTAMP_OF_CHANGE] + hash_not_zero { timestamp_of_change, timestamp, timestamp_is_lt_timestamp_of_change } + in gt.sel_others { gt.input_a, gt.input_b, gt.res }; pol commit update_pre_class_id_is_zero; update_pre_class_id_is_zero * (1 - update_pre_class_id_is_zero) = 0; diff --git a/barretenberg/cpp/pil/vm2/calldata.pil b/barretenberg/cpp/pil/vm2/calldata.pil index ab983e64a902..04068efb3873 100644 --- a/barretenberg/cpp/pil/vm2/calldata.pil +++ b/barretenberg/cpp/pil/vm2/calldata.pil @@ -1,7 +1,44 @@ include "precomputed.pil"; include "calldata_hashing.pil"; -// Calldata is largely going to be constrained by calldata hashing +// ########### +// Calldata +// ########### +// +// This circuit fills the calldata column with one field per row. 
We constrain that the index +// increments by one each row until we begin a new calldata instance, where the context_id must +// increase and latch must indicate the final row. +// +// The values in the calldata columns are really hints. Their correctness is constrained by calldata_hashing.pil, +// where the size and hash of the calldata is validated. The hash is looked up in the tx and matched to +// the one in a tx's public inputs, and the size is looked up for each call. These values should be +// looked up via calldata_hashing.pil's latch against the context_id. +// +// For empty calldata, we have a special case where no value exists, but should have a row in this +// trace to indicate that a certain context_id has been processed: +// index | value | context_id | latch | +// -------+--------+------------+-------+ +// 0 | 0 | id | 1 | +// This is the only case in which index = 0 and sel = 1. Note that no lookups should access this trace +// with index = 0 unless to confirm that the calldata is empty (data_copy and calldata_hashing lookup +// all existing values with a +1). +// +// e.g. calldata: [0x111, 0x222, 0x333, 0x444] +// calldata.pil: +// index | value | context_id | latch | +// -------+--------+------------+-------+ +// 1 | 0x111 | 1 | 0 | +// 2 | 0x222 | 1 | 0 | +// 3 | 0x333 | 1 | 0 | +// 4 | 0x444 | 1 | 1 | +// +// e.g. 
cd_hash_0 = H([sep, 0x111, 0x222, 0x333, 0x444]) +// calldata_hashing.pil: +// index_0_ | index_1_ | index_2_ | context_id | input_0_ | input_1_ | input_2_ | output_hash | latch | start | rounds_rem | padding | calldata_size | +// -----------+-----------+----------+------------+----------+----------+----------+-------------+-------+-------+------------+---------+---------------+ +// 0 | 1 | 2 | 1 | sep | 0x111 | 0x222 | cd_hash_0 | 0 | 1 | 2 | 0 | 4 | +// 3 | 4 | 5 | 1 | 0x333 | 0x444 | 0 | cd_hash_0 | 1 | 0 | 1 | 1 | 4 | + namespace calldata; #[skippable_if] @@ -10,10 +47,31 @@ namespace calldata; pol commit sel; pol commit value; pol commit context_id; - // TODO: Index starts at one - pol commit index; // 1 - indexed + // **NOTE** The index starts at one in this trace (see above comment for special case of empty calldata and index = 0): + // We do not currently constrain this (or the existence of a 'start' column) since calldata_hashing constrains it + // and all looked up calldata values are basically hints which must also go through calldata_hashing. + // e.g. data_copy will look up values in this trace by context_id. We know that these values are valid (and + // start at index=1) because data_copy also looks up the calldata size by the same context_id, constrained by + // calldata_hashing, which itself constrains the index. + // We could constrain this here by introducing a start column: + // pol commit start; + // Adding relations like (see calldata_hashing.pil's): + // #[START_AFTER_LATCH] + // sel' * (start' - FIRST_OR_LAST_CALLDATA) = 0; + // #[START_INDEX_IS_ONE] + // sel * start * (is_not_empty * (index - 1) + is_empty * index) = 0; + // If start = 1, #[START_INDEX_IS_ONE] should constrain that index = 1 unless we have empty calldata. In that case + // the 'special' row must have latch = 1, index = 0, value = 0, and will be constrained by calldata_hashing to have + // size = 0. 
This is a different case to a calldata of one field of value 0, where index = 1. + pol commit index; // Designates end of calldata for that context_id pol commit latch; + latch * (1 - latch) = 0; + + // latch == 1 ==> sel == 1 + #[SEL_TOGGLED_AT_LATCH] + latch * (1 - sel) = 0; + pol FIRST_OR_LAST_CALLDATA = precomputed.first_row + latch; // Index increments until latch sel * (1 - FIRST_OR_LAST_CALLDATA) * (index' - index - 1) = 0; @@ -25,4 +83,12 @@ namespace calldata; // Context id does not change until we latch #[CONTEXT_ID_CONTINUITY] - (1 - precomputed.first_row) * sel * (1 - latch) * (1 - context_id) * context_id' = 0; + (1 - FIRST_OR_LAST_CALLDATA) * (context_id - context_id') = 0; + + // We ensure that context_id is always different and increasing at each latch: + pol commit diff_context_id; + diff_context_id = latch * sel' * (context_id' - context_id - 1); + + #[RANGE_CHECK_CONTEXT_ID_DIFF] + latch { diff_context_id } in precomputed.sel_range_16 { precomputed.clk }; + diff --git a/barretenberg/cpp/pil/vm2/calldata_hashing.pil b/barretenberg/cpp/pil/vm2/calldata_hashing.pil index 4a37b45a2b16..62a7ab963327 100644 --- a/barretenberg/cpp/pil/vm2/calldata_hashing.pil +++ b/barretenberg/cpp/pil/vm2/calldata_hashing.pil @@ -1,8 +1,39 @@ +include "calldata.pil"; include "precomputed.pil"; include "poseidon2_hash.pil"; +include "constants_gen.pil"; + +// ########### +// Calldata Hashing +// ########### +// +// This circuit constrains the correctness of the calldata hash (output_hash) and +// size (calldata_size) from the calldata.pil trace. The hash is used in the public inputs +// (see tx.pil -> #[READ_PUBLIC_CALL_REQUEST_PHASE]) and the size in execution (see tx.pil -> +// #[DISPATCH_EXEC_START] and data_copy.pil -> #[DISPATCH_CD_COPY], 'parent_calldata_size') +// for accessing calldata. +// +// As in bytecode hashing, we must process each field of the calldata and ensure it is included +// in the preimage to a poseidon2 hash with no extra or omitted fields. 
Each row corresponds to a +// poseidon permutation (3 fields). We constrain the size and output hash fully once each row is +// processed, so this trace should be looked up by the final row (where latch == 1). +// +// Note that in calldata.pil, the incrementing index starts at 1: +// cd_hash_0 = H([sep, 0x111, 0x222, 0x333, 0x444]) +// cd_hash_1 = H([sep, 0xaaa, 0xbbb]) +// index_0_ | index_1_ | index_2_ | context_id | input_0_ | input_1_ | input_2_ | output_hash | latch | start | rounds_rem | padding | calldata_size | +// -----------+-----------+----------+------------+----------+----------+----------+-------------+-------+-------+------------+---------+---------------+ +// 0 | 1 | 2 | 1 | sep | 0x111 | 0x222 | cd_hash_0 | 0 | 1 | 2 | 0 | 4 | +// 3 | 4 | 5 | 1 | 0x333 | 0x444 | 0 | cd_hash_0 | 1 | 0 | 1 | 1 | 4 | +// 0 | 1 | 2 | 2 | sep | 0xaaa | 0xbbb | cd_hash_1 | 1 | 1 | 1 | 0 | 2 | +// +// For empty calldata, we lookup a special row to the calldata.pil trace where index = 0 and latch = 1 for the context_id: +// index_0_ | index_1_ | index_2_ | context_id | input_0_ | input_1_ | input_2_ | output_hash | latch | start | rounds_rem | padding | calldata_size | +// -----------+-----------+----------+------------+----------+----------+----------+-------------+-------+-------+------------+---------+---------------+ +// 0 | 1 | 2 | 1 | sep | 0 | 0 | H([sep]) | 1 | 1 | 1 | 2 | 0 | +// +namespace calldata_hashing; -namespace cd_hashing; - #[skippable_if] sel = 0; @@ -11,25 +42,179 @@ namespace cd_hashing; #[TRACE_CONTINUITY] (1 - precomputed.first_row) * (1 - sel) * sel' = 0; - pol commit context_id; - pol commit length_remaining; pol commit latch; + latch * (1 - latch) = 0; + + // latch == 1 ==> sel == 1 + #[SEL_TOGGLED_AT_LATCH] + latch * (1 - sel) = 0; + + // Given both latch and first_row are boolean and that latch cannot be activated at first row (sel would have + // to be activated which is impossible on first row.), LATCH_CONDITION is a boolean. 
+ pol LATCH_CONDITION = latch + precomputed.first_row; + + pol commit context_id; + #[ID_CONSISTENCY] + (1 - LATCH_CONDITION) * (context_id' - context_id) = 0; + + pol commit calldata_size; + #[SIZE_CONSISTENCY] + (1 - LATCH_CONDITION) * (calldata_size' - calldata_size) = 0; + + // The start of a new context id and new set of hashing runs: + pol commit start; + start * (1 - start) = 0; + + // Needs to be a committed column as it is used in the lookup: + pol commit sel_not_start; + sel_not_start = sel * (1 - start); + + // If the current row is a latch or the first row, the next row should be a start (if it's active): + #[START_AFTER_LATCH] + sel' * (start' - LATCH_CONDITION) = 0; - // TODO: Add more - // These will be the inputs to the poseidon2 round + // The inputs to the poseidon2 hash. The very first input (index == 0) is the separator, the remaining + // inputs are calldata fields matching those in calldata.pil pol commit input[3]; - // At the last round, it is checked against the public inputs/tx trace - pol commit output_hash; + // The index of the inputs to the poseidon2 hash, starting at 0. + // NOTE: the calldata trace starts at 1, so we don't have to shift by one (for the prepended separator) + // to match values. This also means the value of the final index == the calldata size in fields. 
+ pol commit index[3]; + + // We must start at index == 0: + #[START_INDEX_IS_ZERO] + start * index[0] = 0; + + // At the start of a new calldata hash, the initial field has to be the separator, and we skip the lookup: + #[START_IS_SEPARATOR] + start * (input[0] - constants.GENERATOR_INDEX__PUBLIC_CALLDATA) = 0; + + // The index increments by 3 each row (unless we are at latch): + #[INDEX_INCREMENTS] + sel * (1 - LATCH_CONDITION) * (index[0]' - (index[0] + 3)) = 0; + + // Each index in the array increments (note: ideally we would just use a single index, but we need to use + // index + 1 and index + 2 in the lookups to calldata.pil): + #[INDEX_INCREMENTS_1] + sel * (index[1] - (index[0] + 1)) = 0; - // This is incomplete, probably require round information - #[CD_HASH] - sel {input[0], input[1], input[2] } + #[INDEX_INCREMENTS_2] + sel * (index[2] - (index[1] + 1)) = 0; + + #[GET_CALLDATA_FIELD_0] + sel_not_start { index[0], context_id, input[0] } in - poseidon2_hash.sel { - poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2 - }; + calldata.sel { calldata.index, calldata.context_id, calldata.value }; - #[CD_HASH_END] - latch { output_hash } + #[GET_CALLDATA_FIELD_1] + sel_not_padding_1 { index[1], context_id, input[1] } in - poseidon2_hash.sel { poseidon2_hash.output }; + calldata.sel { calldata.index, calldata.context_id, calldata.value }; + + #[GET_CALLDATA_FIELD_2] + sel_not_padding_2 { index[2], context_id, input[2] } + in + calldata.sel { calldata.index, calldata.context_id, calldata.value }; + + // Padding + + // We lookup the poseidon inputs in chunks of 3 (to match the pos. perm.), so if the total number of fields hashed is not + // a multiple of 3, we have some padding field values (=0). These will fail the lookups into calldata.pil. + // Note: input[0] must never be a padding value and padding can only occur at the last row (i.e. latch = 1). 
+ + // Needs to be committed columns as they are used in the lookups + pol commit sel_not_padding_1, sel_not_padding_2; + sel_not_padding_1 * (1 - sel_not_padding_1) = 0; + sel_not_padding_2 * (1 - sel_not_padding_2) = 0; + + // PADDING_1 == 1 <==> input[1] is a padding value ==> (see #[PADDING_CONSISTENCY]) PADDING_2 == 1 + pol PADDING_1 = sel * (1 - sel_not_padding_1); + // PADDING_2 == 1 <==> input[2] is a padding value <==> we have any padded values + pol PADDING_2 = sel * (1 - sel_not_padding_2); + + // Note: the poseidon trace does not constrain that padded fields == 0, so we must do so here: + // padding_1 == 1 ==> input[1] == 0 + #[PADDED_BY_ZERO_1] + PADDING_1 * input[1] = 0; + // padding_2 == 1 ==> input[2] == 0 + #[PADDED_BY_ZERO_2] + PADDING_2 * input[2] = 0; + + // Note: we could defer much of the below calculation to the poseidon trace with an additional lookup with + // poseidon2_hash.start == 1, poseidon2_hash.padding = sel_padding_1 + sel_padding_2 (would need new col.s). + // We could probably include the rounds_rem check in this as well. + // Maybe this is worth the overhead? + + // If input[1] is a padding value, input[2] must also be a padding value: + // padding_1 == 1 ==> padding_2 == 1 + #[PADDING_CONSISTENCY] + PADDING_1 * sel_not_padding_2 = 0; + // padding_2 == 1 ==> latch == 1 + #[PADDING_END] + PADDING_2 * (1 - latch) = 0; + + // Only valid at the final (latch = 1) row. 
In all other rows, PADDING_1 = PADDING_2 = 0: + // padding_1 == 1 & padding_2 == 1 <==> calldata_size = index[0] + // padding_1 == 0 & padding_2 == 1 <==> calldata_size = index[1] + // padding_1 == 0 & padding_2 == 0 <==> calldata_size = index[2] + #[CHECK_FINAL_INDEX] + latch * ( calldata_size - ( + PADDING_1 * index[0] + // #[PADDING_CONSISTENCY] constrains that PADDING_1 = 1 ==> PADDING_2 = 1 ==> two padding fields + (PADDING_2 - PADDING_1) * index[1] + // one padding field ==> calldata_size = index[1] + sel_not_padding_2 * index[2] // no padding ==> calldata_size = index[2] + )) = 0; + + // Check that our claimed calldata_size matches the final index given by calldata.pil: + // Note: we do not need to check the value here, because #[CHECK_FINAL_INDEX] checks that calldata_size is one of the values of + // index[] looked up above. + // Note: we could probably utilise the other value lookups to check this, but would need to define col.s like + // input_1_is_final to check against calldata.latch for each value at every row. Worth the overhead? + // For empty calldata, the below looks up a special row where calldata.latch = 1 and calldata.index = 0: + #[CHECK_FINAL_SIZE] + latch { calldata_size, context_id } + in + calldata.latch { calldata.index, calldata.context_id }; + + // At the last round, it is checked against the public inputs/tx trace: + pol commit output_hash; + + #[HASH_CONSISTENCY] + (1 - LATCH_CONDITION) * (output_hash' - output_hash) = 0; + + // The length of the hashed calldata in fields, including the prepended separator. We use it to look up into poseidon_2 to ensure that + // the hashed IV matches our calldata length. We cannot just rely on rounds_rem since the padding could be 1 or 2 off. + pol commit input_len; + + #[CALLDATA_HASH_INPUT_LENGTH_FIELDS] + sel * (input_len - (calldata_size + 1)) = 0; + + // The number of rounds (rows) remaining to completely hash the calldata. 
+ // We use it to ensure the ordering of poseidon_2 rounds is correct (i.e. a malicious prover cannot swap the order of poseidon rounds). + pol commit rounds_rem; + + // The rounds remaining decrement each row as long as the row is not latched, otherwise rounds_rem == 1 + // Note: may be able to omit latch * (rounds_rem - 1) since the poseidon trace also constrains this + #[ROUNDS_DECREMENT] + sel * ((1 - LATCH_CONDITION) * (rounds_rem' - rounds_rem + 1) + latch * (rounds_rem - 1)) = 0; + + + #[POSEIDON2_HASH] + sel { + start, + latch, + input[0], + input[1], + input[2], + input_len, + rounds_rem, + output_hash + } in poseidon2_hash.sel { + poseidon2_hash.start, + poseidon2_hash.end, + poseidon2_hash.input_0, + poseidon2_hash.input_1, + poseidon2_hash.input_2, + poseidon2_hash.input_len, + poseidon2_hash.num_perm_rounds_rem, + poseidon2_hash.output + }; diff --git a/barretenberg/cpp/pil/vm2/constants_gen.pil b/barretenberg/cpp/pil/vm2/constants_gen.pil index a8376681ecf5..8509f017ea13 100644 --- a/barretenberg/cpp/pil/vm2/constants_gen.pil +++ b/barretenberg/cpp/pil/vm2/constants_gen.pil @@ -12,13 +12,20 @@ namespace constants; pol MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX = 64; pol MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX = 63; pol MAX_L2_TO_L1_MSGS_PER_TX = 8; - pol MAX_PUBLIC_LOGS_PER_TX = 8; pol MAX_PACKED_PUBLIC_BYTECODE_SIZE_IN_FIELDS = 3000; + pol MAX_PROTOCOL_CONTRACTS = 11; + pol CANONICAL_AUTH_REGISTRY_ADDRESS = 1; pol CONTRACT_INSTANCE_REGISTRY_CONTRACT_ADDRESS = 2; + pol CONTRACT_CLASS_REGISTRY_CONTRACT_ADDRESS = 3; + pol MULTI_CALL_ENTRYPOINT_ADDRESS = 4; pol FEE_JUICE_ADDRESS = 5; + pol ROUTER_ADDRESS = 6; pol FEE_JUICE_BALANCES_SLOT = 1; pol UPDATED_CLASS_IDS_SLOT = 1; - pol PUBLIC_LOG_SIZE_IN_FIELDS = 13; + pol FLAT_PUBLIC_LOGS_HEADER_LENGTH = 1; + pol FLAT_PUBLIC_LOGS_PAYLOAD_LENGTH = 4096; + pol PUBLIC_LOGS_LENGTH = 4097; + pol PUBLIC_LOG_HEADER_LENGTH = 2; pol MAX_PUBLIC_CALLS_TO_UNIQUE_CONTRACT_CLASS_IDS = 21; pol MEM_TAG_FF = 0; pol MEM_TAG_U1 
= 1; @@ -43,10 +50,11 @@ namespace constants; pol AVM_SUBTRACE_ID_TO_RADIX = 64; pol AVM_SUBTRACE_ID_ECC = 128; pol AVM_SUBTRACE_ID_KECCAKF1600 = 256; - pol AVM_SUBTRACE_ID_DATA_COPY = 512; + pol AVM_SUBTRACE_ID_CALLDATA_COPY = 512; pol AVM_SUBTRACE_ID_GETCONTRACTINSTANCE = 1024; pol AVM_SUBTRACE_ID_EMITUNENCRYPTEDLOG = 2048; pol AVM_SUBTRACE_ID_SHA256_COMPRESSION = 4096; + pol AVM_SUBTRACE_ID_RETURNDATA_COPY = 8192; pol AVM_DYN_GAS_ID_CALLDATACOPY = 1; pol AVM_DYN_GAS_ID_RETURNDATACOPY = 2; pol AVM_DYN_GAS_ID_TORADIX = 4; @@ -95,53 +103,58 @@ namespace constants; pol AVM_PUBLIC_INPUTS_GLOBAL_VARIABLES_COINBASE_ROW_IDX = 5; pol AVM_PUBLIC_INPUTS_GLOBAL_VARIABLES_FEE_RECIPIENT_ROW_IDX = 6; pol AVM_PUBLIC_INPUTS_GLOBAL_VARIABLES_GAS_FEES_ROW_IDX = 7; - pol AVM_PUBLIC_INPUTS_START_TREE_SNAPSHOTS_ROW_IDX = 8; - pol AVM_PUBLIC_INPUTS_START_TREE_SNAPSHOTS_L1_TO_L2_MESSAGE_TREE_ROW_IDX = 8; - pol AVM_PUBLIC_INPUTS_START_TREE_SNAPSHOTS_NOTE_HASH_TREE_ROW_IDX = 9; - pol AVM_PUBLIC_INPUTS_START_TREE_SNAPSHOTS_NULLIFIER_TREE_ROW_IDX = 10; - pol AVM_PUBLIC_INPUTS_START_TREE_SNAPSHOTS_PUBLIC_DATA_TREE_ROW_IDX = 11; - pol AVM_PUBLIC_INPUTS_START_GAS_USED_ROW_IDX = 12; - pol AVM_PUBLIC_INPUTS_GAS_SETTINGS_ROW_IDX = 13; - pol AVM_PUBLIC_INPUTS_FEE_PAYER_ROW_IDX = 18; - pol AVM_PUBLIC_INPUTS_PUBLIC_SETUP_CALL_REQUESTS_ROW_IDX = 22; - pol AVM_PUBLIC_INPUTS_PUBLIC_APP_LOGIC_CALL_REQUESTS_ROW_IDX = 54; - pol AVM_PUBLIC_INPUTS_PUBLIC_TEARDOWN_CALL_REQUEST_ROW_IDX = 86; - pol AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_ARRAY_LENGTHS_ROW_IDX = 87; - pol AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_ARRAY_LENGTHS_ROW_IDX = 90; - pol AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_ROW_IDX = 93; - pol AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_NOTE_HASHES_ROW_IDX = 93; - pol AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_NULLIFIERS_ROW_IDX = 157; - pol 
AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX = 221; - pol AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_ROW_IDX = 229; - pol AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_NOTE_HASHES_ROW_IDX = 229; - pol AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_NULLIFIERS_ROW_IDX = 293; - pol AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX = 357; - pol AVM_PUBLIC_INPUTS_END_TREE_SNAPSHOTS_ROW_IDX = 365; - pol AVM_PUBLIC_INPUTS_END_TREE_SNAPSHOTS_L1_TO_L2_MESSAGE_TREE_ROW_IDX = 365; - pol AVM_PUBLIC_INPUTS_END_TREE_SNAPSHOTS_NOTE_HASH_TREE_ROW_IDX = 366; - pol AVM_PUBLIC_INPUTS_END_TREE_SNAPSHOTS_NULLIFIER_TREE_ROW_IDX = 367; - pol AVM_PUBLIC_INPUTS_END_TREE_SNAPSHOTS_PUBLIC_DATA_TREE_ROW_IDX = 368; - pol AVM_PUBLIC_INPUTS_END_GAS_USED_ROW_IDX = 369; - pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_ARRAY_LENGTHS_NOTE_HASHES_ROW_IDX = 370; - pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_ARRAY_LENGTHS_NULLIFIERS_ROW_IDX = 371; - pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_ARRAY_LENGTHS_L2_TO_L1_MSGS_ROW_IDX = 372; - pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_ARRAY_LENGTHS_PUBLIC_LOGS_ROW_IDX = 373; - pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_ARRAY_LENGTHS_PUBLIC_DATA_WRITES_ROW_IDX = 374; - pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_ROW_IDX = 375; - pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_NOTE_HASHES_ROW_IDX = 375; - pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_NULLIFIERS_ROW_IDX = 439; - pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX = 503; - pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_PUBLIC_LOGS_ROW_IDX = 511; - pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_PUBLIC_DATA_WRITES_ROW_IDX = 615; - pol AVM_PUBLIC_INPUTS_TRANSACTION_FEE_ROW_IDX = 679; - pol AVM_PUBLIC_INPUTS_REVERTED_ROW_IDX = 680; - pol AVM_PUBLIC_INPUTS_COLUMNS_MAX_LENGTH = 681; + pol AVM_PUBLIC_INPUTS_PROTOCOL_CONTRACTS_ROW_IDX = 8; + pol AVM_PUBLIC_INPUTS_START_TREE_SNAPSHOTS_ROW_IDX = 19; + pol 
AVM_PUBLIC_INPUTS_START_TREE_SNAPSHOTS_L1_TO_L2_MESSAGE_TREE_ROW_IDX = 19; + pol AVM_PUBLIC_INPUTS_START_TREE_SNAPSHOTS_NOTE_HASH_TREE_ROW_IDX = 20; + pol AVM_PUBLIC_INPUTS_START_TREE_SNAPSHOTS_NULLIFIER_TREE_ROW_IDX = 21; + pol AVM_PUBLIC_INPUTS_START_TREE_SNAPSHOTS_PUBLIC_DATA_TREE_ROW_IDX = 22; + pol AVM_PUBLIC_INPUTS_START_GAS_USED_ROW_IDX = 23; + pol AVM_PUBLIC_INPUTS_GAS_SETTINGS_ROW_IDX = 24; + pol AVM_PUBLIC_INPUTS_GAS_SETTINGS_GAS_LIMITS_ROW_IDX = 24; + pol AVM_PUBLIC_INPUTS_GAS_SETTINGS_TEARDOWN_GAS_LIMITS_ROW_IDX = 25; + pol AVM_PUBLIC_INPUTS_FEE_PAYER_ROW_IDX = 29; + pol AVM_PUBLIC_INPUTS_PUBLIC_SETUP_CALL_REQUESTS_ROW_IDX = 34; + pol AVM_PUBLIC_INPUTS_PUBLIC_APP_LOGIC_CALL_REQUESTS_ROW_IDX = 66; + pol AVM_PUBLIC_INPUTS_PUBLIC_TEARDOWN_CALL_REQUEST_ROW_IDX = 98; + pol AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_ARRAY_LENGTHS_ROW_IDX = 99; + pol AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_ARRAY_LENGTHS_ROW_IDX = 102; + pol AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_ROW_IDX = 105; + pol AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_NOTE_HASHES_ROW_IDX = 105; + pol AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_NULLIFIERS_ROW_IDX = 169; + pol AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX = 233; + pol AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_ROW_IDX = 241; + pol AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_NOTE_HASHES_ROW_IDX = 241; + pol AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_NULLIFIERS_ROW_IDX = 305; + pol AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX = 369; + pol AVM_PUBLIC_INPUTS_END_TREE_SNAPSHOTS_ROW_IDX = 377; + pol AVM_PUBLIC_INPUTS_END_TREE_SNAPSHOTS_L1_TO_L2_MESSAGE_TREE_ROW_IDX = 377; + pol AVM_PUBLIC_INPUTS_END_TREE_SNAPSHOTS_NOTE_HASH_TREE_ROW_IDX = 378; + pol AVM_PUBLIC_INPUTS_END_TREE_SNAPSHOTS_NULLIFIER_TREE_ROW_IDX = 379; + pol 
AVM_PUBLIC_INPUTS_END_TREE_SNAPSHOTS_PUBLIC_DATA_TREE_ROW_IDX = 380; + pol AVM_PUBLIC_INPUTS_END_GAS_USED_ROW_IDX = 381; + pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_ARRAY_LENGTHS_NOTE_HASHES_ROW_IDX = 382; + pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_ARRAY_LENGTHS_NULLIFIERS_ROW_IDX = 383; + pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_ARRAY_LENGTHS_L2_TO_L1_MSGS_ROW_IDX = 384; + pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_ARRAY_LENGTHS_PUBLIC_DATA_WRITES_ROW_IDX = 385; + pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_ROW_IDX = 386; + pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_NOTE_HASHES_ROW_IDX = 386; + pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_NULLIFIERS_ROW_IDX = 450; + pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX = 514; + pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_PUBLIC_LOGS_ROW_IDX = 522; + pol AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_PUBLIC_DATA_WRITES_ROW_IDX = 4619; + pol AVM_PUBLIC_INPUTS_TRANSACTION_FEE_ROW_IDX = 4683; + pol AVM_PUBLIC_INPUTS_REVERTED_ROW_IDX = 4684; + pol AVM_PUBLIC_INPUTS_COLUMNS_MAX_LENGTH = 4685; pol AVM_NUM_PUBLIC_INPUT_COLUMNS = 4; - pol AVM_PUBLIC_INPUTS_COLUMNS_COMBINED_LENGTH = 2724; + pol AVM_PUBLIC_INPUTS_COLUMNS_COMBINED_LENGTH = 18740; pol AVM_PC_SIZE_IN_BITS = 32; pol AVM_WRITTEN_PUBLIC_DATA_SLOTS_TREE_HEIGHT = 6; pol AVM_WRITTEN_PUBLIC_DATA_SLOTS_TREE_INITIAL_ROOT = 18291678969210913367302010540259942201271604198321103848479209155223586227821; pol AVM_WRITTEN_PUBLIC_DATA_SLOTS_TREE_INITIAL_SIZE = 1; + pol AVM_RETRIEVED_BYTECODES_TREE_HEIGHT = 5; + pol AVM_RETRIEVED_BYTECODES_TREE_INITIAL_ROOT = 7257575663883662864904159007845791361042428565864275462740313586853981161757; + pol AVM_RETRIEVED_BYTECODES_TREE_INITIAL_SIZE = 1; pol TIMESTAMP_OF_CHANGE_BIT_SIZE = 32; pol UPDATES_DELAYED_PUBLIC_MUTABLE_VALUES_LEN = 3; pol UPDATES_DELAYED_PUBLIC_MUTABLE_METADATA_BIT_SIZE = 144; diff --git a/barretenberg/cpp/pil/vm2/context.pil b/barretenberg/cpp/pil/vm2/context.pil index 7a26fe508eeb..917ed3c375d1 100644 --- 
a/barretenberg/cpp/pil/vm2/context.pil +++ b/barretenberg/cpp/pil/vm2/context.pil @@ -1,5 +1,7 @@ include "context_stack.pil"; +// TODO: Usage of sel_error in this whole file has to be reconsidered. + // This is a virtual gadget, which is part of the execution trace. // This subtrace is focused on managing the changes to the context. // By default (i.e. when not executing a context-changing opcode), @@ -10,14 +12,18 @@ namespace execution; #[skippable_if] sel = 0; + // TODO: make sure that all uses of `sel_error` here really should be "error" only + // since `sel_error` will not be `1` for the REVERT opcode. If we need "error or REVERT", + // use `sel_failure`. + // Guaranteed to be boolean because sel_execute_call & sel_execute_static_call are mutually exclusive pol commit sel_enter_call; - // Handle error separately since it takes priority and may occur during an sel_x_call - sel_enter_call = (sel_execute_call + sel_execute_static_call) * (1 - sel_error); + // The following selectors will be 0 if there is an error during execution (before the opcode execution step). 
+ sel_enter_call = sel_execute_call + sel_execute_static_call; // CALL & precomputed.first_row are mutually exclusive sel_enter_call * precomputed.first_row = 0; - // sel_exit_call is used to flag if we are returning or reverting or we error + // sel_exit_call is used to flag if we are returning or reverting or there has been ANY error during execution // sel_execute_revert & sel_execute_return are mutually exclusive pol commit sel_exit_call; sel_exit_call = 1 - (1 - sel_execute_revert - sel_execute_return) * (1 - sel_error); @@ -31,12 +37,15 @@ namespace execution; pol commit contract_address; pol commit bytecode_id; pol commit transaction_fee; - // Constrained boolean by tx trace (for enqueued call) and #[NEXT_IS_STATIC] for nested + // Constrained boolean by tx trace for enqueued call, #[IS_STATIC_NEXT_ROW] during normal execution, + // IS_STATIC_IF_STATIC_CALL+IS_STATIC_IF_CALL_FROM_STATIC_CONTEXT for nested calls, + // and CTX_STACK_CALL for returns or failures. pol commit is_static; pol commit parent_calldata_addr; pol commit parent_calldata_size; + pol commit last_child_id; pol commit last_child_returndata_addr; pol commit last_child_returndata_size; pol commit last_child_success; // Careful with this for now... 
@@ -44,6 +53,10 @@ namespace execution; // ==== Gas ==== pol commit l2_gas_limit; pol commit da_gas_limit; + + pol commit prev_l2_gas_used; + pol commit prev_da_gas_used; + pol commit l2_gas_used; pol commit da_gas_used; @@ -70,6 +83,9 @@ namespace execution; // L1 to L2 tree doesn't evolve during execution of the AVM pol commit l1_l2_tree_root; + pol commit prev_retrieved_bytecodes_tree_root; + pol commit prev_retrieved_bytecodes_tree_size; + // Next Tree State pol commit note_hash_tree_root; // TODO: Constrain root, sizes and emitted not changing unless specific opcode selectors are on pol commit note_hash_tree_size; @@ -85,12 +101,15 @@ namespace execution; pol commit written_public_data_slots_tree_root; pol commit written_public_data_slots_tree_size; + pol commit retrieved_bytecodes_tree_root; + pol commit retrieved_bytecodes_tree_size; + // Prev side effects state - pol commit prev_num_unencrypted_logs; + pol commit prev_num_unencrypted_log_fields; pol commit prev_num_l2_to_l1_messages; // Next side effects state - pol commit num_unencrypted_logs; + pol commit num_unencrypted_log_fields; pol commit num_l2_to_l1_messages; // ==== Helper columns ==== @@ -114,16 +133,23 @@ namespace execution; // next_context_id increments with each invocation of an external call or new enqueued call pol commit next_context_id; // Can be replaced by clk + + // We always start enqueued calls with fresh context ids, so the next context id must start at context_id + 1 + #[ENQUEUED_CALL_START_NEXT_CTX_ID] + enqueued_call_start * (context_id + 1 - next_context_id) = 0; + // The initial next_context_id = 2, in row = 1 #[INCR_NEXT_CONTEXT_ID] - NOT_LAST_EXEC * (next_context_id' - (next_context_id + sel_enter_call)) = 0; + NOT_LAST_EXEC * (next_context_id' - (next_context_id + sel_enter_call + enqueued_call_start')) = 0; // nested_exit_call = 1 ==> context_id' = parent_id // sel_enter_call = 1 ==> context_id' = next_context_id // otherwise = 0 ==> context_id' = context_id - 
#[CONTEXT_ID_CALL_NEXT_ROW] + #[CONTEXT_ID_NEXT_ROW] NOT_LAST_EXEC * DEFAULT_CTX_ROW * (context_id' - context_id) = 0; + #[CONTEXT_ID_EXT_CALL] NOT_LAST_EXEC * sel_enter_call * (context_id' - next_context_id) = 0; + #[CONTEXT_ID_NESTED_EXIT] NOT_LAST_EXEC * nested_exit_call * (context_id' - parent_id) = 0; // nested_exit_call = 1 ==> constraints come from lookup @@ -162,7 +188,6 @@ namespace execution; #[BYTECODE_ID_NEXT_ROW] NOT_LAST_EXEC * DEFAULT_CTX_ROW * (bytecode_id' - bytecode_id) = 0; - // transaction fee gets set at the start of an enqueued call and remains the same throughout // TODO(dbanks12): tracegen and test this #[TRANSACTION_FEE_NEXT_ROW] @@ -176,40 +201,69 @@ namespace execution; // otherwise = 0 ==> is_static' = is_static #[IS_STATIC_NEXT_ROW] NOT_LAST_EXEC * DEFAULT_CTX_ROW * (is_static' - is_static) = 0; - NOT_LAST_EXEC * sel_enter_call * (is_static' - sel_execute_static_call) = 0; + // An external call from a non-static context only creates a nested static context if the opcode is STATICCALL. + #[IS_STATIC_IF_STATIC_CALL] + NOT_LAST_EXEC * sel_enter_call * (1 - is_static) * (is_static' - sel_execute_static_call) = 0; + // An external call from a static context always creates a nested static context. 
+ #[IS_STATIC_IF_CALL_FROM_STATIC_CONTEXT] + NOT_LAST_EXEC * sel_enter_call * is_static * (is_static' - 1) = 0; // nested_exit_call = 1 ==> constraints come from lookup - // sel_enter_call = 1 ==> parent_calldata_addr' = rop[3] (resolved operand 4 from execution trace) + // sel_enter_call = 1 ==> parent_calldata_addr' = rop[4] (resolved operand 5 from execution trace) // otherwise = 0 ==> parent_calldata_addr' = parent_calldata_addr #[CD_OFFSET_NEXT_ROW] NOT_LAST_EXEC * DEFAULT_CTX_ROW * (parent_calldata_addr' - parent_calldata_addr) = 0; - NOT_LAST_EXEC * sel_enter_call * (parent_calldata_addr' - rop[3]) = 0; + NOT_LAST_EXEC * sel_enter_call * (parent_calldata_addr' - rop[4]) = 0; // nested_exit_call = 1 ==> constraints come from lookup - // sel_enter_call = 1 ==> parent_calldata_size' = rop[4] (resolved operand 5 from execution trace) - // otherwise = 0 ==> parent_calldata_siz' = parent_calldata_size + // sel_enter_call = 1 ==> parent_calldata_size' = register[3] (register 4 from execution trace) + // otherwise = 0 ==> parent_calldata_size' = parent_calldata_size #[CD_SIZE_NEXT_ROW] NOT_LAST_EXEC * DEFAULT_CTX_ROW * (parent_calldata_size' - parent_calldata_size) = 0; - NOT_LAST_EXEC * sel_enter_call * (parent_calldata_size' - rop[4]) = 0; + NOT_LAST_EXEC * sel_enter_call * (parent_calldata_size' - register[3]) = 0; pol NESTED_RET_REV_ONLY = nested_exit_call * (1 - sel_error); + // NESTED_RET_REV_ONLY = 1 ==> rop[1] (resolved operand 2 from execution trace) - // sel_error = 1 ==> last_child_returndata_size' = 0; + // sel_error = 1 ==> last_child_returndata_addr' = 0; // sel_enter_call = 1 ==> last_child_returndata_addr' = 0; + // enqueued_call_start = 1 ==> last_child_returndata_addr = 0; <-- This row is 0 // otherwise = 0 ==> last_child_returndata_addr' = last_child_returndata_addr - #[RD_OFFSET_NEXT_ROW] - NOT_LAST_EXEC * DEFAULT_CTX_ROW * (last_child_returndata_addr' - last_child_returndata_addr) = 0; + #[RET_REV_RD_ADDR] NOT_LAST_EXEC * NESTED_RET_REV_ONLY * 
(last_child_returndata_addr' - rop[1]) = 0; + #[NEXT_RD_ADDR_IS_ZERO] NOT_LAST_EXEC * (sel_enter_call + sel_error) * last_child_returndata_addr' = 0; + #[RD_ADDR_IS_ZERO] + enqueued_call_start * last_child_returndata_addr = 0; + #[PROPAGATE_RD_ADDR] + NOT_LAST_EXEC * DEFAULT_CTX_ROW * (last_child_returndata_addr' - last_child_returndata_addr) = 0; // NESTED_RET_REV_ONLY = 1 ==> register[0] (intermediate register 1 from execution trace) - // sel_error = 1 ==> last_child_returndata_size' = 0; - // sel_enter_call = 1 ==> last_child_returndata_size' = 0; - // otherwise = 0 ==> last_child_returndata_size' = last_child_returndata_size - #[RD_SIZE_OFFSET_NEXT_ROW] - NOT_LAST_EXEC * DEFAULT_CTX_ROW * (last_child_returndata_size' - last_child_returndata_size) = 0; + // sel_error = 1 ==> last_child_returndata_size' = 0; + // sel_enter_call = 1 ==> last_child_returndata_size' = 0; + // enqueued_call_start = 1 ==> last_child_returndata_size = 0; <-- Current row is 0 + // otherwise = 0 ==> last_child_returndata_size' = last_child_returndata_size + #[RET_REV_RD_SIZE] NOT_LAST_EXEC * NESTED_RET_REV_ONLY * (last_child_returndata_size' - register[0]) = 0; + #[NEXT_RD_SIZE_IS_ZERO] NOT_LAST_EXEC * (sel_enter_call + sel_error) * last_child_returndata_size' = 0; + #[RD_SIZE_IS_ZERO] + enqueued_call_start * last_child_returndata_size = 0; + #[PROPAGATE_RD_SIZE] + NOT_LAST_EXEC * DEFAULT_CTX_ROW * (last_child_returndata_size' - last_child_returndata_size) = 0; + + // nested_exit_call = 1 ==> last_child_id' = context_id; <-- sel_exit_call includes error case + // sel_enter_call = 1 ==> last_child_id' = 0; + // enqueued_call_start = 1 ==> last_child_id = 0; <-- Current row is 0 + // otherwise = 0 ==> last_child_id' = last_child_id; + #[EXIT_CALL_LAST_CHILD_ID] + NOT_LAST_EXEC * nested_exit_call * (last_child_id' - context_id) = 0; + #[ENTER_CALL_LAST_CHILD_ID] + NOT_LAST_EXEC * sel_enter_call * last_child_id' = 0; + #[LAST_CHILD_ID_IS_ZERO] + enqueued_call_start * last_child_id = 0; + 
#[PROPAGATE_LAST_CHILD_ID] + NOT_LAST_EXEC * DEFAULT_CTX_ROW * (last_child_id' - last_child_id) = 0; // nested_exit_call = 1 ==> l2_gas_limit' = parent_l2_gas_limit // sel_enter_call = 1 ==> constraints come from call logic @@ -259,6 +313,13 @@ namespace execution; #[PARENT_DA_GAS_USED_STORE_ON_ENTER] NOT_LAST_EXEC * sel_enter_call * (parent_da_gas_used' - da_gas_used) = 0; + // The state of the retrieved bytecodes tree should be continuous unless we have finished an enqueued call + // The tx trace looks up the tree state at the start and end of an enqueued call + #[RETRIEVED_BYTECODES_TREE_ROOT_CONTINUITY] + sel * (1 - enqueued_call_end) * (retrieved_bytecodes_tree_root - prev_retrieved_bytecodes_tree_root') = 0; + #[RETRIEVED_BYTECODES_TREE_SIZE_CONTINUITY] + sel * (1 - enqueued_call_end) * (retrieved_bytecodes_tree_size - prev_retrieved_bytecodes_tree_size') = 0; + #[CTX_STACK_CALL] sel_enter_call { next_context_id, @@ -285,7 +346,8 @@ namespace execution; public_data_tree_size, written_public_data_slots_tree_root, written_public_data_slots_tree_size, - num_unencrypted_logs, + // Retrieved bytecodes tree is not committed and restored, since it's a tx global state. 
+ num_unencrypted_log_fields, num_l2_to_l1_messages } in context_stack.sel { @@ -313,7 +375,7 @@ namespace execution; context_stack.public_data_tree_size, context_stack.written_public_data_slots_tree_root, context_stack.written_public_data_slots_tree_size, - context_stack.num_unencrypted_logs, + context_stack.num_unencrypted_log_fields, context_stack.num_l2_to_l1_messages }; @@ -350,7 +412,7 @@ namespace execution; prev_public_data_tree_size', prev_written_public_data_slots_tree_root', prev_written_public_data_slots_tree_size', - prev_num_unencrypted_logs', + prev_num_unencrypted_log_fields', prev_num_l2_to_l1_messages' } in context_stack.sel { @@ -378,12 +440,12 @@ namespace execution; context_stack.public_data_tree_size, context_stack.written_public_data_slots_tree_root, context_stack.written_public_data_slots_tree_size, - context_stack.num_unencrypted_logs, + context_stack.num_unencrypted_log_fields, context_stack.num_l2_to_l1_messages }; pol commit nested_return; - nested_return = nested_exit_call * sel_execute_return; + nested_return = nested_exit_call * sel_execute_return * (1 - sel_error); #[CTX_STACK_RETURN] nested_return { context_id, @@ -423,15 +485,12 @@ namespace execution; // If any error happened during execution (i.e., sel_error=1), all gas should be consumed. 
pol SEL_CONSUMED_ALL_GAS = sel_error; - (l2_gas_limit - PREV_GAS_PLUS_USAGE_L2) * SEL_CONSUMED_ALL_GAS + PREV_GAS_PLUS_USAGE_L2 - l2_gas_used = 0; - (da_gas_limit - PREV_GAS_PLUS_USAGE_DA) * SEL_CONSUMED_ALL_GAS + PREV_GAS_PLUS_USAGE_DA - da_gas_used = 0; + (l2_gas_limit - total_gas_l2) * SEL_CONSUMED_ALL_GAS + total_gas_l2 - l2_gas_used = 0; + (da_gas_limit - total_gas_da) * SEL_CONSUMED_ALL_GAS + total_gas_da - da_gas_used = 0; // nested_exit_call = 1 <==> prev_gas_used' = parent_gas_used + gas_used // sel_enter_call = 1 <==> prev_gas_used' = 0 // otherwise = 0 <==> prev_gas_used' = gas_used - pol commit prev_l2_gas_used; - pol commit prev_da_gas_used; - #[L2_GAS_USED_CONTINUITY] NOT_LAST_EXEC * DEFAULT_CTX_ROW * (l2_gas_used - prev_l2_gas_used') = 0; #[L2_GAS_USED_ZERO_AFTER_CALL] @@ -473,6 +532,6 @@ namespace execution; NOT_LAST_EXEC * (l1_l2_tree_root - l1_l2_tree_root') = 0; #[NUM_UNENCRYPTED_LOGS_CONTINUITY] - NOT_LAST_EXEC * DEFAULT_OR_NESTED_RETURN * (num_unencrypted_logs - prev_num_unencrypted_logs') = 0; + NOT_LAST_EXEC * DEFAULT_OR_NESTED_RETURN * (num_unencrypted_log_fields - prev_num_unencrypted_log_fields') = 0; #[NUM_L2_TO_L1_MESSAGES_CONTINUITY] NOT_LAST_EXEC * DEFAULT_OR_NESTED_RETURN * (num_l2_to_l1_messages - prev_num_l2_to_l1_messages') = 0; diff --git a/barretenberg/cpp/pil/vm2/context_stack.pil b/barretenberg/cpp/pil/vm2/context_stack.pil index 69f8dc67fe5b..db6fb7ecaeee 100644 --- a/barretenberg/cpp/pil/vm2/context_stack.pil +++ b/barretenberg/cpp/pil/vm2/context_stack.pil @@ -44,5 +44,5 @@ namespace context_stack; pol commit written_public_data_slots_tree_size; // Side effects state - pol commit num_unencrypted_logs; + pol commit num_unencrypted_log_fields; pol commit num_l2_to_l1_messages; diff --git a/barretenberg/cpp/pil/vm2/data_copy.pil b/barretenberg/cpp/pil/vm2/data_copy.pil index 964916a5f5fd..32c06ef65414 100644 --- a/barretenberg/cpp/pil/vm2/data_copy.pil +++ b/barretenberg/cpp/pil/vm2/data_copy.pil @@ -3,15 +3,36 @@ include 
"calldata.pil"; include "precomputed.pil"; include "constants_gen.pil"; include "range_check.pil"; +include "context.pil"; /** This trace handles CALLDATACOPY and RETURNDATACOPY - * The data_copy gadget handles CALLDATACOPY (both enqueued and nested) and RETURNDATACOPY + * The data_copy gadget handles CALLDATACOPY and RETURNDATACOPY (both enqueued and nested) + * + * Opcode operands (relevant in EXECUTION when interacting with this gadget): + * - register[0]: copy_size + * - register[1]: copy_offset + * - rop[2]: dst_addr + * + * Memory I/O, this subtrace can potentially read and write across two different memory space ids (indicated by the context_ids) + * All reads are performed in the src context (using the src_context_id) and writes are performed in the current executing + * context (using dst_context_id). + * - M[src_addr]: aka value[0] (the first value read from the src context) + * - the memory tag is ignored for these reads + * - M[src_addr + max_read_index]: aka value[max_read_index] (the last value read from the src context) + * - max_read_index is derived from the copy_size, see pil relations for an explanation + * - the memory tag is ignored for these reads + * - M[dst_addr]: aka output[0] (the first value written to the dst context) + * - guaranteed by this gadget to be FF + * - M[dst_addr + copy_size]: aka output[copy_size] (the last value written to the dst context) + * - guaranteed by this gadget to be FF + * * ERROR HANDLING: * There is one potential error that is checked: memory out of range accesses * If there are no errors, we read and write the calldata/returndata from the parent/child context to the current context * COMPUTING AMOUNT OF DATA TO READ * We need to ensure that we do not read outside the bounds designated by the parent/child context for their respective data. * this max_read_index is computed via min(data_size, copy_size + copy_offset). 
+ * * READING / WRITING DATA * At each row, the i-th data is simultaneously read from the parent/child and written into the current context * For top level calldatacopy, the data is retrieved from the calldata column. @@ -21,22 +42,42 @@ include "range_check.pil"; * padding rows are constrained to have the value = 0. * * It is memory aware and so is expected to call the memory subtrace directly - * Example: Lookup to execution trace - * execution.sel_data_copy { - * clk, context_id, - * context_id, parent_id - * reg1, mem_tag1, reg2, mem_tag2, rop3 - * parent_callsrc_data_size, parent_calloffset, - * gadget_id - * } - * in - * sel_data_copy { - * clk, context_id, - * src_context_id, dst_context_id, - * copy_size, copy_size_mem_tag, offset, offset_mem_tag, dst_address - * src_data_size, src_addr, - * operation_id - * } + * + * Note that there are two ways that this subtrace is invoked from the execution trace: CD_COPY or RD_COPY + * This requires two permutations because they operate on different execution trace cols (parent vs child) + * CD COPY + * execution.sel_calldata_copy { + * clk, + * parent_id, context_id, + * reg[0], reg[1], rop[2], + * parent_calldata_addr, parent_calldata_size, + * sel_calldata_copy, sel_opcode_error + * } + * in + * sel_start { + * clk, + * src_context_id, dst_context_id, + * copy_size, offset, dst_addr, + * src_addr, src_data_size, + * sel_cd_copy, err + * } + * + * RD COPY + * execution.sel_returndata_copy { + * clk, + * last_child_id, context_id, + * reg[0], reg[1], rop[2], + * last_child_returndata_addr, last_child_returndata_size, + * sel_returndata_copy, sel_opcode_error + * } + * is + * sel_rd_copy_start { + * clk, + * src_context_id, dst_context_id, + * copy_size, offset, dst_addr, + * src_addr, src_data_size, + * sel_rd_copy, err + * }; * * Reading from calldata column * Calldata Trace @@ -47,12 +88,12 @@ include "range_check.pil"; * | 1 | 200 | 2 | 1 | * | 1 | 300 | 3 | 1 | * +-----+-------+-------+------------+ - * Execution 
Trace (cd_size) (cd_offset) - * +-----+-----+------------+-----------+------------+------------+ - * | clk | sel | context_id | parent_id | register_1 | register_2 | - * +-----+-----+------------+-----------+------------+------------+ - * | 1 | 1 | 1 | 0 | 3 | 0 | - * +-----+-----+------------+-----------+------------+------------+ + * Execution Trace (dst_addr) (cd_size) (cd_offset) + * +-----+-----+------------+-----------+---------------+------------+------------+ + * | clk | sel | context_id | parent_id | resolved_op_2 | register_0 | register_1 | + * +-----+-----+------------+-----------+---------------+------------+------------+ + * | 1 | 1 | 1 | 0 | 0 | 3 | 0 | + * +-----+-----+------------+-----------+---------------+------------+------------+ * DataCopy Trace * +-------------+------------+------------+-----------+------------------+----------+-------+------------+ * | sel_cd_copy | src_ctx_id | dst_ctx_id | copy_size | cd_copy_col_read | cd_index | value | dst_addr | @@ -61,7 +102,7 @@ include "range_check.pil"; * | 1 | 0 | 1 | 2 | 1 | 2 | 200 | 6 | * | 1 | 0 | 1 | 1 | 1 | 3 | 300 | 7 | * +-------------+------------+------------+-----------+------------------+----------+-------+------------+ - */ + */ namespace data_copy; @@ -77,32 +118,12 @@ namespace data_copy; pol commit sel_rd_copy; sel_rd_copy * (1 - sel_rd_copy) = 0; - // Gadget ID is supplied by the execution trace, if non-zero it can be 1 or 2 (instruction spec constrained) - // depending on if the operation is calldatacopy or returndatacopy respectively - pol commit operation_id; - // Bitwise decomposition - operation_id = sel_cd_copy + (2 ** 1) * sel_rd_copy; - // Two varieties depending of if we gate by error - pol SEL_NO_ERR = SEL * (1 - err); - pol commit clk; // Things are range checked to 32 bits - pol commit thirty_two; + pol commit thirty_two; // todo: while we do not support constants in lookups SEL * (thirty_two - 32) = 0; - ////////////////////////////// - // Error Flags - 
////////////////////////////// - pol commit src_out_of_range_err; // Read slices should be in MEM range - src_out_of_range_err * (1 - src_out_of_range_err) = 0; - pol commit dst_out_of_range_err; // Write slices should be in MEM range - dst_out_of_range_err * (1 - dst_out_of_range_err) = 0; - - // Consolidate the errors - pol commit err; - err = 1 - (1 - dst_out_of_range_err) * (1 - src_out_of_range_err); - /////////////////////////////// // Inputs from execution trace /////////////////////////////// @@ -121,14 +142,14 @@ namespace data_copy; ////////////////////////////// // These relations occur independent of if we error (mainly because they help in finding out if there is an error) // Start and end constrained in "Control Flow Management" section - pol commit sel_start; - sel_start * (1 - sel_start) = 0; + pol commit sel_start; // sel_start = 1 ==> SEL = 1 + sel_start * (1 - SEL) = 0; // End controls most of the row propagation, so if we error we also set end to turn off row propagation - pol commit sel_end; - sel_end * (1 - sel_end) = 0; + pol commit sel_end; // sel_end = 1 ==> SEL = 1 + sel_end * (1 - SEL) = 0; - // Check if this is a nested or enqueued call + // is_top_level if this is an enqueued call pol commit is_top_level; // == 1 iff parent_id == 0 is_top_level * (1 - is_top_level) = 0; pol commit parent_id_inv; // For zero-check of has_parent_ctx @@ -145,87 +166,95 @@ namespace data_copy; // 1) (offset + copy_size) > src_data_size or // 2) (offset + copy_size) <= src_data_size // if (1) then max_read_index = src_data_size, otherwise max_read_index = (offset + copy_size) - // these are enforced to be correct by are 32 bit range check of |a - b| - - // Convert comparisons to subtractions - check we don't underflow by range checking the absolute difference - pol OFFSET_PLUS_SIZE = offset + copy_size; - pol DATA_SIZE_LT = OFFSET_PLUS_SIZE - src_data_size - 1; // (offset + copy_size) > src_data_size - pol DATA_SIZE_GTE = src_data_size - OFFSET_PLUS_SIZE; // 
(offset + copy_size) <= src_data_size + pol commit offset_plus_size; + offset_plus_size = sel_start * (offset + copy_size); + pol commit offset_plus_size_is_gt; - pol commit src_data_size_is_lt; // Prover claims which one is the smaller of the two - src_data_size_is_lt * (1 - src_data_size_is_lt) = 0; - - pol MAX_READ_DIFF = src_data_size_is_lt * DATA_SIZE_LT + (1 - src_data_size_is_lt) * DATA_SIZE_GTE; - pol commit abs_diff_max_read_index; // Needed for the range check lookup - SEL * sel_start * (abs_diff_max_read_index - MAX_READ_DIFF) = 0; - #[RANGE_MAX_READ_SIZE_DIFF] - sel_start { abs_diff_max_read_index, thirty_two } in range_check.sel { range_check.value, range_check.rng_chk_bits }; + #[MAX_READ_INDEX_GT] + sel_start { offset_plus_size, src_data_size, offset_plus_size_is_gt } + in + gt.sel_others { gt.input_a, gt.input_b, gt.res }; - // Based on the prover's claim, we select the smaller of the two - pol MAX_READ_INDEX = src_data_size_is_lt * src_data_size + (1 - src_data_size_is_lt) * OFFSET_PLUS_SIZE; + // Set max_read_index based on the conditions (1) or (2) from above + pol commit max_read_index; + max_read_index = sel_start * ((src_data_size - offset_plus_size) * offset_plus_size_is_gt + offset_plus_size); ////////////////////////////// // Error Handling ////////////////////////////// - // The one error that we need to handle - // Memory Out of Range: If reading or writing would access an address outside of the AVM memory range - // If there is an error, no data copy operation is performed + // Both errors are constrained to be boolean by the lookup into gt. (provided that sel_start == 1). + pol commit src_out_of_range_err; // Read slices should be in MEM range + pol commit dst_out_of_range_err; // Write slices should be in MEM range - // Memory Out of Range, this section checks that the maximum number of reads ans writes do not - // If top level, we trivially succeed since there is no mem read i.e. 
we cannot have a src_out_of_range_err - pol MAX_READ_ADDR = (src_addr + MAX_READ_INDEX) * (1 - is_top_level); - pol commit abs_read_diff; - #[SRC_OUT_OF_RANGE] // MAX_MEM_ADDR < MAX_READ_ADDR or MAX_MEM_ADDR >= MAX_READ_ADDR - SEL * sel_start * (src_out_of_range_err * (MAX_READ_ADDR - MAX_MEM_ADDR - 1) + (1 - src_out_of_range_err) * (MAX_MEM_ADDR - MAX_READ_ADDR) - abs_read_diff) = 0; + pol commit max_mem_addr; // todo: While we do not support constants + sel_start * (max_mem_addr - constants.AVM_HIGHEST_MEM_ADDRESS) = 0; + + // MAX_READ_ADDR = 0 if this is a top level call since there aren't any memory reads at the top level + // conceptually cd copy and rd copy don't perform reads at the top level. + pol MAX_READ_ADDR = src_addr + max_read_index; + pol commit max_read_addr; + max_read_addr = sel_start * MAX_READ_ADDR; + #[CHECK_SRC_ADDR_IN_RANGE] + sel_start { max_read_addr, max_mem_addr, src_out_of_range_err } + in + gt.sel_others { gt.input_a, gt.input_b, gt.res }; pol MAX_WRITE_ADDR = dst_addr + copy_size; - pol commit abs_write_diff; - #[DST_OUT_OF_RANGE] // MAX_MEM_ADDR < MAX_WRITE_ADDR or MAX_MEM_ADDR >= MAX_WRITE_ADDR - SEL * sel_start * (dst_out_of_range_err * (MAX_WRITE_ADDR - MAX_MEM_ADDR - 1) + (1 - dst_out_of_range_err) * (MAX_MEM_ADDR - MAX_WRITE_ADDR) - abs_write_diff) = 0; + pol commit max_write_addr; + max_write_addr = sel_start * MAX_WRITE_ADDR; + #[CHECK_DST_ADDR_IN_RANGE] + sel_start { max_write_addr, max_mem_addr, dst_out_of_range_err } + in + gt.sel_others { gt.input_a, gt.input_b, gt.res }; - #[RANGE_READ] - sel_start { abs_read_diff, thirty_two } in range_check.sel { range_check.value, range_check.rng_chk_bits }; - #[RANGE_WRITE] - sel_start { abs_write_diff, thirty_two } in range_check.sel { range_check.value, range_check.rng_chk_bits }; + // Consolidate the errors + pol commit err; + err = 1 - (1 - dst_out_of_range_err) * (1 - src_out_of_range_err); ////////////////////////////// - // Control flow management + // Control flow management 
////////////////////////////// pol commit sel_start_no_err; sel_start_no_err * (1 - sel_start_no_err) = 0; sel_start_no_err = sel_start * (1 - err); - // An active row succeeding sel_end has to be a sel_start + // An active row succeeding sel_end has to be a sel_start #[START_AFTER_END] (sel_cd_copy' + sel_rd_copy') * sel_end * (sel_start' - 1) = 0; + pol commit sel_write_count_is_zero; + pol commit write_count_zero_inv; // Could optimise by using the existing write_count_minus_on_inv + // sel_write_count_is_zero = 1 IFF copy_size = 0 && sel_start = 1 (and there are no errors) + #[ZERO_SIZED_WRITE] + sel_start_no_err * (copy_size * (sel_write_count_is_zero * (1 - write_count_zero_inv) + write_count_zero_inv) - 1 + sel_write_count_is_zero) = 0; + #[END_IF_WRITE_IS_ZERO] + sel_start_no_err * sel_write_count_is_zero * (sel_end - 1) = 0; + + pol SEL_PERFORM_COPY = sel_start_no_err * (1 - sel_write_count_is_zero) + SEL * (1 - sel_start); + pol WRITE_COUNT_MINUS_ONE = copy_size - 1; pol commit write_count_minus_one_inv; // sel_end = 1 IFF copy_size - 1 = 0; #[END_WRITE_CONDITION] - SEL_NO_ERR * (WRITE_COUNT_MINUS_ONE * (sel_end * (1 - write_count_minus_one_inv) + write_count_minus_one_inv) - 1 + sel_end) = 0; + SEL_PERFORM_COPY * (WRITE_COUNT_MINUS_ONE * (sel_end * (1 - write_count_minus_one_inv) + write_count_minus_one_inv) - 1 + sel_end) = 0; #[END_ON_ERR] // sel_end = 1 if error err * (sel_end - 1) = 0; pol commit reads_left; // Number of reads of the src data, if reads_left = 0 but copy_size != 0 then it is a padding row - // Src data elements are read from indicies [offset, MAX_READ_INDEX], therefore reads_left = MAX_READ_INDEX - offset - // We need to be careful that MAX_READ_INDEX - offset does not underflow (i.e. 
when offset > MAX_READ_INDEX, reads_left = 0) - pol OFFSET_GT_MAX_READ = offset - MAX_READ_INDEX - 1; // offset > MAX_READ_INDEX - pol OFFSET_LTE_MAX_READ = MAX_READ_INDEX - offset; // offset <= MAX_READ_INDEX - pol commit sel_offset_gt_max_read; - sel_offset_gt_max_read * (1 - sel_offset_gt_max_read) = 0; - - pol commit abs_max_read_offset; // Needed for lookup - abs_max_read_offset = SEL * sel_start_no_err * (sel_offset_gt_max_read * OFFSET_GT_MAX_READ + (1 - sel_offset_gt_max_read) * OFFSET_LTE_MAX_READ); - - #[RANGE_READS_LEFT] - sel_start_no_err { abs_max_read_offset, thirty_two } in range_check.sel { range_check.value, range_check.rng_chk_bits }; + // src data elements are read from indices [offset, max_read_index], therefore reads_left = max_read_index - offset + // We need to be careful that max_read_index - offset does not underflow (i.e. when offset > max_read_index, reads_left = 0) + // We test that condition here + pol commit offset_gt_max_read_index; + #[OFFSET_GT_MAX_READ_INDEX] + sel_start_no_err { offset, max_read_index, offset_gt_max_read_index } + in + gt.sel_others { gt.input_a, gt.input_b, gt.res }; // If sel_offset_gt_max_read = 1 (i.e. 
when offset > MAX_READ_INDEX, reads_left = 0) // otherwise, reads_left = MAX_READ_INDEX - offset - #[INIT_READS_LEFT] - SEL * sel_start_no_err * (reads_left - OFFSET_LTE_MAX_READ * (1 - sel_offset_gt_max_read)) = 0; + #[INIT_READS_LEFT] + sel_start_no_err * (1 - sel_write_count_is_zero) * (reads_left - (max_read_index - offset) * (1 - offset_gt_max_read_index)) = 0; ////////////////////////////// // Execute Data Copy @@ -233,7 +262,7 @@ namespace data_copy; // Most of these relations are either gated explicitly by an err or by sel_end (which is 1 when err = 1) // ===== Writing to dst_context_id ===== pol commit sel_mem_write; - sel_mem_write = SEL_NO_ERR; // We write if there is no error + sel_mem_write = SEL_PERFORM_COPY; // We write if there is no error and copy_size != 0 // Data copy size decrements for each row until we end #[DECR_COPY_SIZE] SEL * (1 - sel_end) * (copy_size' - copy_size + 1) = 0; @@ -242,50 +271,98 @@ namespace data_copy; (1 - precomputed.first_row) * SEL * (1 - sel_end) * (dst_addr' - dst_addr - 1) = 0; #[MEM_WRITE] - sel_mem_write { clk, dst_addr, value, /*mem_tag=*/precomputed.zero/*(FF)*/, /*rw=*/sel_mem_write/*(write)*/, dst_context_id } - in - memory.sel { memory.clk, memory.address, memory.value, memory.tag, memory.rw, memory.space_id }; + sel_mem_write { clk, dst_context_id, dst_addr, value, /*mem_tag=*/precomputed.zero/*(FF)*/, /*rw=*/sel_mem_write/*(write)*/ } + is + memory.sel_data_copy_write { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; // ===== Reading for nested call ===== pol commit read_addr; // The addr to start reading the data from: src_addr + offset; - #[INIT_READ_ADDR] - SEL * sel_start_no_err * (read_addr - src_addr - offset) = 0; + #[INIT_READ_ADDR] // Only occurs at the start if we have not errored + sel_start_no_err * (1 - sel_write_count_is_zero) * (read_addr - src_addr - offset) = 0; // Subsequent read addrs are incremented by 1 unless this is a padding row #[INCR_READ_ADDR] SEL 
* (1 - padding) * (1 - sel_end) * (read_addr' - read_addr - 1) = 0; - // Read count decrements + // Read count decrements #[DECR_READ_COUNT] SEL * (1 - padding) * (1 - sel_end) * (reads_left' - reads_left + 1) = 0; pol commit padding; // Padding = 1 if reads_left = 0 pol commit reads_left_inv; #[PADDING_CONDITION] - SEL_NO_ERR * (reads_left * (padding * (1 - reads_left_inv) + reads_left_inv) - 1 + padding) = 0; + SEL_PERFORM_COPY * (reads_left * (padding * (1 - reads_left_inv) + reads_left_inv) - 1 + padding) = 0; // Read from memory if we are not the top level call and not a padding row pol commit sel_mem_read; // If the current row is a memory op read - sel_mem_read = SEL_NO_ERR * (1 - is_top_level) * (1 - padding); + sel_mem_read = SEL_PERFORM_COPY * (1 - is_top_level) * (1 - padding); // === Value Padding === pol commit value; #[PAD_VALUE] - SEL_NO_ERR * padding * value = 0; + SEL_PERFORM_COPY * padding * value = 0; #[MEM_READ] - sel_mem_read { clk, read_addr, value, /*mem_tag=*/precomputed.zero/*FF*/, /*rw=*/precomputed.zero/*(read)*/, src_context_id } - in - memory.sel { memory.clk, memory.address, memory.value, memory.tag, memory.rw, memory.space_id }; + sel_mem_read { clk, src_context_id, read_addr, value, /*mem_tag=*/precomputed.zero/*FF*/, /*rw=*/precomputed.zero/*(read)*/ } + is + memory.sel_data_copy_read { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; // ===== Reading cd column for top level cd copy ===== // Reading from column - // TODO: data size for top level column reads need to be constrained - // After calldata hashing pol commit cd_copy_col_read; #[CD_COPY_COLUMN] - cd_copy_col_read = SEL_NO_ERR * (1 - padding) * is_top_level * sel_cd_copy; + cd_copy_col_read = SEL_PERFORM_COPY * (1 - padding) * is_top_level * sel_cd_copy; + + // The calldata trace starts at index = 1 (TODO: We need this temporarily while we dont allow for aliases in the lookup tuple): + pol commit read_addr_plus_one; + read_addr_plus_one = 
cd_copy_col_read * (read_addr + 1); #[COL_READ] - cd_copy_col_read { value, dst_context_id, read_addr } + cd_copy_col_read { value, dst_context_id, read_addr_plus_one } in calldata.sel { calldata.value, calldata.context_id, calldata.index }; + //////////////////////////////////////////////// + // Dispatch Permutation + //////////////////////////////////////////////// + // Since these are permutations, we need to distinguish between the start + // of a cd_copy and rd_copy. + // Note that the value of sel_cd_copy and sel_rd_copy are constrained by their + // inclusion inside the permutation. + + pol commit sel_cd_copy_start; + sel_cd_copy_start = sel_start * sel_cd_copy; + #[DISPATCH_CD_COPY] + execution.sel_execute_calldata_copy { + precomputed.clk, + execution.parent_id, execution.context_id, + execution.register[0], execution.register[1], execution.rop[2], + execution.parent_calldata_addr, execution.parent_calldata_size, + execution.sel_execute_calldata_copy/*=1*/, execution.sel_opcode_error + } + is + sel_cd_copy_start { + clk, + src_context_id, dst_context_id, + copy_size, offset, dst_addr, + src_addr, src_data_size, + sel_cd_copy, err + }; + + pol commit sel_rd_copy_start; + sel_rd_copy_start = sel_start * sel_rd_copy; + #[DISPATCH_RD_COPY] + execution.sel_execute_returndata_copy { + precomputed.clk, + execution.last_child_id, execution.context_id, + execution.register[0], execution.register[1], execution.rop[2], + execution.last_child_returndata_addr, execution.last_child_returndata_size, + execution.sel_execute_returndata_copy/*=1*/, execution.sel_opcode_error + } + is + sel_rd_copy_start { + clk, + src_context_id, dst_context_id, + copy_size, offset, dst_addr, + src_addr, src_data_size, + sel_rd_copy, err + }; + diff --git a/barretenberg/cpp/pil/vm2/docs/getcontractinstance-design-doc.md b/barretenberg/cpp/pil/vm2/docs/getcontractinstance-design-doc.md deleted file mode 100644 index 8f1a0d84da37..000000000000 --- 
a/barretenberg/cpp/pil/vm2/docs/getcontractinstance-design-doc.md +++ /dev/null @@ -1,436 +0,0 @@ -# GetContractInstance Opcode Design Document - -## Overview - -The `GetContractInstance` opcode allows contracts to query whether a contract instance exists at a given address and retrieve specific members from that instance. Its core operation is to retrieve a specified contract instance and write the selected instance member to memory. - -This implementation introduces two new C++ simulation components to the AVM: a `GetContractInstance` opcode component and a shared `ContractInstanceManager` component for contract instance retrieval. Each component corresponds to a PIL gadget that it generates events for `get_contract_instance.pil` and `contract_instance_retrieval.pil` respectively. - -The core contract instance retrieval operation is a nullifier existence check, address derivation, and an update check. This retrieval is performed today by the `BytecodeManager` (and its `bc_retrieval.pil`) inline. Since this document proposes a new dedicated component for contract instance retrieval, the existing `BytecodeManager` simulation component can be modified (and simplified) to use this new `ContractInstanceManager`. Likewise, `bc_retrieval.pil` can be simplified via a consolidated interaction with `contract_instance_retrieval.pil`. - -## Architecture - -### Architectural Overview - -The `GetContractInstance` implementation introduces a layered architecture that promotes code reuse and clear separation of concerns: - -#### New Components - -1. **`GetContractInstance` Opcode Component** (`get_contract_instance.cpp` / `get_contract_instance.pil`) - - **NEW OPCODE COMPONENT**: Main entry point for the `GETCONTRACTINSTANCE` instruction - - Validates inputs (bounds checking, member enum validation) - - Orchestrates contract instance retrieval via the shared manager - - Writes results to memory with proper tagging - - Handles opcode-specific error conditions - -2. 
**`ContractInstanceManager` / `ContractInstanceRetrieval`** (`contract_instance_manager.cpp` / `contract_instance_retrieval.pil`) - - **NEW SHARED COMPONENT**: Centralizes contract instance retrieval logic - - Performs core validation: nullifier checking, address derivation, update validation - - Provides caching to avoid duplicate lookups within the same execution context - - Used by both `GetContractInstance` opcode and bytecode retrieval system - - Emits events for PIL trace generation - -#### Modified Components - -1. **`BytecodeManager` / `BytecodeRetrieval`** (`bytecode_manager.cpp` / `bc_retrieval.pil`) - - **SIMPLIFIED**: No longer directly interacts with nullifier checking, address derivation, or update validation - - Now delegates contract instance validation to the shared `ContractInstanceManager` - - Still performs other bytecode-related concerns (class ID derivation, bytecode hashing) - - Simplified in both simulation and PIL - -2. **Precomputed Tables** (`precomputed_trace.cpp` / `precomputed.pil`) - - **EXTENDED**: New table for `GetContractInstance` - - Enables member enum validation and decomposition into boolean flags - - Follows existing patterns used by other opcodes (e.g., `GetEnvVar`) - -#### Relevant Unchanged Components - -This section lists a few components that are unchanged here, but are referenced in this document. - -**Used by opcode component and gadget**: - - **Memory**: Used by the memory-aware opcode component to write the opcode results to memory. - - _Note_: this opcode relies on `Execution` to perform address resolution, tag checking and memory reads. -**Used during core contract instance retrieval**: - - **Nullifier Check**: Validates deployment nullifier existence. Used during contract instance retrieval. - - **Address Derivation**: Ensures contract addresses match their constituent parts. - - **Update Check**: Validates contract class ID updates - -### Why a Shared Core Component for Contract Instance Retrieval? 
- -Why make the choice to have a single core component for contract instance retrieval for use by bytecode retrieval and by the `GetContractInstance` opcode? The primary motivation is to simplify the bytecode retrieval and `GetContractInstance` opcode components so that they do not need to directly interact with Nullifier Checking, Address Derivation, and Update checking themselves. This simplification also reduces column count because we don't need to duplicate all of the columns for interacting with these inner components, especially address derivation which has a wide interface. At first, we thought we'd be able to de-duplicate contract instance retrievals, but that proved to be less useful than we originally envisioned because de-duplication would depend on the current nullifier tree root and public data tree root. See [Appendix A - Deduplication of Bytecode and Contract Instance Retrievals](#appendix-a---deduplication-of-bytecode-and-contract-instance-retrievals) for more information on de-duplication. 
- -### Component Interaction Diagram - -```mermaid -graph TD - -%% Entry point -Execution --> BytecodeManagerRetrieval -Execution --> GetContractInstanceOpcode - -%% Bytecode Manager / Retrieval paths -BytecodeManagerRetrieval --> ClassIDDerivation -BytecodeManagerRetrieval --> BytecodeHashing -BytecodeManagerRetrieval --> ContractInstanceManager - -%% GetContractInstanceOpcode paths -GetContractInstanceOpcode --> PrecomputedTable -GetContractInstanceOpcode --> Memory -GetContractInstanceOpcode --> ContractInstanceManager - -%% Contract Instance Manager paths -ContractInstanceManager --> NullifierCheck -ContractInstanceManager --> AddressDerivation -ContractInstanceManager --> UpdateCheck - -%% Node labels -Execution["Execution"] -BytecodeManagerRetrieval["Bytecode Manager / Retrieval"] -GetContractInstanceOpcode["GetContractInstance Opcode"] -ClassIDDerivation["Class ID Derivation"] -BytecodeHashing["Bytecode Hashing"] -ContractInstanceManager["Contract Instance Manager / Retrieval"] -PrecomputedTable["Precomputed Table"] -Memory["Memory"] -NullifierCheck["Nullifier Check"] -AddressDerivation["Address Derivation"] -UpdateCheck["Update Check"] -``` - -## Opcode Interface and Errors - -``` -GETCONTRACTINSTANCE addressOffset, dstOffset, memberEnum -``` - -**Parameters:** -- `addressOffset`: Memory offset containing the contract address to query (must be tagged FF) -- `dstOffset`: Memory offset where results will be written -- `memberEnum`: Enum value specifying which instance member to retrieve: - - 0 = DEPLOYER - - 1 = CLASS_ID - - 2 = INIT_HASH - -**Memory Operations:** -- `M[addressOffset]`: Input contract address (FF) - Read and tag-checking performed by **execution**! -- `M[dstOffset]`: Output existence flag (U1) - Write and tag-assignment performed by opcode component and gadget -- `M[dstOffset+1]`: Output member value (FF) - Write and tag-assignment performed by opcode component and gadget - -**Errors:** -1. 
**Out of bounds write**: `dstOffset == 2^32 - 1` implies that a write to `dstOffset+1` is out-of-bounds -2. **Invalid member enum**: `memberEnum > 2` - -## Supporting Types - -```cpp -struct ContractInstance { - FF salt; - AztecAddress deployer_addr; - ContractClassId current_class_id; - ContractClassId original_class_id; - FF initialisation_hash; - PublicKeys public_keys; -}; - -enum class ContractInstanceMember { - DEPLOYER = 0, - CLASS_ID = 1, - INIT_HASH = 2, - MAX = INIT_HASH, -}; -``` - -## Component Design - -### GetContractInstance Opcode Component - -#### Overview -The main opcode component that handles the `GETCONTRACTINSTANCE` instruction. It validates inputs, handles errors, retrieves contract instance data via the shared core retrieval component, and writes results to memory. - -#### Simulation Interface -```cpp -class GetContractInstanceInterface { -public: - virtual ~GetContractInstanceInterface() = default; - virtual void get_contract_instance( - MemoryInterface& memory, - AztecAddress contract_address, - MemoryAddress dst_offset, - uint8_t member_enum - ) = 0; -}; - -struct GetContractInstanceEvent { - // Interface - uint32_t execution_clk; - AztecAddress contract_address; - MemoryAddress dst_offset; - uint8_t member_enum; - uint32_t space_id; - FF nullifier_tree_root; - FF public_data_tree_root; - - // Instance retrieval results - bool instance_exists; - FF retrieved_deployer_addr; - FF retrieved_class_id; - FF retrieved_init_hash; -}; -``` - -#### PIL Interface -- **Used by:** `execution.pil` (opcode dispatch) -- **Uses:** `contract_instance_retrieval.pil`, precomputed table, memory - -**Usage:** -``` -sel_execute_get_contract_instance { - // inputs - precomputed.clk, - register[0], - rop[1], - rop[2], - context_id, - context_stack.nullifier_tree_root, - context_stack.public_data_tree_root, - // outputs/errors - sel_opcode_error -} is get_contract_instance.sel { - // inputs - get_contract_instance.clk, - get_contract_instance.contract_address, - 
get_contract_instance.dst_offset, - get_contract_instance.member_enum, - get_contract_instance.space_id, - get_contract_instance.nullifier_tree_root, - get_contract_instance.public_data_tree_root, - // outputs/errors - get_contract_instance.error -}; -``` - -#### Implementation -**Simulation (`get_contract_instance.cpp`):** -1. Validates bounds (`dst_offset + 1` must not exceed `AVM_HIGHEST_MEM_ADDRESS`) -2. Validates member enum (`enum < MAX`) -3. Calls `ContractInstanceManager` for instance retrieval -4. Extracts requested member based on enum (`deployer/class_id/init_hash`) -5. Writes exists flag (`U1`) and member value (`FF`) to memory -6. `GetContractInstanceEvent` emission for trace generation of `get_contract_instance.pil` - -_Note_: reading of contract address from memory (along with address resolution and tag checking) is performed by execution. - -**PIL (`get_contract_instance.pil`) constraints:** -1. Bounds checking: `dst_offset + 1 < 2^32` -2. Member enum validation and decomposition via precomputed table lookup -3. Aggregation of errors into a single selector to return to execution and drive `sel_opcode_error` -4. Writes exists flag (`U1`) and member value (`FF`) to memory -5. Instance retrieval lookup to `contract_instance_retrieval` gadget - -### Core Component - ContractInstanceManager (for Contract Instance Retrieval) - -#### Overview -Central component for contract instance retrieval, shared between `GetContractInstance` opcode and bytecode retrieval. Handles nullifier checking, address validation, and contract update verification. 
- -#### Simulation Interface -```cpp -class ContractInstanceManagerInterface { -public: - virtual ~ContractInstanceManagerInterface() = default; - virtual std::optional get_contract_instance( - const FF& contract_address - ) = 0; -}; - -struct ContractInstanceRetrievalEvent { - AztecAddress address; - ContractInstance contract_instance; - - // Tree context - FF nullifier_tree_root; - FF public_data_tree_root; - - // Nullifier info - FF deployment_nullifier; - bool nullifier_exists; - AztecAddress deployer_protocol_contract_address; - - bool error; -}; -``` - -#### PIL Interface -- **Used by:** `get_contract_instance.pil`, `bc_retrieval.pil` -- **Uses:** `nullifier_check`, `address_derivation`, `update_check` - -**Usage:** -```pil -sel { - // inputs - execution_or_bc_retrieval.address, - execution_or_bc_retrieval.nullifier_tree_root, - execution_or_bc_retrieval.public_data_tree_root, - // outputs - execution_or_bc_retrieval.exists, - execution_or_bc_retrieval.deployer_addr, // situational - execution_or_bc_retrieval.current_class_id, - execution_or_bc_retrieval.init_hash // situational - } in contract_instance_retrieval.sel { - // inputs - contract_instance_retrieval.address, - contract_instance_retrieval.nullifier_tree_root, - contract_instance_retrieval.public_data_tree_root, - // outputs - contract_instance_retrieval.exists, - contract_instance_retrieval.deployer_addr, // situational - contract_instance_retrieval.current_class_id, - contract_instance_retrieval.init_hash // situational - }; -``` - -**NOTE:** bytecode instance retrieval does not need to use `deployer_addr` and `init_hash` in its lookup to contract instance retrieval. It is enough that internally contract instance retrieval enforces that its instance member columns can be used to derive the provided address. Bytecode retrieval does not actually use those members in any of its own constraints. - -#### Implementation -**Simulation (`contract_instance_manager.cpp`):** -1. 
Nullifier existence checking via `merkle_db` -2. Contract instance retrieval from `contract_db` -3. Update validation via `UpdateCheck` component -4. `ContractInstanceRetrievalEvent` emission for trace generation of `contract_instance_retrieval.pil` - -**PIL (`contract_instance_retrieval.pil`):** -1. Retrieval: Given a contract address and context, returns contract instance data and existence flag -2. Nullifier check: Validates deployment nullifier exists -3. Address derivation: Ensures address matches its components -4. Update check: Validates current class ID is valid -5. No errors: This gadget does not have any errors - -### Precomputed Table Component - -#### Overview -For member enum validation and decomposition to boolean flags. - -#### Table Structure -| member_enum | is_valid_member_enum | is_deployer | is_class_id | is_init_hash | -|-------------|---------------------|-------------|-------------|--------------| -| 0 | 1 | 1 | 0 | 0 | -| 1 | 1 | 0 | 1 | 0 | -| 2 | 1 | 0 | 0 | 1 | -| 3-255 | 0 | 0 | 0 | 0 | - -#### PIL Interface -**Used by:** `get_contract_instance.pil` - -```pil -// Input -/*clk=*/ member_enum, -// Outputs -is_valid_member_enum, -is_deployer, -is_class_id, -is_init_hash -``` - -#### Implementation -Similar to `PhaseTable` and `GetEnvVarSpec`, tracegen can reuse a single spec here for the precomputed table and the `GetContractInstance` tracegen. - -## Pre-existing components - -### Bytecode Manager (for Bytecode Retrieval) -**MODIFIED!** This component no longer interacts with address derivation, nullifier checking and update checking itself, and instead just interacts with the `ContractInstanceManager` (or `contract_instance_retrieval.pil` in PIL). 
- -#### PIL Interface -**Used here by:** `execution.pil` -**Uses:** `contract_instance_retrieval.pil` and `class_id_derivation.pil` (previously used `address_derivation.pil`, `nullifier_check.pil`, `update_check.pil`) - -```pil -sel { - bytecode_id, contract_address, sel_bytecode_retrieval_failure -} in bc_retrieval.sel { - bc_retrieval.bytecode_id, bc_retrieval.address, bc_retrieval.error -}; -``` - -### Memory -**NOT NEW!** Just here for reference. - -#### PIL Interface -**Used here by:** `get_contract_instance.pil` - -```pil -clk, // Input: execution clock -address, // Input: memory address -value, // Input/Output: memory value -tag, // Input: value tag (U1, FF, etc.) -rw, // Input: read(0) or write(1) -space_id // Input: memory space identifier -``` - -### Nullifier Check -**NOT NEW!** Just here for reference. - -#### PIL Interface -**Used here by:** `contract_instance_retrieval.pil` (previously by `bc_retrieval.pil`) - -```pil -exists, // Output: whether nullifier exists -nullifier, // Input: nullifier value to check (contract address in our case) -root, // Input: nullifier tree root -address, // Input: contract address for siloing -should_silo // Input: whether to silo the nullifier -``` - -### Address Derivation Component -**NOT NEW!** Just here for reference. - -#### PIL Interface -**Used by:** `contract_instance_retrieval.pil` (previously by `bc_retrieval.pil`) - -```pil -address, -salt, // HINTED -deployer_addr, -class_id, -init_hash, -nullifier_key_x, -nullifier_key_y, -// other inputs... -``` - -### Update Check Component -**NOT NEW!** Just here for reference. 
- -#### PIL Interface -**Used by:** `contract_instance_retrieval.pil` (previously by `bc_retrieval.pil`) - -```pil -address, -current_class_id, -original_class_id, -public_data_tree_root, -timestamp -``` - -## Alternative Approaches - -We can consider _not_ having a core component for contract instance retrieval, and instead repeat its component interactions (to address derivation, nullifier checking, and update checking) in both bytecode retrieval and the `GetContractInstance` opcode. This removes one level of indirection, but increases the responsibilities of bytecode retrieval and of the `GetContractInstance` opcode components. It certainly reduces readability of those components, especially because of the many extra columns needed for address derivation. - - -## Appendix A - Deduplication of Bytecode and Contract Instance Retrievals - -Today, the Bytecode Manager deduplicates retrievals based solely on contract address. This means that if a contract is invoked multiple times (via enqueued or nested calls) bytecode retrieval is performed once and generates only an event for the first retrieval. - -This works in simulation, but creates a problem in circuit land. The issue is that **nullifier checks and update checks rely on the *current* tree roots**. The PIL interface to bytecode retrieval today omits tree roots, but it must be modified to include the nullifier and public data tree roots to forward to nullifier and update checking. Specifically, both the **Bytecode Retrieval Gadget** and the newly proposed **Contract Instance Retrieval Gadget** must include the **Nullifier Tree Root** and **Public Data Tree Root** as part of their interface. - -In other words, when performing a lookup, the input tuple must include the current tree roots. The execution trace must provide those roots in its lookup tuple to bytecode retrieval. The first time a bytecode retrieval is performed for a given contract address, this should be fine. 
But in subsequent interactions from execution, the roots may have changed. Execution will provide latest tree roots in the lookup tuple to bytecode retrieval, but (today) the bytecode retrieval trace will only include the first retrieval for that address with old roots because simulation only emitted one event for the very first retrieval of that bytecode. - -So, our current deduplication scheme is broken for bytecode retrieval. - -### **Options** - -We see two viable paths forward: - -1. **Option 1: Always use “start” roots from public inputs** - Always perform bytecode and contract instance retrievals using the **AVM’s PI start roots**. This would introduce ugliness in simulation and PIL, but would accomplish deduplication. - -2. **Option 2: Stop deduplicating at the “Manager/Retrieval” level** - Stop deduplicating bytecode and contract instance retrievals. Instead, rely on deduplication within the stateless inner gadgets (address derivation, bytecode hashing, and class ID derivation). I believe this is where we get the most gains for deduplication anyway\! diff --git a/barretenberg/cpp/pil/vm2/docs/skippable.md b/barretenberg/cpp/pil/vm2/docs/skippable.md index bab7a97f8b06..3babb0ad8693 100644 --- a/barretenberg/cpp/pil/vm2/docs/skippable.md +++ b/barretenberg/cpp/pil/vm2/docs/skippable.md @@ -2,13 +2,13 @@ ## Introduction -For each sub-trace defined in a .pil file, one can optionally add so-called "skippable" condition which allows to improve performance on prover side whenever the "skippable" condition is satisfied. It basically skips some accumulation computation in sumcheck protocol to all sub-relations pertaining to the sub-trace. More on how to define a valid "skippable" condition in the next section. 
We emphasize that the "skippable" mechanism does not change the behavior of the verifier and therefore does not present any security risk about soundness, i.e., it does not help a malicious prover to prove a wrong statement even if the "skippable" condition is too relaxed. What can however happen is that the verification fails when it should not (perfect completeness is not guarenteed anymore if we wrongly skip). +For each sub-trace defined in a .pil file, one can optionally add so-called "skippable" condition which allows to improve performance on prover side whenever the "skippable" condition is satisfied. It basically skips some accumulation computation in sumcheck protocol to all sub-relations pertaining to the sub-trace. Here a "sub-trace" can be virtual or not, i.e., each virtual sub-trace has its own skippable condition (which might be the same though). More on how to define a valid "skippable" condition in the next section. We emphasize that the "skippable" mechanism does not change the behavior of the verifier and therefore does not present any security risk about soundness, i.e., it does not help a malicious prover to prove a wrong statement even if the "skippable" condition is too relaxed. What can however happen is that the verification fails when it should not (perfect completeness is not guaranteed anymore if we wrongly skip). ## Explanations -A sub-trace contains a list of algebraic sub-relations/equations which must be satisfied. We do not consider lookups nor permutations in this discussion. +A sub-trace contains a list of algebraic sub-relations/equations which must be satisfied. We do not consider lookups nor permutations in this discussion (they are using the same skippable condition defined in interactions_base.hpp consisting of skipping when the inverse column entry is zero). 
-A valid "skippable" condition is a condition defined on some columns which guarantee that the accumulation in sumcheck rounds will be zero for this sub-trace, i.e., any sub-relation contribution will be zero. This means that the sub-relation is satisfied with columns values accumulated during sumcheck rounds. +A valid "skippable" condition is a condition defined on some columns which guarantee that the accumulation in sumcheck rounds will be zero for this sub-trace, i.e., any sub-relation contribution will be zero. This means that the sub-relation is satisfied (equal to zero) with columns values accumulated during sumcheck rounds. ## Strong Skippable Condition @@ -26,24 +26,58 @@ We name such a condition "strong" and show in next section that this can be rela ## Valid Relaxed Skippable Condition At each round of the sumcheck protocol, two contiguous rows are "merged". -For each column, the merging consist in computing the following based on a challenge $\alpha$ (random value over FF): +For each column, the merging step consists in computing the following based on a challenge $\alpha$ (random value over FF): $$ ColMerged{_i} = (1 - \alpha) \cdot Col_i + \alpha \cdot Col_{i+1} $$ for every even i ($Col_i$ denotes the ith row element of $Col$). Then, each "merged value" is evaluated in your sub-relation. Note that $ColMerged_i$ is more or less random except when $Col_i$ and $Col_{i+1}$ are zeros. Assume that for a given sub-relation all $ColMerged_i$ are "random" except for the term satisfying the skippable condition. Then it will evaluate to zero and can be effectively skipped. (Assuming the strong definition of skippable where the skippable condition can nullify a sub-relation no matter what are the other values.) 
-Now, one can leverage on the fact that we are using our witness generator to use the skippable in a more generous manner by leveraging columns that are guaranteed to be zero whenever the skippable condition is true and that this particular column being zero nullifies a sub-relation (in other words the column entry is multiplicative factor of the sub-relation). +Now, one can leverage the fact that we are using our witness generator to use the skippable in a more generous manner by taking advantage of columns that are guaranteed to be zero whenever the skippable condition is true and that this specific column being zero nullifies a sub-relation (in other words the column entry is a multiplicative factor of the sub-relation). Let us take an example of a subtrace with two subrelations over columns $a$, $b$, $c$: $$ -a \cdot (b+c) = 0 +a \cdot (b + c) = 0 $$ $$ -b \cdot (1- b) = 0 +b \cdot (1 - b) = 0 $$ Skippable condition $a == 0$. -Strictly speaking, the skippable condition does not algebraically nullifies the second equation ($b$ is almost a free variable) -However, if we assume that our witness generator will always set $b = 0$ whenever $a== 0$, then we are fine. Namely, for every pair of contiguous rows with skippable condition being satisfied, the merged row will be skippable. For these two rows, we know that $b == 0$ and that the merged row entry will be zero as well. Therefore, contribution for the second sub-relation can be skipped. +Strictly speaking, the skippable condition does not algebraically nullify the second equation ($b$ is almost a free variable). +However, if we know that our witness generator will always set $b = 0$ whenever $a == 0$, then we are fine. Namely, for every pair of contiguous rows with skippable condition being satisfied, the merged row will be skippable. For these two rows, we know that $b == 0$ and that the merged row entry will be zero as well. Therefore, contribution for the second sub-relation can be skipped. 
+ +WARNING: If $a == 0$ would imply $b == 1$, this would not work even though $b == 1$ satisfies the second relation. Namely, after merging two rows, $b$ would be randomized and the second relation would not be satisfied but the merged $a$ value would be zero and wrongly satisfy the skippable condition. + +## Special Case with First Row and Empty Sub-trace + +We discuss another little optimization specific to the very first row which is mostly empty when we use sub-relations involving shifted values. Usually, the activation selector $sel$ is not set for this row but the fixed selector "precomputed.first_row" is active. + +Assume that our sub-trace is empty (except the first fixed row) i.e., 'sel == 0' for every row. It would be desirable to be able to use the standard skippable condition $sel == 0$. This is possible if $sel' == 0$ and $sel == 0$ guarantee to nullify +all sub-relations. + +The reason is that the skippable mechanism applies simultaneously over the two contiguous rows which will be merged. In other words, the contribution will be skipped if the skippable condition holds on both rows. For the skippable condition +$sel == 0$, this means that if the second row is skippable the first row must have $sel' == 0$. As the very first row is merged with the second one in the first round, this applies. + +If the skippable mechanism were not simultaneous over the two rows then the first row might have been discarded wrongly. + +As an example from memory.pil: + +$(1 - precomputed.firstRow) * (1 - sel) * sel' = 0;$ + +Even if "precomputed.first_row" becomes randomized, we can rely on $sel'$ to nullify this sub-relation. Therefore, $sel == 0$ is a valid skippable condition. + +## Leveraging Contiguous Trace + +Our trace generation always creates some contiguous trace, i.e., as soon as a row is inactive ($sel == 0$), the next row is inactive. We can leverage this property when working with the skippable condition. 
+ +Example from bitwise.pil: + +$(op_{id'} - op_{id}) * (1 - last) = 0;$ + +We know that our trace generation will fill $op_{id} == 0$ on inactive rows and we can deduce from trace continuity that $op_{id'} == 0$ and therefore this relation will be nullified whenever the skippable condition $sel == 0$ holds. + +## TLDR + +Due to the randomization of non-zero elements by merging two rows, we emphasize that the only skippable conditions which make sense are enforcing some column values to be zero such as $sel = 0$ or $sel_{1} + sel_{2} + ... = 0$. The latter should only be used when the trace generation enforces that $sel_{i} == 0$ for all $i$'s in order to satisfy this condition. This is the case for boolean selectors. diff --git a/barretenberg/cpp/pil/vm2/ecc_mem.pil b/barretenberg/cpp/pil/vm2/ecc_mem.pil index 13e4c965477e..226c23076731 100644 --- a/barretenberg/cpp/pil/vm2/ecc_mem.pil +++ b/barretenberg/cpp/pil/vm2/ecc_mem.pil @@ -8,7 +8,7 @@ include "execution.pil"; * This trace writes the resulting embedded curve point to the addresses {dst, * dst + 1, and dst + 2 }. Embedded curve points consist of the tuple of types * {x: FF, y: FF, is_inf: U1 }. - * + * * Opcode operands (relevant in EXECUTION when interacting with this gadget): * - rop[0]: p_x_addr * - rop[1]: p_y_addr @@ -73,8 +73,7 @@ namespace ecc_add_mem; //////////////////////////////////////////////// // Error Handling - Out of Range Memory Access //////////////////////////////////////////////// - pol commit sel_dst_out_of_range_err; - sel_dst_out_of_range_err * (1 - sel_dst_out_of_range_err) = 0; + pol commit sel_dst_out_of_range_err; // Constrained to be boolean by the lookup into gt. (provided that sel == 1). 
// Use the comparison gadget to check that the max addresses are within range // The comparison gadget provides the ability to test GreaterThan so we check @@ -85,7 +84,7 @@ namespace ecc_add_mem; #[CHECK_DST_ADDR_IN_RANGE] sel { dst_addr[2], max_mem_addr, sel_dst_out_of_range_err } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_others { gt.input_a, gt.input_b, gt.res }; //////////////////////////////////////////////// // Error Handling - Check Points are on curve @@ -143,41 +142,28 @@ namespace ecc_add_mem; //////////////////////////////////////////////// // Write output to memory //////////////////////////////////////////////// - // TODO: These need to be changed to permutations once we have the custom permutation selectors impl #[WRITE_MEM_0] - sel_should_exec { - execution_clk, dst_addr[0], - res_x, /*FF_mem_tag*/ precomputed.zero, - space_id, /*rw=1*/ sel_should_exec - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_should_exec { + execution_clk, space_id, dst_addr[0], res_x, /*FF_mem_tag*/ precomputed.zero, /*rw=1*/ sel_should_exec + } is + memory.sel_ecc_write[0] { + memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; #[WRITE_MEM_1] - sel_should_exec { - execution_clk, dst_addr[1], - res_y, /*FF_mem_tag*/ precomputed.zero, - space_id, /*rw=1*/ sel_should_exec - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_should_exec { + execution_clk, space_id, dst_addr[1], res_y, /*FF_mem_tag*/ precomputed.zero, /*rw=1*/ sel_should_exec + } is + memory.sel_ecc_write[1] { + memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; #[WRITE_MEM_2] - sel_should_exec { - execution_clk, dst_addr[2], - res_is_inf, /*U1_mem_tag=1*/ sel_should_exec, - space_id, /*rw=1*/ sel_should_exec - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, 
memory.rw + sel_should_exec { + execution_clk, space_id, dst_addr[2], res_is_inf, /*U1_mem_tag=1*/ sel_should_exec, /*rw=1*/ sel_should_exec + } is + memory.sel_ecc_write[2] { + memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; //////////////////////////////////////////////// diff --git a/barretenberg/cpp/pil/vm2/execution.pil b/barretenberg/cpp/pil/vm2/execution.pil index 480489783d8a..ac2952e19a91 100644 --- a/barretenberg/cpp/pil/vm2/execution.pil +++ b/barretenberg/cpp/pil/vm2/execution.pil @@ -28,6 +28,7 @@ include "trees/nullifier_check.pil"; include "trees/public_data_check.pil"; include "trees/written_public_data_slots_tree_check.pil"; include "trees/l1_to_l2_message_tree_check.pil"; +include "trees/retrieved_bytecodes_tree_check.pil"; include "bytecode/address_derivation.pil"; include "bytecode/bc_decomposition.pil"; @@ -108,12 +109,20 @@ sel_first_row_in_context { contract_address, prev_nullifier_tree_root, // from context.pil prev_public_data_tree_root, // from context.pil + prev_retrieved_bytecodes_tree_root, // from context.pil + prev_retrieved_bytecodes_tree_size, // from context.pil + retrieved_bytecodes_tree_root, // from context.pil + retrieved_bytecodes_tree_size, // from context.pil sel_bytecode_retrieval_failure } in bc_retrieval.sel { bc_retrieval.bytecode_id, bc_retrieval.address, bc_retrieval.nullifier_tree_root, bc_retrieval.public_data_tree_root, + bc_retrieval.prev_retrieved_bytecodes_tree_root, + bc_retrieval.prev_retrieved_bytecodes_tree_size, + bc_retrieval.next_retrieved_bytecodes_tree_root, + bc_retrieval.next_retrieved_bytecodes_tree_size, bc_retrieval.error }; @@ -138,6 +147,10 @@ pol commit op[7]; // operands // ideally want to leave it unconstrained. This is why we do 2 lookups instead of 1. // TODO: We can consider optimizing this later, but it is probably the cleanest right now. 
+// Note: the below lookups additionally constrain that the bytecode with bytecode_id has +// been correctly hashed due to instr_fetching's lookups into bc_decomposition, where +// there is a multipermutation with bc_hashing. + #[INSTRUCTION_FETCHING_RESULT] sel_bytecode_retrieval_success { pc, bytecode_id, sel_instruction_fetching_failure @@ -147,7 +160,7 @@ sel_bytecode_retrieval_success { pol commit sel_instruction_fetching_success; // If sel = 0, we want sel_instruction_fetching_success = 0. We shouldn't be using it. -sel_instruction_fetching_success = sel * (1 - sel_instruction_fetching_failure); +sel_instruction_fetching_success = sel_bytecode_retrieval_success * (1 - sel_instruction_fetching_failure); #[INSTRUCTION_FETCHING_BODY] sel_instruction_fetching_success { @@ -322,7 +335,7 @@ pol commit sel_radix_gt_256; // sel_radix_gt_256 = 1 iff radix > 256 #[CHECK_RADIX_GT_256] sel_gas_to_radix { /*radix=*/register[1], two_five_six, sel_radix_gt_256 } in -gt.sel { gt.input_a, gt.input_b, gt.res }; +gt.sel_others { gt.input_a, gt.input_b, gt.res }; // Boolean for if we should look up the num_p_limbs value pol commit sel_lookup_num_p_limbs; @@ -347,7 +360,7 @@ pol commit sel_use_num_limbs; #[GET_MAX_LIMBS] sel_gas_to_radix { /*num_limbs=*/register[2], num_p_limbs, sel_use_num_limbs } in -gt.sel { gt.input_a, gt.input_b, gt.res }; +gt.sel_others { gt.input_a, gt.input_b, gt.res }; #[DYN_L2_FACTOR_TO_RADIX_BE] // num_limbs > num_p_limbs ? 
num_limbs : num_p_limbs sel_gas_to_radix * ((/*num_limbs=*/register[2] - num_p_limbs) * sel_use_num_limbs + num_p_limbs - dynamic_l2_gas_factor) = 0; @@ -378,8 +391,8 @@ sel_gas_sstore { #[SSTORE_DYN_L2_GAS_IS_ZERO] sel_execute_sstore * dynamic_l2_gas_factor = 0; -sel_gas_emit_unencrypted_log * dynamic_l2_gas_factor = 0; -sel_gas_emit_unencrypted_log * (/*log_size=*/ register[1] - dynamic_da_gas_factor) = 0; +sel_gas_emit_unencrypted_log * (/*log_size=*/ register[0] - dynamic_l2_gas_factor) = 0; +sel_gas_emit_unencrypted_log * (/*log_size=*/ register[0] - dynamic_da_gas_factor) = 0; /************************************************************************************************** * Temporality group 5: Opcode execution @@ -408,7 +421,8 @@ pol commit sel_execute_to_radix; pol commit sel_execute_poseidon2_perm; pol commit sel_execute_ecc_add; pol commit sel_execute_execution; -pol commit sel_execute_data_copy; +pol commit sel_execute_calldata_copy; +pol commit sel_execute_returndata_copy; pol commit sel_execute_keccakf1600; pol commit sel_execute_get_contract_instance; pol commit sel_execute_emit_unencrypted_log; @@ -421,7 +435,8 @@ sel_execute_to_radix * (1 - sel_execute_to_radix) = 0; sel_execute_poseidon2_perm * (1 - sel_execute_poseidon2_perm) = 0; sel_execute_ecc_add * (1 - sel_execute_ecc_add) = 0; sel_execute_execution * (1 - sel_execute_execution) = 0; -sel_execute_data_copy * (1 - sel_execute_data_copy) = 0; +sel_execute_calldata_copy * (1 - sel_execute_calldata_copy) = 0; +sel_execute_returndata_copy * (1 - sel_execute_returndata_copy) = 0; sel_execute_keccakf1600 * (1 - sel_execute_keccakf1600) = 0; sel_execute_get_contract_instance * (1 - sel_execute_get_contract_instance) = 0; sel_execute_emit_unencrypted_log * (1 - sel_execute_emit_unencrypted_log) = 0; @@ -437,10 +452,11 @@ sel_execute_poseidon2_perm * constants.AVM_SUBTRACE_ID_POSEIDON_PERM + sel_execute_to_radix * constants.AVM_SUBTRACE_ID_TO_RADIX + sel_execute_ecc_add * 
constants.AVM_SUBTRACE_ID_ECC + sel_execute_keccakf1600 * constants.AVM_SUBTRACE_ID_KECCAKF1600 + -sel_execute_data_copy * constants.AVM_SUBTRACE_ID_DATA_COPY + +sel_execute_calldata_copy * constants.AVM_SUBTRACE_ID_CALLDATA_COPY + sel_execute_get_contract_instance * constants.AVM_SUBTRACE_ID_GETCONTRACTINSTANCE + sel_execute_emit_unencrypted_log * constants.AVM_SUBTRACE_ID_EMITUNENCRYPTEDLOG + -sel_execute_sha256_compression * constants.AVM_SUBTRACE_ID_SHA256_COMPRESSION +sel_execute_sha256_compression * constants.AVM_SUBTRACE_ID_SHA256_COMPRESSION + +sel_execute_returndata_copy * constants.AVM_SUBTRACE_ID_RETURNDATA_COPY // We force the selectors to be 0 if we are not executing an opcode. = sel_should_execute_opcode * subtrace_id; @@ -635,9 +651,23 @@ sel * (1 - sel_execute_emit_nullifier) * (prev_nullifier_tree_size - nullifier_t #[NUM_NULLIFIERS_EMITTED_NOT_CHANGED] sel * (1 - sel_execute_emit_nullifier) * (prev_num_nullifiers_emitted - num_nullifiers_emitted) = 0; #[NUM_UNENCRYPTED_LOGS_NOT_CHANGED] -sel * (1 - sel_execute_emit_unencrypted_log) * (prev_num_unencrypted_logs - num_unencrypted_logs) = 0; +sel * (1 - sel_execute_emit_unencrypted_log) * (prev_num_unencrypted_log_fields - num_unencrypted_log_fields) = 0; #[NUM_L2_TO_L1_MESSAGES_NOT_CHANGED] sel * (1 - sel_execute_send_l2_to_l1_msg) * (prev_num_l2_to_l1_messages - num_l2_to_l1_messages) = 0; +// Retrieved bytecodes tree state can only change in the first row of a new context +#[RETRIEVED_BYTECODES_TREE_ROOT_NOT_CHANGED] +sel * (1 - sel_first_row_in_context) * (prev_retrieved_bytecodes_tree_root - retrieved_bytecodes_tree_root) = 0; +#[RETRIEVED_BYTECODES_TREE_SIZE_NOT_CHANGED] +sel * (1 - sel_first_row_in_context) * (prev_retrieved_bytecodes_tree_size - retrieved_bytecodes_tree_size) = 0; + +// Some opcodes cannot fail during opcode execution. They should not be able to set sel_opcode_error. +// Note that some of these opcodes can fail in the stages before execution. 
+// This list only takes into account the opcodes that are handled by the execution subtrace. +// Infallible opcodes which are handled by other traces should constrain sel_opcode_error there. +(sel_execute_mov + sel_execute_returndata_size + sel_execute_jump + + sel_execute_jumpi + sel_execute_debug_log + sel_execute_success_copy + + sel_execute_call + sel_execute_static_call + sel_execute_internal_call + + sel_execute_return + sel_execute_revert) * sel_opcode_error = 0; /************************************************************************************************** * Temporality group 6: Register write. diff --git a/barretenberg/cpp/pil/vm2/execution/addressing.pil b/barretenberg/cpp/pil/vm2/execution/addressing.pil index 3e88450c0341..ea6492b3577a 100644 --- a/barretenberg/cpp/pil/vm2/execution/addressing.pil +++ b/barretenberg/cpp/pil/vm2/execution/addressing.pil @@ -2,6 +2,7 @@ include "../memory.pil"; include "../precomputed.pil"; include "../range_check.pil"; include "../constants_gen.pil"; +include "../gt.pil"; // This gadget constrains the resolution of the operands from instruction fetching. // The input operands are op[0], ..., op[6] (from execution.pil). @@ -29,6 +30,7 @@ include "../constants_gen.pil"; // This is a virtual gadget, which is part of the execution trace. namespace execution; +// We skip if sel_bytecode_retrieval_success == 0 or sel_instruction_fetching_success == 0. #[skippable_if] SEL_SHOULD_RESOLVE_ADDRESS = 0; @@ -87,15 +89,14 @@ pol commit sel_op_is_address[7]; // This is because simulation only considers the bits that can be addresses, and will not // attempt to resolve any other even if the indirect flag says so. Therefore, in particular it // will never error on those operands. -// NOTE: we could avoid this committed column if the lookups could take pol aliases. 
-pol commit sel_op_is_relative_effective[7]; -sel_op_is_relative_effective[0] = sel_op_is_relative_wire[0] * sel_op_is_address[0]; -sel_op_is_relative_effective[1] = sel_op_is_relative_wire[1] * sel_op_is_address[1]; -sel_op_is_relative_effective[2] = sel_op_is_relative_wire[2] * sel_op_is_address[2]; -sel_op_is_relative_effective[3] = sel_op_is_relative_wire[3] * sel_op_is_address[3]; -sel_op_is_relative_effective[4] = sel_op_is_relative_wire[4] * sel_op_is_address[4]; -sel_op_is_relative_effective[5] = sel_op_is_relative_wire[5] * sel_op_is_address[5]; -sel_op_is_relative_effective[6] = sel_op_is_relative_wire[6] * sel_op_is_address[6]; +pol SEL_OP_IS_RELATIVE_EFFECTIVE_0_ = sel_op_is_relative_wire[0] * sel_op_is_address[0]; +pol SEL_OP_IS_RELATIVE_EFFECTIVE_1_ = sel_op_is_relative_wire[1] * sel_op_is_address[1]; +pol SEL_OP_IS_RELATIVE_EFFECTIVE_2_ = sel_op_is_relative_wire[2] * sel_op_is_address[2]; +pol SEL_OP_IS_RELATIVE_EFFECTIVE_3_ = sel_op_is_relative_wire[3] * sel_op_is_address[3]; +pol SEL_OP_IS_RELATIVE_EFFECTIVE_4_ = sel_op_is_relative_wire[4] * sel_op_is_address[4]; +pol SEL_OP_IS_RELATIVE_EFFECTIVE_5_ = sel_op_is_relative_wire[5] * sel_op_is_address[5]; +pol SEL_OP_IS_RELATIVE_EFFECTIVE_6_ = sel_op_is_relative_wire[6] * sel_op_is_address[6]; + pol SEL_OP_IS_INDIRECT_EFFECTIVE_0_ = sel_op_is_indirect_wire[0] * sel_op_is_address[0]; pol SEL_OP_IS_INDIRECT_EFFECTIVE_1_ = sel_op_is_indirect_wire[1] * sel_op_is_address[1]; pol SEL_OP_IS_INDIRECT_EFFECTIVE_2_ = sel_op_is_indirect_wire[2] * sel_op_is_address[2]; @@ -117,9 +118,10 @@ pol commit base_address_tag; // If any operand requires relative resolution, we check the base address. pol commit sel_do_base_check; -// We do a bit of extra work to check if the sum of sel_op_is_relative_effective[i] is non-zero. 
-pol NUM_RELATIVE_OPERANDS = sel_op_is_relative_effective[0] + sel_op_is_relative_effective[1] + sel_op_is_relative_effective[2] + sel_op_is_relative_effective[3] - + sel_op_is_relative_effective[4] + sel_op_is_relative_effective[5] + sel_op_is_relative_effective[6]; +sel_do_base_check * (1 - sel_do_base_check) = 0; +// We do a bit of extra work to check if the sum of SEL_OP_IS_RELATIVE_EFFECTIVE[i] is non-zero. +pol NUM_RELATIVE_OPERANDS = SEL_OP_IS_RELATIVE_EFFECTIVE_0_ + SEL_OP_IS_RELATIVE_EFFECTIVE_1_ + SEL_OP_IS_RELATIVE_EFFECTIVE_2_ + SEL_OP_IS_RELATIVE_EFFECTIVE_3_ + + SEL_OP_IS_RELATIVE_EFFECTIVE_4_ + SEL_OP_IS_RELATIVE_EFFECTIVE_5_ + SEL_OP_IS_RELATIVE_EFFECTIVE_6_; pol commit num_relative_operands_inv; // See https://hackmd.io/moq6viBpRJeLpWrHAogCZw#With-Error-Support. pol NUM_RELATIVE_X = NUM_RELATIVE_OPERANDS; @@ -128,14 +130,13 @@ pol NUM_RELATIVE_E = 1 - sel_do_base_check; #[NUM_RELATIVE_INV_CHECK] NUM_RELATIVE_X * (NUM_RELATIVE_E * (1 - NUM_RELATIVE_Y) + NUM_RELATIVE_Y) - 1 + NUM_RELATIVE_E = 0; -// FIXME: this should eventually be a permutation. #[BASE_ADDRESS_FROM_MEMORY] sel_do_base_check { precomputed.clk, context_id, /*address=*/precomputed.zero, /*value=*/base_address_val, /*tag=*/base_address_tag, /*rw=*/precomputed.zero/*(read)*/ } -in -memory.sel { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; +is memory.sel_addressing_base { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; // This error will be true iff the base address is not valid AND we did actually check it. pol commit sel_base_address_failure; +sel_base_address_failure * (1 - sel_base_address_failure) = 0; // We check the TAG of the base address only. This implies that the value is valid. 
// The following bunch of constraints force @@ -157,81 +158,75 @@ BASE_CHECK_X * (BASE_CHECK_E * (1 - BASE_CHECK_Y) + BASE_CHECK_Y) - 1 + BASE_CHE pol commit op_after_relative[7]; // Whether relative resolution overflowed the address space. pol commit sel_relative_overflow[7]; -sel_relative_overflow[0] * (1 - sel_relative_overflow[0]) = 0; -sel_relative_overflow[1] * (1 - sel_relative_overflow[1]) = 0; -sel_relative_overflow[2] * (1 - sel_relative_overflow[2]) = 0; -sel_relative_overflow[3] * (1 - sel_relative_overflow[3]) = 0; -sel_relative_overflow[4] * (1 - sel_relative_overflow[4]) = 0; -sel_relative_overflow[5] * (1 - sel_relative_overflow[5]) = 0; -sel_relative_overflow[6] * (1 - sel_relative_overflow[6]) = 0; - -// If sel_op_is_relative_effective[i] is 0, then sel_relative_overflow[i] must be 0. -#[NOT_RELATIVE_NO_OVERFLOW_0] -sel_relative_overflow[0] * (1 - sel_op_is_relative_effective[0]) = 0; -#[NOT_RELATIVE_NO_OVERFLOW_1] -sel_relative_overflow[1] * (1 - sel_op_is_relative_effective[1]) = 0; -#[NOT_RELATIVE_NO_OVERFLOW_2] -sel_relative_overflow[2] * (1 - sel_op_is_relative_effective[2]) = 0; -#[NOT_RELATIVE_NO_OVERFLOW_3] -sel_relative_overflow[3] * (1 - sel_op_is_relative_effective[3]) = 0; -#[NOT_RELATIVE_NO_OVERFLOW_4] -sel_relative_overflow[4] * (1 - sel_op_is_relative_effective[4]) = 0; -#[NOT_RELATIVE_NO_OVERFLOW_5] -sel_relative_overflow[5] * (1 - sel_op_is_relative_effective[5]) = 0; -#[NOT_RELATIVE_NO_OVERFLOW_6] -sel_relative_overflow[6] * (1 - sel_op_is_relative_effective[6]) = 0; +// sel_relative_overflow[i] is a boolean value: +// - SEL_OP_IS_RELATIVE_EFFECTIVE[i] == 0, we enforce below that sel_relative_overflow[i] == 0 (see #[NOT_RELATIVE_OR_BASE_FAILURE_NO_OVERFLOW_i]). +// - sel_base_address_failure == 1, we enforce below that sel_relative_overflow[i] == 0 (see #[NOT_RELATIVE_OR_BASE_FAILURE_NO_OVERFLOW_i]). 
+// - SEL_OP_IS_RELATIVE_EFFECTIVE[i] == 1 and sel_base_address_failure == 0, we enforce through +// the lookup #[RELATIVE_OVERFLOW_RESULT_i] that sel_relative_overflow[i] is boolean. This is enforced by the gt gadget. + +// If SEL_OP_IS_RELATIVE_EFFECTIVE[i] == 0 OR sel_base_address_failure == 1, then sel_relative_overflow[i] must be 0. +// The factor 1 - SEL_OP_IS_RELATIVE_EFFECTIVE_0_ + 2 * sel_base_address_failure == 0 iff +// SEL_OP_IS_RELATIVE_EFFECTIVE_0_ == 1 AND sel_base_address_failure == 0 (because both values are boolean). +#[NOT_RELATIVE_OR_BASE_FAILURE_NO_OVERFLOW_0] +sel_relative_overflow[0] * (1 - SEL_OP_IS_RELATIVE_EFFECTIVE_0_ + 2 * sel_base_address_failure) = 0; +#[NOT_RELATIVE_OR_BASE_FAILURE_NO_OVERFLOW_1] +sel_relative_overflow[1] * (1 - SEL_OP_IS_RELATIVE_EFFECTIVE_1_ + 2 * sel_base_address_failure) = 0; +#[NOT_RELATIVE_OR_BASE_FAILURE_NO_OVERFLOW_2] +sel_relative_overflow[2] * (1 - SEL_OP_IS_RELATIVE_EFFECTIVE_2_ + 2 * sel_base_address_failure) = 0; +#[NOT_RELATIVE_OR_BASE_FAILURE_NO_OVERFLOW_3] +sel_relative_overflow[3] * (1 - SEL_OP_IS_RELATIVE_EFFECTIVE_3_ + 2 * sel_base_address_failure) = 0; +#[NOT_RELATIVE_OR_BASE_FAILURE_NO_OVERFLOW_4] +sel_relative_overflow[4] * (1 - SEL_OP_IS_RELATIVE_EFFECTIVE_4_ + 2 * sel_base_address_failure) = 0; +#[NOT_RELATIVE_OR_BASE_FAILURE_NO_OVERFLOW_5] +sel_relative_overflow[5] * (1 - SEL_OP_IS_RELATIVE_EFFECTIVE_5_ + 2 * sel_base_address_failure) = 0; +#[NOT_RELATIVE_OR_BASE_FAILURE_NO_OVERFLOW_6] +sel_relative_overflow[6] * (1 - SEL_OP_IS_RELATIVE_EFFECTIVE_6_ + 2 * sel_base_address_failure) = 0; // Note that we will not add the base address if there was a failure. pol RELATIVE_RESOLUTION_FILTER = (1 - sel_base_address_failure) * base_address_val; // Alias for performance. 
#[RELATIVE_RESOLUTION_0] -op_after_relative[0] = op[0] + sel_op_is_relative_effective[0] * RELATIVE_RESOLUTION_FILTER; +op_after_relative[0] = op[0] + SEL_OP_IS_RELATIVE_EFFECTIVE_0_ * RELATIVE_RESOLUTION_FILTER; #[RELATIVE_RESOLUTION_1] -op_after_relative[1] = op[1] + sel_op_is_relative_effective[1] * RELATIVE_RESOLUTION_FILTER; +op_after_relative[1] = op[1] + SEL_OP_IS_RELATIVE_EFFECTIVE_1_ * RELATIVE_RESOLUTION_FILTER; #[RELATIVE_RESOLUTION_2] -op_after_relative[2] = op[2] + sel_op_is_relative_effective[2] * RELATIVE_RESOLUTION_FILTER; +op_after_relative[2] = op[2] + SEL_OP_IS_RELATIVE_EFFECTIVE_2_ * RELATIVE_RESOLUTION_FILTER; #[RELATIVE_RESOLUTION_3] -op_after_relative[3] = op[3] + sel_op_is_relative_effective[3] * RELATIVE_RESOLUTION_FILTER; +op_after_relative[3] = op[3] + SEL_OP_IS_RELATIVE_EFFECTIVE_3_ * RELATIVE_RESOLUTION_FILTER; #[RELATIVE_RESOLUTION_4] -op_after_relative[4] = op[4] + sel_op_is_relative_effective[4] * RELATIVE_RESOLUTION_FILTER; +op_after_relative[4] = op[4] + SEL_OP_IS_RELATIVE_EFFECTIVE_4_ * RELATIVE_RESOLUTION_FILTER; #[RELATIVE_RESOLUTION_5] -op_after_relative[5] = op[5] + sel_op_is_relative_effective[5] * RELATIVE_RESOLUTION_FILTER; +op_after_relative[5] = op[5] + SEL_OP_IS_RELATIVE_EFFECTIVE_5_ * RELATIVE_RESOLUTION_FILTER; #[RELATIVE_RESOLUTION_6] -op_after_relative[6] = op[6] + sel_op_is_relative_effective[6] * RELATIVE_RESOLUTION_FILTER; +op_after_relative[6] = op[6] + SEL_OP_IS_RELATIVE_EFFECTIVE_6_ * RELATIVE_RESOLUTION_FILTER; + +pol commit sel_op_do_overflow_check[7]; +sel_op_do_overflow_check[0] = SEL_OP_IS_RELATIVE_EFFECTIVE_0_ * (1 - sel_base_address_failure); +sel_op_do_overflow_check[1] = SEL_OP_IS_RELATIVE_EFFECTIVE_1_ * (1 - sel_base_address_failure); +sel_op_do_overflow_check[2] = SEL_OP_IS_RELATIVE_EFFECTIVE_2_ * (1 - sel_base_address_failure); +sel_op_do_overflow_check[3] = SEL_OP_IS_RELATIVE_EFFECTIVE_3_ * (1 - sel_base_address_failure); +sel_op_do_overflow_check[4] = SEL_OP_IS_RELATIVE_EFFECTIVE_4_ * (1 - 
sel_base_address_failure); +sel_op_do_overflow_check[5] = SEL_OP_IS_RELATIVE_EFFECTIVE_5_ * (1 - sel_base_address_failure); +sel_op_do_overflow_check[6] = SEL_OP_IS_RELATIVE_EFFECTIVE_6_ * (1 - sel_base_address_failure); // Helper columns for overflow range check. -pol commit two_to_32; -SEL_SHOULD_RESOLVE_ADDRESS * (two_to_32 - 2**32) = 0; -pol commit overflow_range_check_result[7]; +pol commit highest_address; +// TODO: remove when we support constants in a lookup tuple. +SEL_SHOULD_RESOLVE_ADDRESS * (highest_address - constants.AVM_HIGHEST_MEM_ADDRESS) = 0; +// sel_relative_overflow[i] == 1 iff op_after_relative[i] > highest_address. #[RELATIVE_OVERFLOW_RESULT_0] -overflow_range_check_result[0] = sel_op_is_relative_effective[0] * ((1 - sel_relative_overflow[0]) * (2 * two_to_32 - 2 * op_after_relative[0] - 1) + op_after_relative[0] - two_to_32); +sel_op_do_overflow_check[0] { op_after_relative[0], highest_address, sel_relative_overflow[0] } in gt.sel_addressing { gt.input_a, gt.input_b, gt.res }; #[RELATIVE_OVERFLOW_RESULT_1] -overflow_range_check_result[1] = sel_op_is_relative_effective[1] * ((1 - sel_relative_overflow[1]) * (2 * two_to_32 - 2 * op_after_relative[1] - 1) + op_after_relative[1] - two_to_32); +sel_op_do_overflow_check[1] { op_after_relative[1], highest_address, sel_relative_overflow[1] } in gt.sel_addressing { gt.input_a, gt.input_b, gt.res }; #[RELATIVE_OVERFLOW_RESULT_2] -overflow_range_check_result[2] = sel_op_is_relative_effective[2] * ((1 - sel_relative_overflow[2]) * (2 * two_to_32 - 2 * op_after_relative[2] - 1) + op_after_relative[2] - two_to_32); +sel_op_do_overflow_check[2] { op_after_relative[2], highest_address, sel_relative_overflow[2] } in gt.sel_addressing { gt.input_a, gt.input_b, gt.res }; #[RELATIVE_OVERFLOW_RESULT_3] -overflow_range_check_result[3] = sel_op_is_relative_effective[3] * ((1 - sel_relative_overflow[3]) * (2 * two_to_32 - 2 * op_after_relative[3] - 1) + op_after_relative[3] - two_to_32); +sel_op_do_overflow_check[3] 
{ op_after_relative[3], highest_address, sel_relative_overflow[3] } in gt.sel_addressing { gt.input_a, gt.input_b, gt.res }; #[RELATIVE_OVERFLOW_RESULT_4] -overflow_range_check_result[4] = sel_op_is_relative_effective[4] * ((1 - sel_relative_overflow[4]) * (2 * two_to_32 - 2 * op_after_relative[4] - 1) + op_after_relative[4] - two_to_32); +sel_op_do_overflow_check[4] { op_after_relative[4], highest_address, sel_relative_overflow[4] } in gt.sel_addressing { gt.input_a, gt.input_b, gt.res }; #[RELATIVE_OVERFLOW_RESULT_5] -overflow_range_check_result[5] = sel_op_is_relative_effective[5] * ((1 - sel_relative_overflow[5]) * (2 * two_to_32 - 2 * op_after_relative[5] - 1) + op_after_relative[5] - two_to_32); +sel_op_do_overflow_check[5] { op_after_relative[5], highest_address, sel_relative_overflow[5] } in gt.sel_addressing { gt.input_a, gt.input_b, gt.res }; #[RELATIVE_OVERFLOW_RESULT_6] -overflow_range_check_result[6] = sel_op_is_relative_effective[6] * ((1 - sel_relative_overflow[6]) * (2 * two_to_32 - 2 * op_after_relative[6] - 1) + op_after_relative[6] - two_to_32); - -// Note column constant_32 is defined in other file. 
-#[RELATIVE_OVERFLOW_RANGE_0] -sel_op_is_relative_effective[0] { overflow_range_check_result[0], constant_32 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; -#[RELATIVE_OVERFLOW_RANGE_1] -sel_op_is_relative_effective[1] { overflow_range_check_result[1], constant_32 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; -#[RELATIVE_OVERFLOW_RANGE_2] -sel_op_is_relative_effective[2] { overflow_range_check_result[2], constant_32 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; -#[RELATIVE_OVERFLOW_RANGE_3] -sel_op_is_relative_effective[3] { overflow_range_check_result[3], constant_32 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; -#[RELATIVE_OVERFLOW_RANGE_4] -sel_op_is_relative_effective[4] { overflow_range_check_result[4], constant_32 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; -#[RELATIVE_OVERFLOW_RANGE_5] -sel_op_is_relative_effective[5] { overflow_range_check_result[5], constant_32 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; -#[RELATIVE_OVERFLOW_RANGE_6] -sel_op_is_relative_effective[6] { overflow_range_check_result[6], constant_32 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_op_do_overflow_check[6] { op_after_relative[6], highest_address, sel_relative_overflow[6] } in gt.sel_addressing { gt.input_a, gt.input_b, gt.res }; /************************************************************************************************** * Indirection Resolution @@ -259,30 +254,28 @@ sel_should_apply_indirection[6] = SEL_OP_IS_INDIRECT_EFFECTIVE_6_ * (1 - sel_rel pol commit rop_tag[7]; // If indirection is applied, we need to lookup the value from memory. -// If sel_should_apply_indirection is 1, then we know the address is valid therefore we can make the lookups. -// TODO: complete these lookups once we get memory done. In particular, clk and space id. -// FIXME: these should eventually be permutations. 
+// If sel_should_apply_indirection is 1, then we know the address is valid therefore we can make the permutations. #[INDIRECT_FROM_MEMORY_0] sel_should_apply_indirection[0] { precomputed.clk, context_id, /*address=*/op_after_relative[0], /*value=*/rop[0], /*tag=*/rop_tag[0], /*rw=*/precomputed.zero/*(read)*/ } -in memory.sel { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; +is memory.sel_addressing_indirect[0] { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; #[INDIRECT_FROM_MEMORY_1] sel_should_apply_indirection[1] { precomputed.clk, context_id, /*address=*/op_after_relative[1], /*value=*/rop[1], /*tag=*/rop_tag[1], /*rw=*/precomputed.zero/*(read)*/ } -in memory.sel { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; +is memory.sel_addressing_indirect[1] { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; #[INDIRECT_FROM_MEMORY_2] sel_should_apply_indirection[2] { precomputed.clk, context_id, /*address=*/op_after_relative[2], /*value=*/rop[2], /*tag=*/rop_tag[2], /*rw=*/precomputed.zero/*(read)*/ } -in memory.sel { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; +is memory.sel_addressing_indirect[2] { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; #[INDIRECT_FROM_MEMORY_3] sel_should_apply_indirection[3] { precomputed.clk, context_id, /*address=*/op_after_relative[3], /*value=*/rop[3], /*tag=*/rop_tag[3], /*rw=*/precomputed.zero/*(read)*/ } -in memory.sel { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; +is memory.sel_addressing_indirect[3] { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; #[INDIRECT_FROM_MEMORY_4] sel_should_apply_indirection[4] { precomputed.clk, context_id, /*address=*/op_after_relative[4], /*value=*/rop[4], /*tag=*/rop_tag[4], /*rw=*/precomputed.zero/*(read)*/ 
} -in memory.sel { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; +is memory.sel_addressing_indirect[4] { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; #[INDIRECT_FROM_MEMORY_5] sel_should_apply_indirection[5] { precomputed.clk, context_id, /*address=*/op_after_relative[5], /*value=*/rop[5], /*tag=*/rop_tag[5], /*rw=*/precomputed.zero/*(read)*/ } -in memory.sel { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; +is memory.sel_addressing_indirect[5] { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; #[INDIRECT_FROM_MEMORY_6] sel_should_apply_indirection[6] { precomputed.clk, context_id, /*address=*/op_after_relative[6], /*value=*/rop[6], /*tag=*/rop_tag[6], /*rw=*/precomputed.zero/*(read)*/ } -in memory.sel { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; +is memory.sel_addressing_indirect[6] { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; // Otherwise, if indirection is not applied , we propagate the operands from the previous step. #[INDIRECT_PROPAGATION_0] @@ -311,6 +304,7 @@ in memory.sel { memory.clk, memory.space_id, memory.address, memory.value, memor // Observe that we don't need to know exactly which one failed. // We use this fact to "batch" the checks and do only 1 comparison against 0 (inverse check). pol commit sel_some_final_check_failed; +sel_some_final_check_failed * (1 - sel_some_final_check_failed) = 0; // Each tag takes at most 3 bits (guaranteed by mem lookup!), we can encode all of them in a field. // See https://hackmd.io/moq6viBpRJeLpWrHAogCZw?both#Batching-comparison-of-n-bit-numbers. // This diff will be 0 iff all tags are U32. @@ -336,6 +330,7 @@ BATCHED_TAGS_DIFF_EQ = 0; // Whether there was any error resolving any of the operands. // I.e., the OR of all other error selectors. 
pol commit sel_addressing_error; +sel_addressing_error * (1 - sel_addressing_error) = 0; pol ADDRESSING_ERROR_COLLECTION = sel_base_address_failure + sel_relative_overflow[0] + sel_relative_overflow[1] + sel_relative_overflow[2] + sel_relative_overflow[3] + sel_relative_overflow[4] + sel_relative_overflow[5] + sel_relative_overflow[6] diff --git a/barretenberg/cpp/pil/vm2/execution/discard.pil b/barretenberg/cpp/pil/vm2/execution/discard.pil index 3c9e875b7764..36319e471f52 100644 --- a/barretenberg/cpp/pil/vm2/execution/discard.pil +++ b/barretenberg/cpp/pil/vm2/execution/discard.pil @@ -1,8 +1,12 @@ -// Discarding on error in execution - -// This is a virtual gadget, which is part of the execution trace. -// This subtrace is focused on managing the changes to the discard and dying_context_id columns. -namespace execution; +/** + * Discarding on error in execution. + * + * Design Document: https://docs.google.com/document/d/1xz5sZSxTu841K8uvnT8U-nO2X5ZjY8o0TjOYKfT9b6o + * + * This subtrace is focused on managing the changes to the discard and dying_context_id columns. + * It is a virtual gadget, which is part of the execution trace. + */ +namespace execution; // virtual to execution.pil // No relations will be checked if this identity is satisfied. #[skippable_if] diff --git a/barretenberg/cpp/pil/vm2/execution/gas.pil b/barretenberg/cpp/pil/vm2/execution/gas.pil index ac7bbde9bf30..abb97cec31b2 100644 --- a/barretenberg/cpp/pil/vm2/execution/gas.pil +++ b/barretenberg/cpp/pil/vm2/execution/gas.pil @@ -1,5 +1,5 @@ include "../precomputed.pil"; -include "../range_check.pil"; +include "../gt.pil"; // This is a virtual gadget, which is part of the execution trace. // https://excalidraw.com/#json=-fkwtFjYVOq2Z69Q351AE,O3SbjwK5eHX7-Oz2OH8RqQ @@ -49,8 +49,8 @@ namespace execution; // ==== COMPARISON AGAINST THE LIMITS ==== // We will sum up all the gas used (base and dynamic) and compare just once. 
- pol TOTAL_L2_GAS_USED = BASE_L2_GAS + DYNAMIC_L2_GAS_USED; - pol TOTAL_DA_GAS_USED = base_da_gas + DYNAMIC_DA_GAS_USED; + pol L2_GAS_USED = BASE_L2_GAS + DYNAMIC_L2_GAS_USED; + pol DA_GAS_USED = base_da_gas + DYNAMIC_DA_GAS_USED; // We are going to do 64 bit comparisons. If we assume: // prev_l2_gas_used to be u32::MAX_VALUE @@ -59,43 +59,24 @@ namespace execution; // dynamic_da_gas to be u32::MAX_VALUE // Then prev_l2_gas_used + BASE_L2_GAS + dynamic_l2_gas_factor*dynamic_da_gas is exactly u64::MAX_VALUE. - pol commit constant_64; - sel_should_check_gas * (64 - constant_64) = 0; - - pol commit out_of_gas_l2; - out_of_gas_l2 * (1 - out_of_gas_l2) = 0; - pol commit out_of_gas_da; - out_of_gas_da * (1 - out_of_gas_da) = 0; - - pol PREV_GAS_PLUS_USAGE_L2 = prev_l2_gas_used + TOTAL_L2_GAS_USED; - // Assumes l2_gas_limit is 32 bit and PREV_GAS_PLUS_USAGE_L2 is 64 bit - // So we perform a 64 bit comparison - pol LIMIT_GTE_USED_L2 = l2_gas_limit - PREV_GAS_PLUS_USAGE_L2; - pol LIMIT_LT_USED_L2 = PREV_GAS_PLUS_USAGE_L2 - l2_gas_limit - 1; - pol commit limit_used_l2_cmp_diff; - // We multiply by sel_should_check_gas to force a 0 if we shouldn't check the gas. - #[L2_CMP_DIFF] - limit_used_l2_cmp_diff = sel_should_check_gas * ((LIMIT_LT_USED_L2 - LIMIT_GTE_USED_L2) * out_of_gas_l2 + LIMIT_GTE_USED_L2); - - #[LIMIT_USED_L2_RANGE] - sel_should_check_gas { limit_used_l2_cmp_diff, constant_64 } - in - range_check.sel { range_check.value, range_check.rng_chk_bits }; - - pol PREV_GAS_PLUS_USAGE_DA = prev_da_gas_used + TOTAL_DA_GAS_USED; - // Assumes da_gas_limit is 32 bit and PREV_GAS_PLUS_USAGE_DA is 64 bit - // So we perform a 64 bit comparison - pol LIMIT_GTE_USED_DA = da_gas_limit - PREV_GAS_PLUS_USAGE_DA; - pol LIMIT_LT_USED_DA = PREV_GAS_PLUS_USAGE_DA - da_gas_limit - 1; - pol commit limit_used_da_cmp_diff; - // We multiply by sel_should_check_gas to force a 0 if we shouldn't check the gas. 
- #[DA_CMP_DIFF] - limit_used_da_cmp_diff = sel_should_check_gas * ((LIMIT_LT_USED_DA - LIMIT_GTE_USED_DA) * out_of_gas_da + LIMIT_GTE_USED_DA); - - #[LIMIT_USED_DA_RANGE] - sel_should_check_gas { limit_used_da_cmp_diff, constant_64 } - in - range_check.sel { range_check.value, range_check.rng_chk_bits }; + pol commit out_of_gas_l2; // Boolean constraint enforced through lookup into gt. + pol commit out_of_gas_da; // Boolean constraint enforced through lookup into gt. + + // TODO: Once we support expression in lookup, we can replace this column by an alias. + pol commit total_gas_l2; + sel_should_check_gas * (prev_l2_gas_used + L2_GAS_USED - total_gas_l2) = 0; + // Assumes l2_gas_limit is 32 bit and total_gas_l2 is 64 bit. + + #[IS_OUT_OF_GAS_L2] + sel_should_check_gas { total_gas_l2, l2_gas_limit, out_of_gas_l2 } in gt.sel_gas { gt.input_a, gt.input_b, gt.res }; + + // TODO: Once we support expression in lookup, we can replace this column by an alias. + pol commit total_gas_da; + sel_should_check_gas * (prev_da_gas_used + DA_GAS_USED - total_gas_da) = 0; + // Assumes da_gas_limit is 32 bit and total_gas_da is 64 bit. + + #[IS_OUT_OF_GAS_DA] + sel_should_check_gas { total_gas_da, da_gas_limit, out_of_gas_da } in gt.sel_gas { gt.input_a, gt.input_b, gt.res }; pol commit sel_out_of_gas; sel_out_of_gas = 1 - (1 - out_of_gas_l2) * (1 - out_of_gas_da); diff --git a/barretenberg/cpp/pil/vm2/execution/registers.pil b/barretenberg/cpp/pil/vm2/execution/registers.pil index 31aad55ddf34..f82ad01b617b 100644 --- a/barretenberg/cpp/pil/vm2/execution/registers.pil +++ b/barretenberg/cpp/pil/vm2/execution/registers.pil @@ -49,36 +49,36 @@ sel_op_reg_effective[5] = sel_mem_op_reg[5] * (sel_should_read_registers * (1 - #[SEL_OP_REG_EFFECTIVE_6] sel_op_reg_effective[6] = sel_mem_op_reg[6] * (sel_should_read_registers * (1 - rw_reg[6]) + sel_should_write_registers * rw_reg[6]); -// FIXME: this should eventually be a permutation. 
-// Observe that the following lookups span both temporality groups. +// Observe that the following permutations span both temporality groups. // That's why we have to properly activate them with the above selectors, which take into account // whether we have reached a given phase. #[MEM_OP_0] -sel_op_reg_effective[0] { rop[0], register[0], mem_tag_reg[0], rw_reg[0], precomputed.clk, context_id } -in memory.sel { memory.address, memory.value, memory.tag, memory.rw, memory.clk, memory.space_id }; +sel_op_reg_effective[0] { precomputed.clk, context_id, rop[0], register[0], mem_tag_reg[0], rw_reg[0] } +is memory.sel_register_op[0] { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; #[MEM_OP_1] -sel_op_reg_effective[1] { rop[1], register[1], mem_tag_reg[1], rw_reg[1], precomputed.clk, context_id } -in memory.sel { memory.address, memory.value, memory.tag, memory.rw, memory.clk, memory.space_id }; +sel_op_reg_effective[1] { precomputed.clk, context_id, rop[1], register[1], mem_tag_reg[1], rw_reg[1] } +is memory.sel_register_op[1] { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; #[MEM_OP_2] -sel_op_reg_effective[2] { rop[2], register[2], mem_tag_reg[2], rw_reg[2], precomputed.clk, context_id } -in memory.sel { memory.address, memory.value, memory.tag, memory.rw, memory.clk, memory.space_id }; +sel_op_reg_effective[2] { precomputed.clk, context_id, rop[2], register[2], mem_tag_reg[2], rw_reg[2] } +is memory.sel_register_op[2] { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; #[MEM_OP_3] -sel_op_reg_effective[3] { rop[3], register[3], mem_tag_reg[3], rw_reg[3], precomputed.clk, context_id } -in memory.sel { memory.address, memory.value, memory.tag, memory.rw, memory.clk, memory.space_id }; +sel_op_reg_effective[3] { precomputed.clk, context_id, rop[3], register[3], mem_tag_reg[3], rw_reg[3] } +is memory.sel_register_op[3] { memory.clk, memory.space_id, memory.address, 
memory.value, memory.tag, memory.rw }; #[MEM_OP_4] -sel_op_reg_effective[4] { rop[4], register[4], mem_tag_reg[4], rw_reg[4], precomputed.clk, context_id } -in memory.sel { memory.address, memory.value, memory.tag, memory.rw, memory.clk, memory.space_id }; +sel_op_reg_effective[4] { precomputed.clk, context_id, rop[4], register[4], mem_tag_reg[4], rw_reg[4] } +is memory.sel_register_op[4] { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; #[MEM_OP_5] -sel_op_reg_effective[5] { rop[5], register[5], mem_tag_reg[5], rw_reg[5], precomputed.clk, context_id } -in memory.sel { memory.address, memory.value, memory.tag, memory.rw, memory.clk, memory.space_id }; +sel_op_reg_effective[5] { precomputed.clk, context_id, rop[5], register[5], mem_tag_reg[5], rw_reg[5] } +is memory.sel_register_op[5] { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; #[MEM_OP_6] -sel_op_reg_effective[6] { rop[6], register[6], mem_tag_reg[6], rw_reg[6], precomputed.clk, context_id } -in memory.sel { memory.address, memory.value, memory.tag, memory.rw, memory.clk, memory.space_id }; +sel_op_reg_effective[6] { precomputed.clk, context_id, rop[6], register[6], mem_tag_reg[6], rw_reg[6] } +is memory.sel_register_op[6] { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; // This error is true iff some final check failed. That is if some tag is not the expected one. // Observe that we don't need to know exactly which one failed. // We use this fact to "batch" the checks and do only 1 comparison against 0 (inverse check). pol commit sel_register_read_error; +sel_register_read_error * (1 - sel_register_read_error) = 0; // Each tag takes at most 3 bits, we can encode all of them in a field. // See https://hackmd.io/moq6viBpRJeLpWrHAogCZw?both#Batching-comparison-of-n-bit-numbers. // This diff will be 0 iff all tags are the expected one. 
diff --git a/barretenberg/cpp/pil/vm2/ff_gt.pil b/barretenberg/cpp/pil/vm2/ff_gt.pil index 90663e97da03..753e48dbf436 100644 --- a/barretenberg/cpp/pil/vm2/ff_gt.pil +++ b/barretenberg/cpp/pil/vm2/ff_gt.pil @@ -46,7 +46,7 @@ namespace ff_gt; pol commit a; pol commit b; pol commit result; - (result * (1 - result)) = 0; + result * (1 - result) = 0; // Should be looked up based on this selector // This will be off when doing the shifts for the remaning range constraints. diff --git a/barretenberg/cpp/pil/vm2/gt.pil b/barretenberg/cpp/pil/vm2/gt.pil index 2ed02c1ad34a..c57587ced5f8 100644 --- a/barretenberg/cpp/pil/vm2/gt.pil +++ b/barretenberg/cpp/pil/vm2/gt.pil @@ -1,10 +1,36 @@ include "range_check.pil"; +// Important Precondition: +// Both inputs (input_a and input_b) must be bounded by p - 1 - 2^128 +// and the absolute difference between them must be less than 2^128. These +// properties need to be constrained by the caller. +// In the avm, we will use this gadget with both inputs bounded by 2^128. +// Note that simulation (gt.cpp) is taking uint128_t input values. Values of +// types FF use ff_gt gadget. +// +// Usage: +// +// sel { a, b, result } in gt.sel { gt.input_a, gt.input_b, gt.res }; +// where gt.res is a boolean value that is true if input_a > input_b, false otherwise. namespace gt; pol commit sel; sel * (1 - sel) = 0; + // This is used to decouple generation of inverses of lookups into this trace. + pol commit sel_sha256; + pol commit sel_addressing; + pol commit sel_alu; + pol commit sel_gas; + pol commit sel_others; // Any other lookup into this trace. + sel_sha256 * (1 - sel_sha256) = 0; + sel_addressing * (1 - sel_addressing) = 0; + sel_alu * (1 - sel_alu) = 0; + sel_gas * (1 - sel_gas) = 0; + sel_others * (1 - sel_others) = 0; + // If any of the above selectors is 1, then sel must be 1.
+ (sel_sha256 + sel_addressing + sel_alu + sel_gas + sel_others) * (1 - sel) = 0; + #[skippable_if] sel = 0; @@ -14,17 +40,23 @@ namespace gt; pol commit res; // Boolean res * (1 - res) = 0; - // NOTE: We currently range check all abs diffs against 128 bits, which for most calls will be sub-optimal, - // but allows us to avoid forwarding a rng_chk_bits value and deduplicate more events. - // TODO: Investigate whether we want to define the rng_chk_bits here or continue checking all against 128. - pol commit constant_128; - sel * (128 - constant_128) = 0; - pol A_LTE_B = input_b - input_a; pol A_GT_B = input_a - input_b - 1; pol commit abs_diff; + // In trace generation, we will pick num_bits to be the smallest multiple of 16 such that + // 2^num_bits > abs_diff. This deterministic derivation leads to a deduplication of the range_check + // event whenever the inputs are the same. In other words, whenever the gt event deduplicates, + // the range_check event will also deduplicate. + // num_bits is not constrained here but range_check forces that abs_diff < 2^128 no matter what. + // A range-check on abs_diff with any number of bits <= 128 therefore asserts that #[GT_RESULT] + // is correct (assuming the preconditions are met). + // The reason to pick num_bits smaller than 128 is to reduce the number of activated 16-bit range checks + // in range_check.pil. This is a performance optimization. Choosing a tighter num_bits that is not a + // multiple of 16 would not reduce the number of 16-bit range checks.
+ pol commit num_bits; + #[GT_RESULT] sel * ( (A_GT_B - A_LTE_B) * res + A_LTE_B - abs_diff ) = 0; #[GT_RANGE] - sel { abs_diff, constant_128 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; + sel { abs_diff, num_bits } in range_check.sel_gt { range_check.value, range_check.rng_chk_bits }; diff --git a/barretenberg/cpp/pil/vm2/keccak_memory.pil b/barretenberg/cpp/pil/vm2/keccak_memory.pil index 15ec85309871..d7ddc241ae7e 100644 --- a/barretenberg/cpp/pil/vm2/keccak_memory.pil +++ b/barretenberg/cpp/pil/vm2/keccak_memory.pil @@ -88,7 +88,7 @@ sel * (1 - sel) = 0; // No relations will be checked if this identity is satisfied. #[skippable_if] -sel + last = 0; +sel = 0; pol commit start_read; start_read * (1 - start_read) = 0; @@ -237,13 +237,9 @@ val43 = (1 - last) * val42'; #[VAL44] val44 = (1 - last) * val43'; -// Memory permutation -// TODO: Proper memory permutation (not a lookup), and introduce a specific -// selector for keccak in memory sub-trace. #[SLICE_TO_MEM] -sel {clk, addr, val00, tag, rw, space_id} -in // TODO: replace with `is` (permutation) -memory.sel { memory.clk, memory.address, memory.value, memory.tag, memory.rw, memory.space_id }; +sel { clk, space_id, addr, val00, tag, rw } +is memory.sel_keccak { memory.clk, memory.space_id, memory.address, memory.value, memory.tag, memory.rw }; // Used to constrain the number of rounds in keccakf1600.pil through the slice_write lookup. pol commit num_rounds; diff --git a/barretenberg/cpp/pil/vm2/keccakf1600.pil b/barretenberg/cpp/pil/vm2/keccakf1600.pil index 1f712a665ef1..1637439dd8a4 100644 --- a/barretenberg/cpp/pil/vm2/keccakf1600.pil +++ b/barretenberg/cpp/pil/vm2/keccakf1600.pil @@ -74,7 +74,7 @@ sel * (1 - sel) = 0; // No relations will be checked if this identity is satisfied. 
#[skippable_if] -sel + last = 0; +sel = 0; // error is defined below after round function pol commit sel_no_error; @@ -181,25 +181,25 @@ pol commit state_in_00, state_in_01, state_in_02, state_in_03, state_in_04, pol commit theta_xor_01; #[THETA_XOR_01] sel_no_error { bitwise_xor_op_id, state_in_00, state_in_01, theta_xor_01, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // theta_xor_01 ^ state_in_02 = theta_xor_02 pol commit theta_xor_02; #[THETA_XOR_02] sel_no_error { bitwise_xor_op_id, theta_xor_01, state_in_02, theta_xor_02, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // theta_xor_02 ^ state_in_03 = theta_xor_03 pol commit theta_xor_03; #[THETA_XOR_03] sel_no_error { bitwise_xor_op_id, theta_xor_02, state_in_03, theta_xor_03, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // theta_xor_03 ^ state_in_04 = theta_xor_row_0 pol commit theta_xor_row_0; #[THETA_XOR_ROW_0] sel_no_error { bitwise_xor_op_id, theta_xor_03, state_in_04, theta_xor_row_0, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; //############################################################################# // Theta XOR Computation Index 1 @@ -209,25 +209,25 @@ bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, b pol commit theta_xor_11; #[THETA_XOR_11] sel_no_error { bitwise_xor_op_id, state_in_10, state_in_11, 
theta_xor_11, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // theta_xor_11 ^ state_in_12 = theta_xor_12 pol commit theta_xor_12; #[THETA_XOR_12] sel_no_error { bitwise_xor_op_id, theta_xor_11, state_in_12, theta_xor_12, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // theta_xor_12 ^ state_in_13 = theta_xor_13 pol commit theta_xor_13; #[THETA_XOR_13] sel_no_error { bitwise_xor_op_id, theta_xor_12, state_in_13, theta_xor_13, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // theta_xor_13 ^ state_in_14 = theta_xor_row_1 pol commit theta_xor_row_1; #[THETA_XOR_ROW_1] sel_no_error { bitwise_xor_op_id, theta_xor_13, state_in_14, theta_xor_row_1, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; //############################################################################# // Theta XOR Computation Index 2 @@ -237,25 +237,25 @@ bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, b pol commit theta_xor_21; #[THETA_XOR_21] sel_no_error { bitwise_xor_op_id, state_in_20, state_in_21, theta_xor_21, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // theta_xor_21 ^ state_in_22 = theta_xor_22 pol commit theta_xor_22; #[THETA_XOR_22] 
sel_no_error { bitwise_xor_op_id, theta_xor_21, state_in_22, theta_xor_22, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // theta_xor_22 ^ state_in_23 = theta_xor_23 pol commit theta_xor_23; #[THETA_XOR_23] sel_no_error { bitwise_xor_op_id, theta_xor_22, state_in_23, theta_xor_23, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // theta_xor_23 ^ state_in_24 = theta_xor_row_2 pol commit theta_xor_row_2; #[THETA_XOR_ROW_2] sel_no_error { bitwise_xor_op_id, theta_xor_23, state_in_24, theta_xor_row_2, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; //############################################################################# // Theta XOR Computation Index 3 @@ -265,25 +265,25 @@ bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, b pol commit theta_xor_31; #[THETA_XOR_31] sel_no_error { bitwise_xor_op_id, state_in_30, state_in_31, theta_xor_31, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // theta_xor_31 ^ state_in_32 = theta_xor_32 pol commit theta_xor_32; #[THETA_XOR_32] sel_no_error { bitwise_xor_op_id, theta_xor_31, state_in_32, theta_xor_32, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // theta_xor_32 ^ 
state_in_33 = theta_xor_33 pol commit theta_xor_33; #[THETA_XOR_33] sel_no_error { bitwise_xor_op_id, theta_xor_32, state_in_33, theta_xor_33, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // theta_xor_33 ^ state_in_34 = theta_xor_row_3 pol commit theta_xor_row_3; #[THETA_XOR_ROW_3] sel_no_error { bitwise_xor_op_id, theta_xor_33, state_in_34, theta_xor_row_3, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; //############################################################################# // Theta XOR Computation Index 4 @@ -293,25 +293,25 @@ bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, b pol commit theta_xor_41; #[THETA_XOR_41] sel_no_error { bitwise_xor_op_id, state_in_40, state_in_41, theta_xor_41, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // theta_xor_41 ^ state_in_42 = theta_xor_42 pol commit theta_xor_42; #[THETA_XOR_42] sel_no_error { bitwise_xor_op_id, theta_xor_41, state_in_42, theta_xor_42, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // theta_xor_42 ^ state_in_43 = theta_xor_43 pol commit theta_xor_43; #[THETA_XOR_43] sel_no_error { bitwise_xor_op_id, theta_xor_42, state_in_43, theta_xor_43, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, 
bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // theta_xor_43 ^ state_in_44 = theta_xor_row_4 pol commit theta_xor_row_4; #[THETA_XOR_ROW_4] sel_no_error { bitwise_xor_op_id, theta_xor_43, state_in_44, theta_xor_row_4, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; //############################################################################# // Theta XOR ROTL1 Computation @@ -392,27 +392,27 @@ theta_xor_row_rotl1_4 = 2 * theta_xor_row_low63_4 + theta_xor_row_msb_4; pol commit theta_combined_xor_0; #[THETA_COMBINED_XOR_0] sel_no_error { bitwise_xor_op_id, theta_xor_row_4, theta_xor_row_rotl1_1, theta_combined_xor_0, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; pol commit theta_combined_xor_1; #[THETA_COMBINED_XOR_1] sel_no_error { bitwise_xor_op_id, theta_xor_row_0, theta_xor_row_rotl1_2, theta_combined_xor_1, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; pol commit theta_combined_xor_2; #[THETA_COMBINED_XOR_2] sel_no_error { bitwise_xor_op_id, theta_xor_row_1, theta_xor_row_rotl1_3, theta_combined_xor_2, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; pol commit theta_combined_xor_3; #[THETA_COMBINED_XOR_3] sel_no_error { bitwise_xor_op_id, theta_xor_row_2, theta_xor_row_rotl1_4, theta_combined_xor_3, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, 
bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; pol commit theta_combined_xor_4; #[THETA_COMBINED_XOR_4] sel_no_error { bitwise_xor_op_id, theta_xor_row_3, theta_xor_row_rotl1_0, theta_combined_xor_4, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; //############################################################################# // State after Theta @@ -429,107 +429,107 @@ pol commit state_theta_00, state_theta_01, state_theta_02, state_theta_03, state // ##################### STATE_THETA Row 0 ############################## #[STATE_THETA_00] sel_no_error { bitwise_xor_op_id, state_in_00, theta_combined_xor_0, state_theta_00, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_01] sel_no_error { bitwise_xor_op_id, state_in_01, theta_combined_xor_0, state_theta_01, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_02] sel_no_error { bitwise_xor_op_id, state_in_02, theta_combined_xor_0, state_theta_02, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_03] sel_no_error { bitwise_xor_op_id, state_in_03, theta_combined_xor_0, state_theta_03, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, 
bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_04] sel_no_error { bitwise_xor_op_id, state_in_04, theta_combined_xor_0, state_theta_04, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // ##################### STATE_THETA Row 1 ############################## #[STATE_THETA_10] sel_no_error { bitwise_xor_op_id, state_in_10, theta_combined_xor_1, state_theta_10, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_11] sel_no_error { bitwise_xor_op_id, state_in_11, theta_combined_xor_1, state_theta_11, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_12] sel_no_error { bitwise_xor_op_id, state_in_12, theta_combined_xor_1, state_theta_12, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_13] sel_no_error { bitwise_xor_op_id, state_in_13, theta_combined_xor_1, state_theta_13, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_14] sel_no_error { bitwise_xor_op_id, state_in_14, theta_combined_xor_1, state_theta_14, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, 
bitwise.acc_ic, bitwise.tag_a }; // ##################### STATE_THETA Row 2 ############################## #[STATE_THETA_20] sel_no_error { bitwise_xor_op_id, state_in_20, theta_combined_xor_2, state_theta_20, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_21] sel_no_error { bitwise_xor_op_id, state_in_21, theta_combined_xor_2, state_theta_21, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_22] sel_no_error { bitwise_xor_op_id, state_in_22, theta_combined_xor_2, state_theta_22, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_23] sel_no_error { bitwise_xor_op_id, state_in_23, theta_combined_xor_2, state_theta_23, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_24] sel_no_error { bitwise_xor_op_id, state_in_24, theta_combined_xor_2, state_theta_24, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // ##################### STATE_THETA Row 3 ############################## #[STATE_THETA_30] sel_no_error { bitwise_xor_op_id, state_in_30, theta_combined_xor_3, state_theta_30, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; 
+bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_31] sel_no_error { bitwise_xor_op_id, state_in_31, theta_combined_xor_3, state_theta_31, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_32] sel_no_error { bitwise_xor_op_id, state_in_32, theta_combined_xor_3, state_theta_32, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_33] sel_no_error { bitwise_xor_op_id, state_in_33, theta_combined_xor_3, state_theta_33, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_34] sel_no_error { bitwise_xor_op_id, state_in_34, theta_combined_xor_3, state_theta_34, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // ##################### STATE_THETA Row 4 ############################## #[STATE_THETA_40] sel_no_error { bitwise_xor_op_id, state_in_40, theta_combined_xor_4, state_theta_40, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_41] sel_no_error { bitwise_xor_op_id, state_in_41, theta_combined_xor_4, state_theta_41, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { 
bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_42] sel_no_error { bitwise_xor_op_id, state_in_42, theta_combined_xor_4, state_theta_42, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_43] sel_no_error { bitwise_xor_op_id, state_in_43, theta_combined_xor_4, state_theta_43, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; #[STATE_THETA_44] sel_no_error { bitwise_xor_op_id, state_in_44, theta_combined_xor_4, state_theta_44, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; //############################################################################# // Rho round function @@ -743,72 +743,72 @@ state_rho_44 = POW_ROT_LEN_44 * state_theta_low_44 + state_theta_hi_44; pol commit rot_len_02; sel * (rot_len_02 - ROT_LEN_02) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_02_RANGE] -sel_no_error { state_theta_hi_02, rot_len_02 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_hi_02, rot_len_02 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_len_04; sel * (rot_len_04 - ROT_LEN_04) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_04_RANGE] -sel_no_error { state_theta_hi_04, rot_len_04 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_hi_04, rot_len_04 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_len_10; 
sel * (rot_len_10 - ROT_LEN_10) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_10_RANGE] -sel_no_error { state_theta_hi_10, rot_len_10 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_hi_10, rot_len_10 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_len_12; sel * (rot_len_12 - ROT_LEN_12) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_12_RANGE] -sel_no_error { state_theta_hi_12, rot_len_12 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_hi_12, rot_len_12 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_len_14; sel * (rot_len_14 - ROT_LEN_14) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_14_RANGE] -sel_no_error { state_theta_hi_14, rot_len_14 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_hi_14, rot_len_14 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_len_21; sel * (rot_len_21 - ROT_LEN_21) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_21_RANGE] -sel_no_error { state_theta_hi_21, rot_len_21 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_hi_21, rot_len_21 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_len_23; sel * (rot_len_23 - ROT_LEN_23) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_23_RANGE] -sel_no_error { state_theta_hi_23, rot_len_23 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_hi_23, rot_len_23 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_len_30; sel * (rot_len_30 - ROT_LEN_30) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_30_RANGE] 
-sel_no_error { state_theta_hi_30, rot_len_30 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_hi_30, rot_len_30 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_len_32; sel * (rot_len_32 - ROT_LEN_32) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_32_RANGE] -sel_no_error { state_theta_hi_32, rot_len_32 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_hi_32, rot_len_32 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_len_33; sel * (rot_len_33 - ROT_LEN_33) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_33_RANGE] -sel_no_error { state_theta_hi_33, rot_len_33 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_hi_33, rot_len_33 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_len_40; sel * (rot_len_40 - ROT_LEN_40) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_40_RANGE] -sel_no_error { state_theta_hi_40, rot_len_40 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_hi_40, rot_len_40 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_len_41; sel * (rot_len_41 - ROT_LEN_41) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_41_RANGE] -sel_no_error { state_theta_hi_41, rot_len_41 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_hi_41, rot_len_41 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_len_43; sel * (rot_len_43 - ROT_LEN_43) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_43_RANGE] -sel_no_error { state_theta_hi_43, rot_len_43 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; 
+sel_no_error { state_theta_hi_43, rot_len_43 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_len_44; sel * (rot_len_44 - ROT_LEN_44) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_44_RANGE] -sel_no_error { state_theta_hi_44, rot_len_44 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_hi_44, rot_len_44 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; // Rotations with length > 32: // Indices: 01, 03, 11, 13, 20, 22, 24, 31, 34, 42 @@ -818,52 +818,52 @@ sel_no_error { state_theta_hi_44, rot_len_44 } in range_check.sel { range_check. pol commit rot_64_min_len_01; sel * (rot_64_min_len_01 - (64 - ROT_LEN_01)) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_01_RANGE] -sel_no_error { state_theta_low_01, rot_64_min_len_01 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_low_01, rot_64_min_len_01 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_64_min_len_03; sel * (rot_64_min_len_03 - (64 - ROT_LEN_03)) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_03_RANGE] -sel_no_error { state_theta_low_03, rot_64_min_len_03 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_low_03, rot_64_min_len_03 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_64_min_len_11; sel * (rot_64_min_len_11 - (64 - ROT_LEN_11)) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_11_RANGE] -sel_no_error { state_theta_low_11, rot_64_min_len_11 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_low_11, rot_64_min_len_11 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_64_min_len_13; sel * (rot_64_min_len_13 - (64 - ROT_LEN_13)) = 0; 
// TODO: Remove once we support constants in lookups #[THETA_LIMB_13_RANGE] -sel_no_error { state_theta_low_13, rot_64_min_len_13 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_low_13, rot_64_min_len_13 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_64_min_len_20; sel * (rot_64_min_len_20 - (64 - ROT_LEN_20)) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_20_RANGE] -sel_no_error { state_theta_low_20, rot_64_min_len_20 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_low_20, rot_64_min_len_20 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_64_min_len_22; sel * (rot_64_min_len_22 - (64 - ROT_LEN_22)) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_22_RANGE] -sel_no_error { state_theta_low_22, rot_64_min_len_22 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_low_22, rot_64_min_len_22 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_64_min_len_24; sel * (rot_64_min_len_24 - (64 - ROT_LEN_24)) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_24_RANGE] -sel_no_error { state_theta_low_24, rot_64_min_len_24 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_low_24, rot_64_min_len_24 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_64_min_len_31; sel * (rot_64_min_len_31 - (64 - ROT_LEN_31)) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_31_RANGE] -sel_no_error { state_theta_low_31, rot_64_min_len_31 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_low_31, rot_64_min_len_31 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit 
rot_64_min_len_34; sel * (rot_64_min_len_34 - (64 - ROT_LEN_34)) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_34_RANGE] -sel_no_error { state_theta_low_34, rot_64_min_len_34 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_low_34, rot_64_min_len_34 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; pol commit rot_64_min_len_42; sel * (rot_64_min_len_42 - (64 - ROT_LEN_42)) = 0; // TODO: Remove once we support constants in lookups #[THETA_LIMB_42_RANGE] -sel_no_error { state_theta_low_42, rot_64_min_len_42 } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +sel_no_error { state_theta_low_42, rot_64_min_len_42 } in range_check.sel_keccak { range_check.value, range_check.rng_chk_bits }; //############################################################################# // Pi round function @@ -998,127 +998,127 @@ pol commit state_pi_and_00, state_pi_and_01, state_pi_and_02, state_pi_and_03, s // STATE_PI_20 = state_rho_22 #[STATE_PI_AND_00] sel_no_error { bitwise_and_op_id, state_pi_not_10, state_rho_22, state_pi_and_00, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_21 = state_rho_02 #[STATE_PI_AND_01] sel_no_error { bitwise_and_op_id, state_pi_not_11, state_rho_02, state_pi_and_01, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_22 = state_rho_32 #[STATE_PI_AND_02] sel_no_error { bitwise_and_op_id, state_pi_not_12, state_rho_32, state_pi_and_02, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, 
bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_23 = state_rho_12 #[STATE_PI_AND_03] sel_no_error { bitwise_and_op_id, state_pi_not_13, state_rho_12, state_pi_and_03, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_24 = state_rho_42 #[STATE_PI_AND_04] sel_no_error { bitwise_and_op_id, state_pi_not_14, state_rho_42, state_pi_and_04, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_30 = state_rho_33 #[STATE_PI_AND_10] sel_no_error { bitwise_and_op_id, state_pi_not_20, state_rho_33, state_pi_and_10, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_31 = state_rho_13 #[STATE_PI_AND_11] sel_no_error { bitwise_and_op_id, state_pi_not_21, state_rho_13, state_pi_and_11, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_32 = state_rho_43 #[STATE_PI_AND_12] sel_no_error { bitwise_and_op_id, state_pi_not_22, state_rho_43, state_pi_and_12, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_33 = state_rho_23 #[STATE_PI_AND_13] sel_no_error { bitwise_and_op_id, state_pi_not_23, state_rho_23, state_pi_and_13, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, 
bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_34 = state_rho_03 #[STATE_PI_AND_14] sel_no_error { bitwise_and_op_id, state_pi_not_24, state_rho_03, state_pi_and_14, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_40 = state_rho_44 #[STATE_PI_AND_20] sel_no_error { bitwise_and_op_id, state_pi_not_30, state_rho_44, state_pi_and_20, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_41 = state_rho_24 #[STATE_PI_AND_21] sel_no_error { bitwise_and_op_id, state_pi_not_31, state_rho_24, state_pi_and_21, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_42 = state_rho_04 #[STATE_PI_AND_22] sel_no_error { bitwise_and_op_id, state_pi_not_32, state_rho_04, state_pi_and_22, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_43 = state_rho_34 #[STATE_PI_AND_23] sel_no_error { bitwise_and_op_id, state_pi_not_33, state_rho_34, state_pi_and_23, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_44 = state_rho_14 #[STATE_PI_AND_24] sel_no_error { bitwise_and_op_id, state_pi_not_34, state_rho_14, state_pi_and_24, 
tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_00 = STATE_RHO_00 = state_theta_00 #[STATE_PI_AND_30] sel_no_error { bitwise_and_op_id, state_pi_not_40, state_theta_00, state_pi_and_30, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_01 = state_rho_30 #[STATE_PI_AND_31] sel_no_error { bitwise_and_op_id, state_pi_not_41, state_rho_30, state_pi_and_31, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_02 = state_rho_10 #[STATE_PI_AND_32] sel_no_error { bitwise_and_op_id, state_pi_not_42, state_rho_10, state_pi_and_32, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_03 = state_rho_40 #[STATE_PI_AND_33] sel_no_error { bitwise_and_op_id, state_pi_not_43, state_rho_40, state_pi_and_33, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_04 = state_rho_20 #[STATE_PI_AND_34] sel_no_error { bitwise_and_op_id, state_pi_not_44, state_rho_20, state_pi_and_34, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_10 = state_rho_11 
#[STATE_PI_AND_40] sel_no_error { bitwise_and_op_id, state_pi_not_00, state_rho_11, state_pi_and_40, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_11 = state_rho_41 #[STATE_PI_AND_41] sel_no_error { bitwise_and_op_id, state_pi_not_01, state_rho_41, state_pi_and_41, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_12 = state_rho_21 #[STATE_PI_AND_42] sel_no_error { bitwise_and_op_id, state_pi_not_02, state_rho_21, state_pi_and_42, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_13 = state_rho_01 #[STATE_PI_AND_43] sel_no_error { bitwise_and_op_id, state_pi_not_03, state_rho_01, state_pi_and_43, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_14 = state_rho_31 #[STATE_PI_AND_44] sel_no_error { bitwise_and_op_id, state_pi_not_04, state_rho_31, state_pi_and_44, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; //#################### chi final XOR Computation ######################################## // state_chi_ij = state_pi_ij ^ state_pi_and_ij @@ -1132,127 +1132,127 @@ pol commit state_chi_00, state_chi_01, state_chi_02, state_chi_03, state_chi_04, // STATE_PI_00 = STATE_RHO_00 = state_theta_00 
#[STATE_CHI_00] sel_no_error { bitwise_xor_op_id, state_theta_00, state_pi_and_00, state_chi_00, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_01 = state_rho_30 #[STATE_CHI_01] sel_no_error { bitwise_xor_op_id, state_rho_30, state_pi_and_01, state_chi_01, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_02 = state_rho_10 #[STATE_CHI_02] sel_no_error { bitwise_xor_op_id, state_rho_10, state_pi_and_02, state_chi_02, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_03 = state_rho_40 #[STATE_CHI_03] sel_no_error { bitwise_xor_op_id, state_rho_40, state_pi_and_03, state_chi_03, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_04 = state_rho_20 #[STATE_CHI_04] sel_no_error { bitwise_xor_op_id, state_rho_20, state_pi_and_04, state_chi_04, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_10 = state_rho_11 #[STATE_CHI_10] sel_no_error { bitwise_xor_op_id, state_rho_11, state_pi_and_10, state_chi_10, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, 
bitwise.tag_a }; // STATE_PI_11 = state_rho_41 #[STATE_CHI_11] sel_no_error { bitwise_xor_op_id, state_rho_41, state_pi_and_11, state_chi_11, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_12 = state_rho_21 #[STATE_CHI_12] sel_no_error { bitwise_xor_op_id, state_rho_21, state_pi_and_12, state_chi_12, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_13 = state_rho_01 #[STATE_CHI_13] sel_no_error { bitwise_xor_op_id, state_rho_01, state_pi_and_13, state_chi_13, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_14 = state_rho_31 #[STATE_CHI_14] sel_no_error { bitwise_xor_op_id, state_rho_31, state_pi_and_14, state_chi_14, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_20 = state_rho_22 #[STATE_CHI_20] sel_no_error { bitwise_xor_op_id, state_rho_22, state_pi_and_20, state_chi_20, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_21 = state_rho_02 #[STATE_CHI_21] sel_no_error { bitwise_xor_op_id, state_rho_02, state_pi_and_21, state_chi_21, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, 
bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_22 = state_rho_32 #[STATE_CHI_22] sel_no_error { bitwise_xor_op_id, state_rho_32, state_pi_and_22, state_chi_22, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_23 = state_rho_12 #[STATE_CHI_23] sel_no_error { bitwise_xor_op_id, state_rho_12, state_pi_and_23, state_chi_23, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_24 = state_rho_42 #[STATE_CHI_24] sel_no_error { bitwise_xor_op_id, state_rho_42, state_pi_and_24, state_chi_24, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_30 = state_rho_33 #[STATE_CHI_30] sel_no_error { bitwise_xor_op_id, state_rho_33, state_pi_and_30, state_chi_30, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_31 = state_rho_13 #[STATE_CHI_31] sel_no_error { bitwise_xor_op_id, state_rho_13, state_pi_and_31, state_chi_31, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_32 = state_rho_43 #[STATE_CHI_32] sel_no_error { bitwise_xor_op_id, state_rho_43, state_pi_and_32, state_chi_32, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; 
+bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_33 = state_rho_23 #[STATE_CHI_33] sel_no_error { bitwise_xor_op_id, state_rho_23, state_pi_and_33, state_chi_33, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_34 = state_rho_03 #[STATE_CHI_34] sel_no_error { bitwise_xor_op_id, state_rho_03, state_pi_and_34, state_chi_34, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_40 = state_rho_44 #[STATE_CHI_40] sel_no_error { bitwise_xor_op_id, state_rho_44, state_pi_and_40, state_chi_40, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_41 = state_rho_24 #[STATE_CHI_41] sel_no_error { bitwise_xor_op_id, state_rho_24, state_pi_and_41, state_chi_41, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_42 = state_rho_04 #[STATE_CHI_42] sel_no_error { bitwise_xor_op_id, state_rho_04, state_pi_and_42, state_chi_42, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_43 = state_rho_34 #[STATE_CHI_43] sel_no_error { bitwise_xor_op_id, state_rho_34, state_pi_and_43, state_chi_43, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, 
bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; // STATE_PI_44 = state_rho_14 #[STATE_CHI_44] sel_no_error { bitwise_xor_op_id, state_rho_14, state_pi_and_44, state_chi_44, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; //############################################################################# // Iota round function @@ -1271,7 +1271,7 @@ sel_no_error { round, round_cst } in precomputed.sel_keccak { precomputed.clk, p #[STATE_IOTA_00] sel_no_error { bitwise_xor_op_id, state_chi_00, round_cst, state_iota_00, tag_u64 } in -bitwise.start { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; +bitwise.start_keccak { bitwise.op_id, bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.tag_a }; //############################################################################# // Next state Inputs OR Outputs @@ -1342,38 +1342,26 @@ pol commit src_addr; pol commit dst_addr; pol commit space_id; pol commit tag_error; // Constrained to be boolean in keccak_memory.pil -pol commit src_out_of_range_error; -pol commit dst_out_of_range_error; +pol commit src_out_of_range_error; // Constrained to be boolean by the lookup into gt. (provided that start == 1). +pol commit dst_out_of_range_error; // Constrained to be boolean by the lookup into gt. (provided that start == 1). pol commit error; // Note that errors are only relevant at row where start == 1. // On the other rows, they are underconstrained. 
-#[SRC_OUT_OF_RANGE_ERROR_BOOLEAN] -src_out_of_range_error * (1 - src_out_of_range_error) = 0; -#[DST_OUT_OF_RANGE_ERROR_BOOLEAN] -dst_out_of_range_error * (1 - dst_out_of_range_error) = 0; - pol HIGHEST_SLICE_ADDRESS = constants.AVM_HIGHEST_MEM_ADDRESS - constants.AVM_KECCAKF1600_STATE_SIZE + 1; -// start == 1 ==> [src_out_of_range_error == 1 && src_addr > HIGHEST_SLICE_ADDRESS] -pol commit src_abs_diff; +// TODO: remove this column when we support constants in lookup tuples. +pol commit highest_slice_address; +start * (highest_slice_address - HIGHEST_SLICE_ADDRESS) = 0; + +// start == 1 ==> [src_out_of_range_error == 1 <==> src_addr > HIGHEST_SLICE_ADDRESS] #[SRC_OUT_OF_RANGE_TOGGLE] -src_abs_diff = start * ((2 * src_out_of_range_error - 1) * (src_addr - HIGHEST_SLICE_ADDRESS) - src_out_of_range_error); +start { src_addr, highest_slice_address, src_out_of_range_error } in gt.sel_others { gt.input_a, gt.input_b, gt.res }; -// start == 1 ==> [dst_out_of_range_error == 1 && dst_addr > HIGHEST_SLICE_ADDRESS] -pol commit dst_abs_diff; +// start == 1 ==> [dst_out_of_range_error == 1 <==> dst_addr > HIGHEST_SLICE_ADDRESS] #[DST_OUT_OF_RANGE_TOGGLE] -dst_abs_diff = start * ((2 * dst_out_of_range_error - 1) * (dst_addr - HIGHEST_SLICE_ADDRESS) - dst_out_of_range_error); - -// TODO: remove this column when we support constants in lookup tuples. 
-pol commit thirty_two; -sel * (thirty_two - 32) = 0; - -#[SRC_ABS_DIFF_POSITIVE] -start { src_abs_diff, thirty_two } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +start { dst_addr, highest_slice_address, dst_out_of_range_error } in gt.sel_others { gt.input_a, gt.input_b, gt.res }; -#[DST_ABS_DIFF_POSITIVE] -start { dst_abs_diff, thirty_two } in range_check.sel { range_check.value, range_check.rng_chk_bits }; // error = src_out_of_range_error || dst_out_of_range_error || tag_error // boolean by definition diff --git a/barretenberg/cpp/pil/vm2/memory.pil b/barretenberg/cpp/pil/vm2/memory.pil index faf4ed35891d..27842619d6cf 100644 --- a/barretenberg/cpp/pil/vm2/memory.pil +++ b/barretenberg/cpp/pil/vm2/memory.pil @@ -1,20 +1,258 @@ +include "constants_gen.pil"; +include "precomputed.pil"; include "range_check.pil"; namespace memory; -pol commit sel; -pol commit clk; -pol commit address; +// Link to design document: https://docs.google.com/document/d/1NM5zU39NG5xJReOjSObwWllbpqTCO4flr9v8kdRDK3E + +// Main memory values directly derived from the memory events. pol commit value; pol commit tag; -pol commit rw; -pol commit space_id; +pol commit space_id; // Memory space identifier (16-bit). +pol commit address; // Memory address (32-bit). +pol commit clk; // Clock cycle. (32-bit) Row index of corresponding opcode in the execution trace. +pol commit rw; // Memory operation type: 0 for read, 1 for write. + +// Boolean selectors. +pol commit sel; // Main selector to toggle an active row (used by any interaction lookups.). +pol commit last_access; // Last memory access for a given global address +pol commit sel_rng_chk; // Every active row except the last one +pol commit sel_tag_is_ff; // Toggles if tag == FF +pol commit sel_rng_write; // Toggles if rw == 1 and tag != FF (Activate range check for write values.) + +// Derived values. +pol commit global_addr; // Global unique address derived from space_id and address. 
+pol commit timestamp; // Timestamp derived from clk and rw. +pol commit diff; // Difference between timestamp or global_addr values of consecutive rows to prove that memory is correctly sorted. +pol commit limb[3]; // 16-bit decomposition limbs for diff. + +// Others +pol commit max_bits; // Number of bits corresponding to the tag. (Retrieved from precomputed subtrace.) + +// Trace entries ordering. +// The memory entries are sorted in ascending order by space_id, address, clk, then rw. +// Equivalently, they are sorted by global_addr and then timestamp. +// last_access == 1 iff the last memory access for a given global address. + +// Trace shape + +// +-----+-----------+-------+-----+-----+---------------+------------+------------+--------------+-------------+ +// | sel | space_id | address | clk | rw | global_addr | timestamp | last_access| diff | sel_rng_chk | +// +-----+-----------+-------+-----+-----+---------------+------------+------------+--------------+-------------+ +// | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0 | +// | 1 | 1 | 27 | 5 | 1 | 2^32 + 27 | 5 * 2 + 1 | 1 | 1 | 1 | +// | 1 | 1 | 28 | 12 | 1 | 2^32 + 28 | 12 * 2 + 1 | 1 | 2 * 2^32 + 3 | 1 | +// | 1 | 3 | 31 | 7 | 0 | 3 * 2^32 + 31 | 7 * 2 + 0 | 0 | 3 | 1 | +// | 1 | 3 | 31 | 8 | 1 | 3 * 2^32 + 31 | 8 * 2 + 1 | 1 | 2^32 + 15 | 1 | +// | 1 | 4 | 46 | 5 | 0 | 4 * 2^32 + 46 | 5 * 2 + 0 | 0 | 0 | 1 | +// | 1 | 4 | 46 | 5 | 0 | 4 * 2^32 + 46 | 5 * 2 + 0 | 0 | 1 | 1 | +// | 1 | 4 | 46 | 5 | 1 | 4 * 2^32 + 46 | 5 * 2 + 1 | 0 | 1 | 1 | +// | 1 | 4 | 46 | 6 | 1 | 4 * 2^32 + 46 | 6 * 2 + 1 | 1 | 43 | 1 | +// | 1 | 4 | 89 | 2 | 1 | 4 * 2^32 + 89 | 2 * 2 + 1 | 1 |-4 * 2^32 - 89| 0 | +// +-----+-----------+-------+-----+-------------+----------------------+----------+--------------+-------------+ #[skippable_if] sel = 0; -sel * (sel - 1) = 0; -rw * (1 - rw) = 0; +// Permutation selectors (execution/addressing.pil). 
+pol commit sel_addressing_base; +pol commit sel_addressing_indirect[7]; +sel_addressing_base * (1 - sel_addressing_base) = 0; +sel_addressing_indirect[0] * (1 - sel_addressing_indirect[0]) = 0; +sel_addressing_indirect[1] * (1 - sel_addressing_indirect[1]) = 0; +sel_addressing_indirect[2] * (1 - sel_addressing_indirect[2]) = 0; +sel_addressing_indirect[3] * (1 - sel_addressing_indirect[3]) = 0; +sel_addressing_indirect[4] * (1 - sel_addressing_indirect[4]) = 0; +sel_addressing_indirect[5] * (1 - sel_addressing_indirect[5]) = 0; +sel_addressing_indirect[6] * (1 - sel_addressing_indirect[6]) = 0; + +// Permutation selectors (execution/registers.pil) +pol commit sel_register_op[7]; +sel_register_op[0] * (1 - sel_register_op[0]) = 0; +sel_register_op[1] * (1 - sel_register_op[1]) = 0; +sel_register_op[2] * (1 - sel_register_op[2]) = 0; +sel_register_op[3] * (1 - sel_register_op[3]) = 0; +sel_register_op[4] * (1 - sel_register_op[4]) = 0; +sel_register_op[5] * (1 - sel_register_op[5]) = 0; +sel_register_op[6] * (1 - sel_register_op[6]) = 0; + +// Permutation selectors (data_copy.pil). +pol commit sel_data_copy_read; +pol commit sel_data_copy_write; +sel_data_copy_read * (1 - sel_data_copy_read) = 0; +sel_data_copy_write * (1 - sel_data_copy_write) = 0; + +// Permutation selectors (opcodes/get_contract_instance.pil). +pol commit sel_get_contract_instance_exists_write; +pol commit sel_get_contract_instance_member_write; +sel_get_contract_instance_exists_write * (1 - sel_get_contract_instance_exists_write) = 0; +sel_get_contract_instance_member_write * (1 - sel_get_contract_instance_member_write) = 0; + +// Permutation selectors (opcodes/emit_unencrypted_log.pil). +pol commit sel_unencrypted_log_read; +sel_unencrypted_log_read * (1 - sel_unencrypted_log_read) = 0; + +// Permutation selectors (poseidon2_mem.pil). 
+pol commit sel_poseidon2_read[4]; +pol commit sel_poseidon2_write[4]; +sel_poseidon2_read[0] * (1 - sel_poseidon2_read[0]) = 0; +sel_poseidon2_read[1] * (1 - sel_poseidon2_read[1]) = 0; +sel_poseidon2_read[2] * (1 - sel_poseidon2_read[2]) = 0; +sel_poseidon2_read[3] * (1 - sel_poseidon2_read[3]) = 0; +sel_poseidon2_write[0] * (1 - sel_poseidon2_write[0]) = 0; +sel_poseidon2_write[1] * (1 - sel_poseidon2_write[1]) = 0; +sel_poseidon2_write[2] * (1 - sel_poseidon2_write[2]) = 0; +sel_poseidon2_write[3] * (1 - sel_poseidon2_write[3]) = 0; + +// Permutation selectors (keccak_memory.pil). +pol commit sel_keccak; +sel_keccak * (1 - sel_keccak) = 0; + +// Permutation selectors (sha256_mem.pil). +pol commit sel_sha256_read; +pol commit sel_sha256_op[8]; +sel_sha256_read * (1 - sel_sha256_read) = 0; +sel_sha256_op[0] * (1 - sel_sha256_op[0]) = 0; +sel_sha256_op[1] * (1 - sel_sha256_op[1]) = 0; +sel_sha256_op[2] * (1 - sel_sha256_op[2]) = 0; +sel_sha256_op[3] * (1 - sel_sha256_op[3]) = 0; +sel_sha256_op[4] * (1 - sel_sha256_op[4]) = 0; +sel_sha256_op[5] * (1 - sel_sha256_op[5]) = 0; +sel_sha256_op[6] * (1 - sel_sha256_op[6]) = 0; +sel_sha256_op[7] * (1 - sel_sha256_op[7]) = 0; + +// Permutation selectors (ecc_mem.pil). +pol commit sel_ecc_write[3]; +sel_ecc_write[0] * (1 - sel_ecc_write[0]) = 0; +sel_ecc_write[1] * (1 - sel_ecc_write[1]) = 0; +sel_ecc_write[2] * (1 - sel_ecc_write[2]) = 0; + +// Permutation selectors (to_radix_mem.pil). +pol commit sel_to_radix_write; +sel_to_radix_write * (1 - sel_to_radix_write) = 0; + +// Permutation consistency. +// TODO(#16759): Enable the following relation ensuring that each memory entry is associated with a valid permutation selector. +// DebugLog opcode is generating some memory entries which should not be triggered. +#[ACTIVE_ROW_NEEDS_PERM_SELECTOR] +sel = // Addressing. 
+ sel_addressing_base + + sel_addressing_indirect[0] + sel_addressing_indirect[1] + sel_addressing_indirect[2] + sel_addressing_indirect[3] + + sel_addressing_indirect[4] + sel_addressing_indirect[5] + sel_addressing_indirect[6] + // Registers. + + sel_register_op[0] + sel_register_op[1] + sel_register_op[2] + sel_register_op[3] + sel_register_op[4] + + sel_register_op[5] + sel_register_op[6] + // Data Copy. + + sel_data_copy_read + + sel_data_copy_write + // Get Contract Instance. + + sel_get_contract_instance_exists_write + + sel_get_contract_instance_member_write + // Unencrypted Log. + + sel_unencrypted_log_read + // Poseidon2. + + sel_poseidon2_read[0] + sel_poseidon2_read[1] + sel_poseidon2_read[2] + sel_poseidon2_read[3] + + sel_poseidon2_write[0] + sel_poseidon2_write[1] + sel_poseidon2_write[2] + sel_poseidon2_write[3] + // Keccak. + + sel_keccak + // Sha256. + + sel_sha256_read + + sel_sha256_op[0] + sel_sha256_op[1] + sel_sha256_op[2] + sel_sha256_op[3] + + sel_sha256_op[4] + sel_sha256_op[5] + sel_sha256_op[6] + sel_sha256_op[7] + // ECC. + + sel_ecc_write[0] + sel_ecc_write[1] + sel_ecc_write[2] + // To Radix. + + sel_to_radix_write; + +// Other boolean constraints. +sel * (1 - sel) = 0; // Ensure mutual exclusivity of the permutation selectors. +last_access * (1 - last_access) = 0; +rw * (1 - rw) = 0; // TODO: should already be constrained by each source of interaction lookups. +sel_tag_is_ff * (1 - sel_tag_is_ff) = 0; + +// Trace must be contiguous. +// Except on first row: sel == 0 ==> sel' == 0 +// As a consequence, the trace is empty or it starts just after the first row and +// is contiguous. +#[MEM_CONTIGUOUS] +(1 - precomputed.first_row) * (1 - sel) * sel' = 0; + +// Boolean toggles at all active rows except the last one. +// It is boolean by definition. +#[SEL_RNG_CHK] +sel_rng_chk = sel * sel'; + +// Derived values. 
+
+#[GLOBAL_ADDR]
+global_addr = space_id * 2**32 + address;
+
+#[TIMESTAMP]
+timestamp = 2 * clk + rw;
+
+// last_access derivation
+// last_access == 0 iff global_addr' == global_addr
+pol GLOB_ADDR_DIFF = global_addr' - global_addr;
+pol commit glob_addr_diff_inv; // Helper column for non-zero equality check
+#[LAST_ACCESS]
+sel_rng_chk * (GLOB_ADDR_DIFF * ((1 - last_access) * (1 - glob_addr_diff_inv) + glob_addr_diff_inv) - last_access) = 0;
+
+// diff derivation
+// We alternate between two different diffs:
+// - last_access == 1: diff = global_addr' - global_addr
+// - last_access == 0: diff = timestamp' - timestamp - rw' * rw
+#[DIFF]
+diff = sel_rng_chk * (last_access * GLOB_ADDR_DIFF + (1 - last_access) * (timestamp' - timestamp - rw' * rw));
+
+// Decompose diff into 16-bit limbs.
+#[DIFF_DECOMP]
+diff = limb[0] + limb[1] * 2**16 + limb[2] * 2**32;
+
+// Range check limbs
+
+#[RANGE_CHECK_LIMB_0]
+sel_rng_chk { limb[0] } in precomputed.sel_range_16 { precomputed.clk };
+#[RANGE_CHECK_LIMB_1]
+sel_rng_chk { limb[1] } in precomputed.sel_range_16 { precomputed.clk };
+#[RANGE_CHECK_LIMB_2]
+sel_rng_chk { limb[2] } in precomputed.sel_range_16 { precomputed.clk };
+
+// Memory Initialization
+#[MEMORY_INIT_VALUE]
+(last_access + precomputed.first_row) * (1 - rw') * value' = 0;
+#[MEMORY_INIT_TAG]
+(last_access + precomputed.first_row) * (1 - rw') * (tag' - constants.MEM_TAG_FF) = 0;
+
+// Read-Write Consistency
+// Note that last_access == 0 on the first row with our trace generation. If the first
+// memory entry is a READ, then the following constraints will be satisfied because
+// value == 0 and tag == MEM_TAG_FF == 0 will be set on the first row which corresponds
+// to an initial read.
+#[READ_WRITE_CONSISTENCY_VALUE] +(1 - last_access) * (1 - rw') * (value' - value) = 0; +#[READ_WRITE_CONSISTENCY_TAG] +(1 - last_access) * (1 - rw') * (tag' - tag) = 0; + + +// We prove that sel_tag_is_ff == 1 <==> tag == MEM_TAG_FF +pol TAG_FF_DIFF = tag - constants.MEM_TAG_FF; +pol commit tag_ff_diff_inv; +#[TAG_IS_FF] +sel * (TAG_FF_DIFF * (sel_tag_is_ff * (1 - tag_ff_diff_inv) + tag_ff_diff_inv) + sel_tag_is_ff - 1) = 0; + +// Boolean by definition. +#[SEL_RNG_WRITE] +sel_rng_write = rw * (1 - sel_tag_is_ff); + +// Retrieve the number of bits for the range check +#[TAG_MAX_BITS] +sel_rng_write { tag, max_bits } +in +precomputed.sel_tag_parameters { precomputed.clk, precomputed.tag_max_bits }; -// TODO: consider tag-value consistency checking. -// TODO: consider address range checking. +// Range check for the tagged value validation +#[RANGE_CHECK_WRITE_TAGGED_VALUE] +sel_rng_write {value, max_bits} +in +range_check.sel_memory { range_check.value, range_check.rng_chk_bits }; diff --git a/barretenberg/cpp/pil/vm2/opcodes/emit_unencrypted_log.pil b/barretenberg/cpp/pil/vm2/opcodes/emit_unencrypted_log.pil index 67bfff5dbc6e..7a75a44ff7d4 100644 --- a/barretenberg/cpp/pil/vm2/opcodes/emit_unencrypted_log.pil +++ b/barretenberg/cpp/pil/vm2/opcodes/emit_unencrypted_log.pil @@ -10,19 +10,17 @@ include "../constants_gen.pil"; * * The opcode reads log data from memory starting at log_address for log_size fields, * and writes the log data to public inputs if all checks pass. - * The gadget generates a fixed number of rows (PUBLIC_LOG_SIZE_IN_FIELDS) - * per log emission, with padding rows when the actual log size is smaller. + * The gadget generates a dynamic number of rows based on the actual log size. 
* * Opcode operands (from execution trace): - * - rop[0]: log_address (memory offset where log data starts) - * - register[1]: log_size (number of fields in the log) + * - register[0]: log_size (number of fields in the log) + * - rop[1]: log_address (memory offset where log data starts) * - * The interaction with the execution trace is only done on the start row. The other PUBLIC_LOG_SIZE_IN_FIELDS - 1 rows are reading the fields of the log and writing to public inputs. + * The interaction with the execution trace is only done on the start row. The subsequent rows are used for reading the log fields from memory and writing to public inputs. * * Validation checks: - * - Log size must not exceed PUBLIC_LOG_SIZE_IN_FIELDS * - Memory addresses must be within bounds (≤ AVM_HIGHEST_MEM_ADDRESS) - * - Must not exceed maximum logs per transaction (MAX_PUBLIC_LOGS_PER_TX) + * - Must not exceed maximum log fields per transaction (FLAT_PUBLIC_LOGS_PAYLOAD_LENGTH) * - All log fields must have FF tags * - Cannot emit logs in static context * @@ -32,34 +30,39 @@ include "../constants_gen.pil"; * - Disables memory reads if addressing error occurs * * Public inputs interaction: - * - Writes contract_address, log_size, and log data to public inputs + * - Writes log_size (number of fields read from memory), contract_address, and log data to public inputs, in that order. 
* - Only writes if no errors occur and discard flag is not set * - Updates the count of emitted unencrypted logs * - * * Partial trace of the gadget: - * +-----+-----+----------+------------+----------+------------------+-----------+-----------+-----------+-------+-------+-----+----------------+--------------+----------------+------------+--------------------+-----------------+-------+-----+ - * | sel | clk | space_id | log_addr | log_size | contract_address | prev_logs | next_logs | is_static | error | start | end | remaining_rows | tag_mismatch | seen_wrong_tag | is_padding | remaining_log_size | should_read_mem | value | tag | - * +-----+-----+----------+------------+----------+------------------+-----------+-----------+-----------+-------+-------+-----+----------------+--------------+----------------+------------+--------------------+-----------------+-------+-----+ - * | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | - * | 1 | 27 | 4 | 27 | 10 | 0xdeadbeef | 1 | 2 | 0 | 0 | 1 | 0 | 12 | 0 | 0 | 0 | 10 | 1 | 5 | ff | - * | 1 | 27 | 4 | 28 | 10 | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 11 | 0 | 0 | 0 | 9 | 1 | 6 | ff | - * | 1 | 27 | 4 | 29 | 10 | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 10 | 0 | 0 | 0 | 8 | 1 | 7 | ff | - * | 1 | 27 | 4 | 30 | 10 | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 9 | 0 | 0 | 0 | 7 | 1 | 8 | ff | - * | 1 | 27 | 4 | 31 | 10 | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 8 | 0 | 0 | 0 | 6 | 1 | 9 | ff | - * | 1 | 27 | 4 | 32 | 10 | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 7 | 0 | 0 | 0 | 5 | 1 | 8 | ff | - * | 1 | 27 | 4 | 33 | 10 | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 6 | 0 | 0 | 0 | 4 | 1 | 7 | ff | - * | 1 | 27 | 4 | 34 | 10 | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 5 | 0 | 0 | 0 | 3 | 1 | 6 | ff | - * | 1 | 27 | 4 | 35 | 10 | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 4 | 0 | 0 | 0 | 2 | 1 | 5 | ff | - * | 1 | 27 | 4 | 36 | 10 | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 
| 3 | 0 | 0 | 0 | 1 | 1 | 4 | ff | - * | 1 | 27 | 4 | 37 | 10 | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 2 | 0 | 0 | 1 | 0 | 0 | 0 | ff | - * | 1 | 27 | 4 | 38 | 10 | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | ff | - * | 1 | 27 | 4 | 39 | 10 | 0xdeadbeef | unc | unc | unc | unc | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | ff | - * +-----+-----+----------+------------+----------+------------------+-----------+-----------+-----------+-------+-------+-----+----------------+--------------+----------------+------------+--------------------+-----------------+-------+-----+ + * + * +-----+-----+--------+----------+----------+---------------+------------+------------+-----------+-------+-------+-----+----------+------------+---------------+----------+-----------+----------+-------+-----+------------+ + * | sel | clk | spc_id | log_addr | log_size | contract_addr | prev_log_f | next_log_f | is_static | error | start | end | rem_rows | tag_msmtch | seen_wrng_tag | IS_W_LEN | is_w_addr | is_w_mem | value | tag | pi_value | + * +-----+-----+--------+----------+----------+---------------+------------+------------+-----------+-------+-------+-----+----------+------------+---------------+----------+-----------+----------+-------+-----+------------+ + * | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | + * | 1 | 27 | 4 | 27 | 10 | 0xdeadbeef | 0 | 10 | 0 | 0 | 1 | 0 | 11 | 0 | 0 | 1 | 0 | 0 | 0 | ff | 10 | + * | 1 | 27 | 4 | 27 | unc | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 10 | 0 | 0 | 0 | 1 | 0 | 0 | ff | 0xdeadbeef | + * | 1 | 27 | 4 | 27 | unc | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 9 | 0 | 0 | 0 | 0 | 1 | 10 | ff | 10 | + * | 1 | 27 | 4 | 28 | unc | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 8 | 0 | 0 | 0 | 0 | 1 | 20 | ff | 20 | + * | 1 | 27 | 4 | 29 | unc | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 7 | 0 | 0 | 0 | 0 | 1 | 30 | ff | 30 | + * | 1 | 27 | 4 | 30 | unc | 0xdeadbeef | unc | unc | unc 
| unc | 0 | 0 | 6 | 0 | 0 | 0 | 0 | 1 | 40 | ff | 40 | + * | 1 | 27 | 4 | 31 | unc | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 5 | 0 | 0 | 0 | 0 | 1 | 50 | ff | 50 | + * | 1 | 27 | 4 | 32 | unc | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 4 | 0 | 0 | 0 | 0 | 1 | 60 | ff | 60 | + * | 1 | 27 | 4 | 33 | unc | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 3 | 0 | 0 | 0 | 0 | 1 | 70 | ff | 70 | + * | 1 | 27 | 4 | 34 | unc | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 1 | 80 | ff | 80 | + * | 1 | 27 | 4 | 35 | unc | 0xdeadbeef | unc | unc | unc | unc | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 90 | ff | 90 | + * | 1 | 27 | 4 | 36 | unc | 0xdeadbeef | unc | unc | unc | unc | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 100 | ff | 100 | + * | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | + * +-----+-----+--------+----------+----------+---------------+------------+------------+-----------+-------+-------+-----+----------+------------+---------------+----------+-----------+----------+-------+-----+------------+ * We perform some checks on start and only propagate down relevant columns to constrain the "worker" rows. Worker rows are the ones after start that only perform optional memory * read and optional writes to public inputs. Columns not propagated down are marked as "unc" in the table above. - * Rows with is_padding_row = 1 are rows where the user-provided log has ended and we are just padding to the PUBLIC_LOG_SIZE_IN_FIELDS. - * All rows interact with public inputs, but memory reads are controlled by a selector that is 1 when it's not padding and there has been no addressing error. + * In this trace: + * - There are three "types" of rows for one log emission: + * - IS_WRITE_LOG_LENGTH: This one processes the log length. This is the first row. + * - is_write_contract_address: This one processes the contract address. This is the second row. + * - is_write_memory_value: These ones process the log body, the values read from memory. 
These are the rest of rows. + * - Rows interact with public inputs conditionally if there is no error or no discard. + * - Rows interact with memory conditionally if there is no addressing error. Otherwise they'll read (given they are is_write_memory_value rows). */ namespace emit_unencrypted_log; @@ -74,8 +77,8 @@ namespace emit_unencrypted_log; pol commit log_address; pol commit log_size; pol commit contract_address; - pol commit prev_num_unencrypted_logs; - pol commit next_num_unencrypted_logs; + pol commit prev_num_unencrypted_log_fields; + pol commit next_num_unencrypted_log_fields; pol commit is_static; pol commit error; pol commit discard; @@ -107,7 +110,7 @@ namespace emit_unencrypted_log; end * (1 - sel) = 0; pol commit remaining_rows; - start * ((constants.PUBLIC_LOG_SIZE_IN_FIELDS - 1) - remaining_rows) = 0; + start * ((constants.PUBLIC_LOG_HEADER_LENGTH + log_size - 1) - remaining_rows) = 0; #[REMAINING_ROWS_DECREMENT] NOT_END * ((remaining_rows - 1) - remaining_rows') = 0; @@ -116,25 +119,9 @@ namespace emit_unencrypted_log; sel * (remaining_rows * (end * (1 - remaining_rows_inv) + remaining_rows_inv) - 1 + end) = 0; // =============== ERROR HANDLING =============== - - // Length check - - pol commit error_too_large; - error_too_large * (1 - error_too_large) = 0; - - // TODO: Column needed until we support constants in lookups - pol commit max_log_size; - start * (max_log_size - constants.PUBLIC_LOG_SIZE_IN_FIELDS) = 0; - - #[CHECK_LOG_SIZE_TOO_LARGE] - start { log_size, max_log_size, error_too_large } - in - gt.sel { gt.input_a, gt.input_b, gt.res }; - // Memory bounds check - pol commit error_out_of_bounds; - error_out_of_bounds * (1 - error_out_of_bounds) = 0; + pol commit error_out_of_bounds; // Constrained to be boolean by the lookup into gt. (provided that start == 1). 
// TODO: Column needed until we support constants in lookups pol commit max_mem_addr; @@ -146,26 +133,34 @@ namespace emit_unencrypted_log; #[CHECK_MEMORY_OUT_OF_BOUNDS] start { end_log_address, max_mem_addr, error_out_of_bounds } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_others { gt.input_a, gt.input_b, gt.res }; // Error out of bounds is propagated down to disable memory reads #[ERROR_OUT_OF_BOUNDS_CONSISTENCY] NOT_END * (error_out_of_bounds' - error_out_of_bounds) = 0; - // Log count check + // Log fields count check + + pol commit error_too_many_log_fields; // Constrained to be boolean by the lookup into gt. (provided that start == 1). + error_too_many_log_fields * (1 - error_too_many_log_fields) = 0; - pol commit error_too_many_logs; - error_too_many_logs * (1 - error_too_many_logs) = 0; + pol TOTAL_LOG_FIELDS_SIZE = constants.PUBLIC_LOG_HEADER_LENGTH + log_size; + pol commit expected_next_log_fields; + start * (prev_num_unencrypted_log_fields + TOTAL_LOG_FIELDS_SIZE - expected_next_log_fields) = 0; - // error_too_many_logs = prev_num_unencrypted_logs == MAX_PUBLIC_LOGS_PER_TX - // If we are at max logs, we can't emit any more logs. 
- pol MAX_LOGS_MINUS_EMITTED = constants.MAX_PUBLIC_LOGS_PER_TX - prev_num_unencrypted_logs; - pol commit max_logs_minus_emitted_inv; - start * (MAX_LOGS_MINUS_EMITTED * (error_too_many_logs * (1 - max_logs_minus_emitted_inv) + max_logs_minus_emitted_inv) - 1 + error_too_many_logs) = 0; + + // TODO: Column needed until we support constants in lookups + pol commit public_logs_payload_length; + start * (public_logs_payload_length - constants.FLAT_PUBLIC_LOGS_PAYLOAD_LENGTH) = 0; + + #[CHECK_LOG_FIELDS_COUNT] + start { expected_next_log_fields, public_logs_payload_length, error_too_many_log_fields } + in + gt.sel { gt.input_a, gt.input_b, gt.res }; // Tag check - // Tag mismatch is propagated down to be checked on end + // Tag mismatch is hinted in the start row and propagated down to be checked on end pol commit error_tag_mismatch; error_tag_mismatch * (1 - error_tag_mismatch) = 0; @@ -174,55 +169,50 @@ namespace emit_unencrypted_log; // In order to constrain tag mismatch, we are going to use a helper column seen_wrong_tag, which we'll use to // check on end if we've seen 1 or more wrong tags. - // We'll use the WRONG_TAG variable, which is a boolean indicating whether the tag is wrong for the current row. - // WRONG_TAG will be constrained in the memory section. + // We'll use the WRONG_NEXT_TAG variable, which is a boolean indicating whether the tag is wrong for the next row. + // WRONG_NEXT_TAG will be constrained in the memory section. 
pol commit seen_wrong_tag; seen_wrong_tag * (1 - seen_wrong_tag) = 0; - // Initial value of seen_wrong_tag is the first wrong tag flag - start * (WRONG_TAG - seen_wrong_tag) = 0; + // Initial value of seen_wrong_tag is false + start * seen_wrong_tag = 0; // Conditional assignment: - // if next_wrong_tag { next_seen_wrong_tag = 1 } + // if WRONG_NEXT_TAG { next_seen_wrong_tag = 1 } // else { next_seen-wrong_tag = seen_wrong_tag } #[WRONG_TAG_CHECK] - NOT_END * ((1 - seen_wrong_tag) * WRONG_TAG' + seen_wrong_tag - seen_wrong_tag') = 0; + NOT_END * ((1 - seen_wrong_tag) * WRONG_NEXT_TAG + seen_wrong_tag - seen_wrong_tag') = 0; // When ending, seen_wrong_tag == error_tag_mismatch end * (error_tag_mismatch - seen_wrong_tag) = 0; // Union error handling - // error = error_too_large | error_out_of_bounds | error_too_many_logs | error_tag_mismatch | is_static - // we split the above computation in 2 to reduce the degree of the full relation (7) + // error = error_out_of_bounds | error_too_many_logs | error_tag_mismatch | is_static + // we split the above computation in 2 to reduce the degree of the full relation pol commit error_too_many_logs_wrong_tag_is_static; - start * ((1 - error_too_many_logs) * (1 - error_tag_mismatch) * (1 - is_static) - (1 - error_too_many_logs_wrong_tag_is_static)) = 0; - start * ((1 - error_too_large) * (1 - error_out_of_bounds) * (1 - error_too_many_logs_wrong_tag_is_static) - (1 - error)) = 0; - - // We write to public inputs if we don't have an error at all and if discard is off - pol commit sel_should_write_to_public_inputs; - start * ((1 - error) * (1 - discard) - sel_should_write_to_public_inputs) = 0; - #[SEL_SHOULD_WRITE_TO_PUBLIC_INPUTS_CONSISTENCY] - NOT_END * (sel_should_write_to_public_inputs' - sel_should_write_to_public_inputs) = 0; - - // Increase next num logs emitted if error is off - sel * (prev_num_unencrypted_logs + (1 - error) - next_num_unencrypted_logs) = 0; - - // =============== PADDING ROWS ============== - pol commit 
is_padding_row; - is_padding_row * (1 - is_padding_row) = 0; - - pol commit remaining_log_size; - start * (log_size - remaining_log_size) = 0; - - // Decrement remaining log size until it's 0 - #[REMAINING_LOG_SIZE_DECREMENT] - NOT_END * (1 - is_padding_row) *((remaining_log_size - 1) - remaining_log_size') = 0; - - // is_padding_row = remaining_log_size == 0 - pol commit remaining_log_size_inv; - sel * (remaining_log_size * (is_padding_row * (1 - remaining_log_size_inv) + remaining_log_size_inv) - 1 + is_padding_row) = 0; + start * ((1 - error_too_many_log_fields) * (1 - error_tag_mismatch) * (1 - is_static) - (1 - error_too_many_logs_wrong_tag_is_static)) = 0; + start * ((1 - error_out_of_bounds) * (1 - error_too_many_logs_wrong_tag_is_static) - (1 - error)) = 0; + + // Increase next num logs emitted if error is off. We increase even in the discard case, since discard only implies not writing to public inputs. + start * (prev_num_unencrypted_log_fields + (1 - error) * TOTAL_LOG_FIELDS_SIZE - next_num_unencrypted_log_fields) = 0; + + // =============== SEQUENCE =============== + // We write the log length in the start row + pol IS_WRITE_LOG_LENGTH = start; + // is_write_contract_address will only be on in the row after start. There will always be a row after start, the contract address one. + pol commit is_write_contract_address; + is_write_contract_address * (1 - is_write_contract_address) = 0; + is_write_contract_address' = start; + // is_write_memory_value starts off and is turned on after is_write_contract_address + pol commit is_write_memory_value; + is_write_memory_value * (1 - is_write_memory_value) = 0; + start * is_write_memory_value = 0; + // if we are not ending, the next is_write_memory_value is equal to current is_write_memory_value or is_write_contract_address + // since both can't be on at the same time, we can use add. 
+ NOT_END * (is_write_memory_value + is_write_contract_address - is_write_memory_value') = 0; // =============== MEMORY READ =============== + // For simplicity, we always generate all rows necessary to process a log, even in error cases. // We have the following error cases: // - error_memory_out_of_bounds: can't read mem // - error_tag_mismatch: we need to read mem @@ -231,10 +221,10 @@ namespace emit_unencrypted_log; // So what we landed with is to always read memory except we can't read memory due to the first case. // This should be fine since the user already paid for the memory reads pol commit sel_should_read_memory; - sel * ((1 - error_out_of_bounds) * (1 - is_padding_row) - sel_should_read_memory) = 0; + sel_should_read_memory = is_write_memory_value * (1 - error_out_of_bounds); #[LOG_ADDRESS_INCREMENT] - NOT_END * ((log_address + 1) - log_address') = 0; + NOT_END * ((log_address + is_write_memory_value) - log_address') = 0; pol commit value; pol commit tag; @@ -246,14 +236,14 @@ namespace emit_unencrypted_log; #[READ_MEM] sel_should_read_memory { - execution_clk, log_address, - value, tag, - space_id, /*rw=0*/ precomputed.zero - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + execution_clk, space_id, + log_address, value, + tag, /*rw=0*/ precomputed.zero + } is + memory.sel_unencrypted_log_read { + memory.clk, memory.space_id, + memory.address, memory.value, + memory.tag, memory.rw }; // If memory read is disabled, value is zero and tag is FF @@ -263,39 +253,45 @@ namespace emit_unencrypted_log; // Tag should be zero since that's the FF tag. 
pol commit correct_tag; correct_tag * (1 - correct_tag) = 0; - pol WRONG_TAG = 1 - correct_tag; + pol WRONG_NEXT_TAG = 1 - correct_tag'; pol commit tag_inv; sel * (tag * (correct_tag * (1 - tag_inv) + tag_inv) - 1 + correct_tag) = 0; // =============== WRITE TO PI =============== + + // We write to public inputs if we don't have an error at all and if discard is off. + // Remember that we will always generate all rows necessary to process a log, it's just that we'll + // gate the writes to public inputs based on the error and discard flags. + pol commit sel_should_write_to_public_inputs; + start * ((1 - error) * (1 - discard) - sel_should_write_to_public_inputs) = 0; + #[SEL_SHOULD_WRITE_TO_PUBLIC_INPUTS_CONSISTENCY] + NOT_END * (sel_should_write_to_public_inputs' - sel_should_write_to_public_inputs) = 0; + pol commit public_inputs_index; - // Public inputs index starts at base index + prev_num_unencrypted_logs * PUBLIC_LOG_SIZE_IN_FIELDS - start * (constants.AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_PUBLIC_LOGS_ROW_IDX + prev_num_unencrypted_logs * constants.PUBLIC_LOG_SIZE_IN_FIELDS - public_inputs_index) = 0; + start * (constants.AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_PUBLIC_LOGS_ROW_IDX + constants.FLAT_PUBLIC_LOGS_HEADER_LENGTH + prev_num_unencrypted_log_fields - public_inputs_index) = 0; // Increment public inputs index until end NOT_END * (public_inputs_index + 1 - public_inputs_index') = 0; #[CONTRACT_ADDRESS_CONSISTENCY] NOT_END * (contract_address - contract_address') = 0; - #[LOG_SIZE_CONSISTENCY] - NOT_END * (log_size - log_size') = 0; - #[WRITE_LOG_TO_PUBLIC_INPUTS] + pol commit public_inputs_value; + IS_WRITE_LOG_LENGTH * (log_size - public_inputs_value) = 0; + is_write_contract_address * (contract_address - public_inputs_value) = 0; + is_write_memory_value * (value - public_inputs_value) = 0; + + #[WRITE_DATA_TO_PUBLIC_INPUTS] sel_should_write_to_public_inputs { public_inputs_index, - contract_address, - log_size, - value + public_inputs_value } in 
public_inputs.sel { precomputed.clk, - public_inputs.cols[0], - public_inputs.cols[1], - public_inputs.cols[2] + public_inputs.cols[0] }; - //////////////////////////////////////////////////////// // Dispatch from execution trace to Emit Unencrypted Log //////////////////////////////////////////////////////// @@ -304,13 +300,13 @@ namespace emit_unencrypted_log; precomputed.clk, execution.context_id, // Message offset - execution.rop[0], + execution.rop[1], // Message size - execution.register[1], + execution.register[0], execution.contract_address, // Context - execution.prev_num_unencrypted_logs, - execution.num_unencrypted_logs, + execution.prev_num_unencrypted_log_fields, + execution.num_unencrypted_log_fields, execution.is_static, // Error execution.sel_opcode_error, @@ -322,8 +318,8 @@ namespace emit_unencrypted_log; log_address, log_size, contract_address, - prev_num_unencrypted_logs, - next_num_unencrypted_logs, + prev_num_unencrypted_log_fields, + next_num_unencrypted_log_fields, is_static, error, discard diff --git a/barretenberg/cpp/pil/vm2/opcodes/external_call.pil b/barretenberg/cpp/pil/vm2/opcodes/external_call.pil index 4a8d3aec4f95..b23563da6bc4 100644 --- a/barretenberg/cpp/pil/vm2/opcodes/external_call.pil +++ b/barretenberg/cpp/pil/vm2/opcodes/external_call.pil @@ -8,55 +8,39 @@ namespace execution; #[skippable_if] sel_enter_call = 0; -// TODO: Remove this as a column when we can lookup with constants -pol commit constant_32; -sel_enter_call * (32 - constant_32) = 0; - // ==== GAS CLAMPING ==== // Guaranteed not to wrap since we never put a gas used > gas limit -pol L2_GAS_LEFT = l2_gas_limit - l2_gas_used; -pol DA_GAS_LEFT = da_gas_limit - da_gas_used; + +// TODO: Once we support expression in lookup, we can replace this column by an alias. +pol commit l2_gas_left; +l2_gas_left = sel_enter_call * (l2_gas_limit - l2_gas_used); + +// TODO: Once we support expression in lookup, we can replace this column by an alias. 
+pol commit da_gas_left; +da_gas_left = sel_enter_call * (da_gas_limit - da_gas_used); + // L2 gas clamping // Compare the gas allocated to the call by the user, with the gas left. // Helper column containing whether the allocated gas is less than the left gas -pol commit call_is_l2_gas_allocated_lt_left; -call_is_l2_gas_allocated_lt_left * (1 - call_is_l2_gas_allocated_lt_left) = 0; +pol commit call_is_l2_gas_allocated_lt_left; // Guaranteed to be boolean through the gt lookup. (provided that sel_enter_call == 1) +#[CALL_IS_L2_GAS_ALLOCATED_LT_LEFT] +sel_enter_call { l2_gas_left, register[0], call_is_l2_gas_allocated_lt_left } in gt.sel_others { gt.input_a, gt.input_b, gt.res }; -pol ALLOCATED_GTE_LEFT_L2 = register[0] - L2_GAS_LEFT; -pol ALLOCATED_LT_LEFT_L2 = L2_GAS_LEFT - register[0] - 1; -pol commit call_allocated_left_l2_cmp_diff; -sel_enter_call * ((ALLOCATED_LT_LEFT_L2 - ALLOCATED_GTE_LEFT_L2) * call_is_l2_gas_allocated_lt_left + ALLOCATED_GTE_LEFT_L2 - call_allocated_left_l2_cmp_diff) = 0; +// next row's l2_gas_limit = if call_is_l2_gas_allocated_lt_left { register[0] } else { l2_gas_left } +sel_enter_call * ((register[0] - l2_gas_left) * call_is_l2_gas_allocated_lt_left + l2_gas_left - l2_gas_limit') = 0; -#[CALL_ALLOCATED_LEFT_L2_RANGE] -sel_enter_call { call_allocated_left_l2_cmp_diff, constant_32 } -in -range_check.sel - { range_check.value, range_check.rng_chk_bits }; - -// next row's l2_gas_limit = if call_is_l2_gas_allocated_lt_left { register[0] } else { L2_GAS_LEFT } -sel_enter_call * ((register[0] - L2_GAS_LEFT) * call_is_l2_gas_allocated_lt_left + L2_GAS_LEFT - l2_gas_limit') = 0; // DA gas clamping // Compare the gas allocated to the call by the user, with the gas left. 
// Helper column containing whether the allocated gas is less than the left gas -pol commit call_is_da_gas_allocated_lt_left; -call_is_da_gas_allocated_lt_left * (1 - call_is_da_gas_allocated_lt_left) = 0; - -pol ALLOCATED_GTE_LEFT_DA = register[1] - DA_GAS_LEFT; -pol ALLOCATED_LT_LEFT_DA = DA_GAS_LEFT - register[1] - 1; -pol commit call_allocated_left_da_cmp_diff; -sel_enter_call * ((ALLOCATED_LT_LEFT_DA - ALLOCATED_GTE_LEFT_DA) * call_is_da_gas_allocated_lt_left + ALLOCATED_GTE_LEFT_DA - call_allocated_left_da_cmp_diff) = 0; - -#[CALL_ALLOCATED_LEFT_DA_RANGE] -sel_enter_call { call_allocated_left_da_cmp_diff, constant_32 } -in -range_check.sel - { range_check.value, range_check.rng_chk_bits }; +pol commit call_is_da_gas_allocated_lt_left; // Guaranteed to be boolean through the gt lookup. (provided that sel_enter_call == 1) -// next row's da_gas_limit = if call_is_da_gas_allocated_lt_left { reg2 } else { DA_GAS_LEFT } -sel_enter_call * ((register[1] - DA_GAS_LEFT) * call_is_da_gas_allocated_lt_left + DA_GAS_LEFT - da_gas_limit') = 0; +#[CALL_IS_DA_GAS_ALLOCATED_LT_LEFT] +sel_enter_call { da_gas_left, register[1], call_is_da_gas_allocated_lt_left } in gt.sel_others { gt.input_a, gt.input_b, gt.res }; +// next row's da_gas_limit = if call_is_da_gas_allocated_lt_left { register[1] } else { da_gas_left } +sel_enter_call * ((register[1] - da_gas_left) * call_is_da_gas_allocated_lt_left + da_gas_left - da_gas_limit') = 0; diff --git a/barretenberg/cpp/pil/vm2/opcodes/get_contract_instance.pil b/barretenberg/cpp/pil/vm2/opcodes/get_contract_instance.pil index 201063361291..f5eb3c993437 100644 --- a/barretenberg/cpp/pil/vm2/opcodes/get_contract_instance.pil +++ b/barretenberg/cpp/pil/vm2/opcodes/get_contract_instance.pil @@ -192,38 +192,37 @@ namespace get_contract_instance; member_tag = is_valid_writes_in_bounds * constants.MEM_TAG_FF; // Write to memory (write both exists and member) - // FIXME: these should eventually be a permutation. 
#[MEM_WRITE_CONTRACT_INSTANCE_EXISTS] is_valid_member_enum { clk, + space_id, /*address=*/ dst_offset, // (aka exists_write_offset) /*value=*/ instance_exists, /*tag=*/ exists_tag, - /*rw=*/ is_valid_member_enum, // 1: write - space_id - } in memory.sel { + /*rw=*/ is_valid_member_enum // 1: write + } is memory.sel_get_contract_instance_exists_write { memory.clk, + memory.space_id, memory.address, memory.value, memory.tag, - memory.rw, - memory.space_id + memory.rw }; // TODO(dbanks12): consider reusing a single memory permutation for both writes, // or do the first write in execution (after returning `exists` to execution). #[MEM_WRITE_CONTRACT_INSTANCE_MEMBER] is_valid_member_enum { clk, + space_id, /*address=*/ member_write_offset, /*value=*/ selected_member, /*tag=*/ member_tag, - /*rw=*/ is_valid_member_enum, // 1: write - space_id - } in memory.sel { + /*rw=*/ is_valid_member_enum // 1: write + } is memory.sel_get_contract_instance_member_write { memory.clk, + memory.space_id, memory.address, memory.value, memory.tag, - memory.rw, - memory.space_id + memory.rw }; diff --git a/barretenberg/cpp/pil/vm2/opcodes/internal_call.pil b/barretenberg/cpp/pil/vm2/opcodes/internal_call.pil index c67ae605fe2b..a48cc5c15bdf 100644 --- a/barretenberg/cpp/pil/vm2/opcodes/internal_call.pil +++ b/barretenberg/cpp/pil/vm2/opcodes/internal_call.pil @@ -9,22 +9,33 @@ namespace execution; #[skippable_if] sel = 0; - pol SWITCH_CALL_ID = sel_execute_internal_call + sel_execute_internal_return; - pol PROPAGATE_CALL_ID = (1 - SWITCH_CALL_ID) * (1 - enqueued_call_start); + // sel_enter_call && enqueued_call_start' are mutually exclusive. sel_enter_call is constrained to be non-erroring, therefore the next row cannot be enqueued call start + // When we encounter this case, the internal call information is reset on the next row (i.e. 
internal_call_id = 1, return_id = 0 and next_internal_call_id = 2) + pol RESET_NEXT_CALL_ID = sel_enter_call + enqueued_call_start'; + + // sel_exit_call includes external exit cases (revert, return, any execution error). + // sel_execute_internal_return could be 1 and sel_opcode_error could be 1 at the same time (which would set sel_error=1 and sel_exit_call=1), + // so we need to gate them by (1 - sel_error) here. We gate both of them out of excessive caution. + // When we encounter this case, the internal call information in the next row is constrained to change (incremented, unwound, etc) + pol NEW_NEXT_CALL_ID = (sel_execute_internal_call + sel_execute_internal_return) * (1 - sel_error) + sel_exit_call; + + // This is an XOR + pol RESET_OR_NEW_NEXT_CALL_ID = (RESET_NEXT_CALL_ID + NEW_NEXT_CALL_ID) - (RESET_NEXT_CALL_ID * NEW_NEXT_CALL_ID); + pol PROPAGATE_CALL_ID = 1 - RESET_OR_NEW_NEXT_CALL_ID; // ============================= // === Internal Call Pointer === // ============================= // The following is grouped together as the Internal Call Pointer struct in the simulator - // The current call id, constrained to be 1 at the start of an enqueued call + // The current call id, constrained to be 1 at the start of a new context pol commit internal_call_id; #[CALL_ID_STARTS_ONE] - enqueued_call_start * (internal_call_id - 1) = 0; + RESET_NEXT_CALL_ID * (internal_call_id' - 1) = 0; // If we encounter a sel_execute_internal_call, the next internal_call_id is the current next_internal_call_id #[NEW_CALL_ID_ON_CALL] sel_execute_internal_call * (internal_call_id' - next_internal_call_id) = 0; - // If we encounter a sel_return_call, the next internal call id is the current inter_call_return_id + // If we encounter a sel_return_call, the next internal_call_id is the current internal_call_return_id #[RESTORE_INTERNAL_ID_ON_RETURN] sel_execute_internal_return * (internal_call_id' - internal_call_return_id) = 0; // Otherwise it's propagated down @@ -34,7 +45,7 @@ 
namespace execution; // The call id when the next internal return is invoked, constrained to be 0 at the start of an enqueued call pol commit internal_call_return_id; #[RET_ID_STARTS_ZERO] - enqueued_call_start * internal_call_return_id = 0; + RESET_NEXT_CALL_ID * internal_call_return_id' = 0; // If we encounter a sel_execute_internal_call, the next internal_call_return_id is the current internal_call_id #[NEW_RETURN_ID_ON_CALL] sel_execute_internal_call * (internal_call_return_id' - internal_call_id) = 0; @@ -46,10 +57,10 @@ namespace execution; // Constrained to start at 2 at the start of an enqueued call pol commit next_internal_call_id; #[NEXT_CALL_ID_STARTS_TWO] - enqueued_call_start * (next_internal_call_id - 2) = 0; - // If we encounter a sel_execute_internal_call, we increment the next next_internal_call_id + RESET_NEXT_CALL_ID * (next_internal_call_id' - 2) = 0; + // If we encounter a sel_execute_internal_call, we increment the next next_internal_call_id, unless we are changing context #[INCR_NEXT_INT_CALL_ID] - NOT_LAST_EXEC * (next_internal_call_id' - (next_internal_call_id + sel_execute_internal_call)) = 0; + NOT_LAST_EXEC * (1 - RESET_OR_NEW_NEXT_CALL_ID) * (next_internal_call_id' - (next_internal_call_id + sel_execute_internal_call)) = 0; // ============================= // === Error Handling ==== diff --git a/barretenberg/cpp/pil/vm2/opcodes/l1_to_l2_message_exists.pil b/barretenberg/cpp/pil/vm2/opcodes/l1_to_l2_message_exists.pil index df0caec7a857..ebf33894c96a 100644 --- a/barretenberg/cpp/pil/vm2/opcodes/l1_to_l2_message_exists.pil +++ b/barretenberg/cpp/pil/vm2/opcodes/l1_to_l2_message_exists.pil @@ -18,8 +18,8 @@ namespace execution; // this is a virtual gadget that shares rows with the execu #[skippable_if] sel_execute_l1_to_l2_message_exists = 0; // from execution.pil. + // Constrained to be boolean by the lookup into gt. (provided that sel_execute_l1_to_l2_message_exists == 1). 
pol commit l1_to_l2_msg_leaf_in_range; - l1_to_l2_msg_leaf_in_range * (1 - l1_to_l2_msg_leaf_in_range) = 0; // TODO: We need this temporarily while we do not allow for aliases in the lookup tuple pol commit l1_to_l2_msg_tree_leaf_count; @@ -30,7 +30,7 @@ namespace execution; // this is a virtual gadget that shares rows with the execu l1_to_l2_msg_tree_leaf_count, register[1], // leaf_index input l1_to_l2_msg_leaf_in_range - } in gt.sel { + } in gt.sel_others { gt.input_a, gt.input_b, gt.res diff --git a/barretenberg/cpp/pil/vm2/opcodes/notehash_exists.pil b/barretenberg/cpp/pil/vm2/opcodes/notehash_exists.pil index f40f330a0838..739fd52d1d37 100644 --- a/barretenberg/cpp/pil/vm2/opcodes/notehash_exists.pil +++ b/barretenberg/cpp/pil/vm2/opcodes/notehash_exists.pil @@ -19,8 +19,8 @@ namespace execution; // this is a virtual gadget that shares rows with the execu #[skippable_if] sel_execute_notehash_exists = 0; // from execution.pil. + // Constrained to be boolean by the lookup into gt. (provided that sel_execute_notehash_exists == 1). 
pol commit note_hash_leaf_in_range; - note_hash_leaf_in_range * (1 - note_hash_leaf_in_range) = 0; // TODO: We need this temporarily while we do not allow for aliases in the lookup tuple pol commit note_hash_tree_leaf_count; @@ -31,7 +31,7 @@ namespace execution; // this is a virtual gadget that shares rows with the execu note_hash_tree_leaf_count, register[1], // leaf_index input note_hash_leaf_in_range - } in gt.sel { + } in gt.sel_others { gt.input_a, gt.input_b, gt.res diff --git a/barretenberg/cpp/pil/vm2/opcodes/nullifier_exists.pil b/barretenberg/cpp/pil/vm2/opcodes/nullifier_exists.pil index 9b1d43058c80..0cafe15fe71b 100644 --- a/barretenberg/cpp/pil/vm2/opcodes/nullifier_exists.pil +++ b/barretenberg/cpp/pil/vm2/opcodes/nullifier_exists.pil @@ -1,3 +1,5 @@ +include "../constants_gen.pil"; + /** * This virtual gadget implements the NullifierExists opcode, which checks if a nullifier exists * in the nullifier tree for a given contract address. @@ -34,7 +36,7 @@ namespace execution; // this is a virtual gadget that shares rows with the execu // Inputs /*nullifier=*/ register[0], // input: nullifier to check prev_nullifier_tree_root, // input: tree root from context - /*should_silo=1*/ sel_execute_nullifier_exists, // input: should_silo = 1 (always silo for contract nullifiers) + /*should_silo=1*/ sel_execute_nullifier_exists, // input: should_silo = 1 (always silo for contract nullifiers) /*contract_address=*/ register[1] // input: contract address for siloing } in nullifier_check.sel { // Outputs diff --git a/barretenberg/cpp/pil/vm2/opcodes/sload.pil b/barretenberg/cpp/pil/vm2/opcodes/sload.pil index 7a5f238f4078..cb451c75946f 100644 --- a/barretenberg/cpp/pil/vm2/opcodes/sload.pil +++ b/barretenberg/cpp/pil/vm2/opcodes/sload.pil @@ -21,9 +21,6 @@ namespace execution; // this is a virtual gadget that shares rows with the execu #[skippable_if] sel_execute_sload = 0; // from execution.pil. 
- #[SLOAD_SUCCESS] - sel_execute_sload * sel_opcode_error = 0; - #[STORAGE_READ] sel_execute_sload { contract_address, // From context @@ -40,3 +37,5 @@ namespace execution; // this is a virtual gadget that shares rows with the execu #[SLOAD_FF_OUTPUT_TAG] sel_execute_sload * (constants.MEM_TAG_FF - mem_tag_reg[1]) = 0; + #[SLOAD_SUCCESS] + sel_execute_sload * sel_opcode_error = 0; diff --git a/barretenberg/cpp/pil/vm2/opcodes/sstore.pil b/barretenberg/cpp/pil/vm2/opcodes/sstore.pil index d05fa8689411..4670a0f2604a 100644 --- a/barretenberg/cpp/pil/vm2/opcodes/sstore.pil +++ b/barretenberg/cpp/pil/vm2/opcodes/sstore.pil @@ -1,4 +1,5 @@ include "../constants_gen.pil"; +include "../precomputed.pil"; include "../trees/public_data_check.pil"; include "../trees/written_public_data_slots_tree_check.pil"; @@ -70,24 +71,25 @@ namespace execution; // this is a virtual gadget that shares rows with the execu written_public_data_slots_tree_check.tree_size_after_write }; - // This should be a multipermutation #[STORAGE_WRITE] sel_write_public_data { register[0], // value contract_address, register[1], // slot + discard, prev_public_data_tree_root, - prev_public_data_tree_size, public_data_tree_root, + prev_public_data_tree_size, public_data_tree_size, precomputed.clk - } in public_data_check.sel { + } is public_data_check.non_protocol_write { public_data_check.value, public_data_check.address, public_data_check.slot, + public_data_check.discard, public_data_check.root, - public_data_check.tree_size_before_write, public_data_check.write_root, + public_data_check.tree_size_before_write, public_data_check.tree_size_after_write, public_data_check.clk }; diff --git a/barretenberg/cpp/pil/vm2/poseidon2_hash.pil b/barretenberg/cpp/pil/vm2/poseidon2_hash.pil index 241a758c3438..a9b6c587e222 100644 --- a/barretenberg/cpp/pil/vm2/poseidon2_hash.pil +++ b/barretenberg/cpp/pil/vm2/poseidon2_hash.pil @@ -2,6 +2,14 @@ include "poseidon2_perm.pil"; // Performs the poseidon2 full hash // It is 
**mostly** well-constrained +// === Usage Tips === +// - To constrain the result of a poseidon hash over a single row (i.e. 3 or fewer inputs), lookup the inputs, +// output, end (= 1), and start (= 1). This trace constrains that when start = end = 1, we have a single row. +// - Over multiple rows, use num_perm_rounds_rem against the decrementing counter to ensure that each round is +// computed in the correct order (at end = 1, num_perm_rounds_rem is constrained to be 1 => can equivalently +// lookup the end selector for the final row). +// - Over multiple rows, lookup the start selector at the expected first row to ensure no extra rows are prepended. +// Note that input_len is only constrained at the start row, so it cannot be relied on for intermediate rows. namespace poseidon2_hash; pol commit sel; diff --git a/barretenberg/cpp/pil/vm2/poseidon2_mem.pil b/barretenberg/cpp/pil/vm2/poseidon2_mem.pil index 805ca90b89c8..3c3aa8e2a4db 100644 --- a/barretenberg/cpp/pil/vm2/poseidon2_mem.pil +++ b/barretenberg/cpp/pil/vm2/poseidon2_mem.pil @@ -13,8 +13,8 @@ include "gt.pil"; * There are two errors that need to be handled as part of this trace * (1) MEM_OUT_OF_BOUNDS_ACCESS: If the reads or writes would access a memory address * outside of the max AVM memory address (AVM_HIGHEST_MEM_ADDRESS). - * (2) INVALID_READ_MEM_TAG: If any value that is read is not of type Field. - * + * (2) INVALID_READ_MEM_TAG: If any value that is read is not of type Field. + * * N.B This subtrace will load the values directly into a row, (i.e. 4 input cols) * As a result of this, error (2) can only be captured after all 4 reads are performed * this is because gating each subsequent read would be expensive. 
@@ -41,7 +41,7 @@ namespace poseidon2_perm_mem; read_address[1] = sel * (read_address[0] + 1); read_address[2] = sel * (read_address[0] + 2); read_address[3] = sel * (read_address[0] + 3); // This is the max read addr - + #[WRITE_ADDR_INCR] write_address[1] = sel * (write_address[0] + 1); write_address[2] = sel * (write_address[0] + 2); @@ -50,10 +50,8 @@ namespace poseidon2_perm_mem; //////////////////////////////////////////////// // Error Handling - Out of Range Memory Access //////////////////////////////////////////////// - pol commit sel_src_out_of_range_err; - sel_src_out_of_range_err * (1 - sel_src_out_of_range_err) = 0; - pol commit sel_dst_out_of_range_err; - sel_dst_out_of_range_err * (1 - sel_dst_out_of_range_err) = 0; + pol commit sel_src_out_of_range_err; // Constrained to be boolean by the lookup into gt. (provided that sel == 1). + pol commit sel_dst_out_of_range_err; // Constrained to be boolean by the lookup into gt. (provided that sel == 1). // Use the comparison gadget to check that the max addresses are within range // The comparison gadget provides the ability to test GreaterThan so we check @@ -64,12 +62,12 @@ namespace poseidon2_perm_mem; #[CHECK_SRC_ADDR_IN_RANGE] sel { read_address[3], max_mem_addr, sel_src_out_of_range_err } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_others { gt.input_a, gt.input_b, gt.res }; #[CHECK_DST_ADDR_IN_RANGE] sel { write_address[3], max_mem_addr, sel_dst_out_of_range_err } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_others { gt.input_a, gt.input_b, gt.res }; //////////////////////////////////////////////// // Read from memory into input columns @@ -79,59 +77,59 @@ namespace poseidon2_perm_mem; pol commit sel_should_read_mem; sel_should_read_mem = sel * (1 - sel_src_out_of_range_err) * (1 - sel_dst_out_of_range_err); - // TODO: These need to be changed to permutations once we have the custom permutation selectors impl #[POS_READ_MEM_0] - sel_should_read_mem { - execution_clk, 
read_address[0], - input[0], input_tag[0], - space_id, /*rw=*/ precomputed.zero - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_should_read_mem { + execution_clk, space_id, + read_address[0], input[0], + input_tag[0], /*rw=*/ precomputed.zero + } is + memory.sel_poseidon2_read[0] { + memory.clk, memory.space_id, + memory.address, memory.value, + memory.tag, memory.rw }; #[POS_READ_MEM_1] - sel_should_read_mem { - execution_clk, read_address[1], - input[1], input_tag[1], - space_id, /*rw=*/ precomputed.zero - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_should_read_mem { + execution_clk, space_id, + read_address[1], input[1], + input_tag[1], /*rw=*/ precomputed.zero + } is + memory.sel_poseidon2_read[1] { + memory.clk, memory.space_id, + memory.address, memory.value, + memory.tag, memory.rw }; #[POS_READ_MEM_2] - sel_should_read_mem { - execution_clk, read_address[2], - input[2], input_tag[2], - space_id, /*rw=*/ precomputed.zero - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_should_read_mem { + execution_clk, space_id, + read_address[2], input[2], + input_tag[2], /*rw=*/ precomputed.zero + } is + memory.sel_poseidon2_read[2] { + memory.clk, memory.space_id, + memory.address, memory.value, + memory.tag, memory.rw }; #[POS_READ_MEM_3] - sel_should_read_mem { - execution_clk, read_address[3], - input[3], input_tag[3], - space_id, /*rw=*/ precomputed.zero - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_should_read_mem { + execution_clk, space_id, + read_address[3], input[3], + input_tag[3], /*rw=*/ precomputed.zero + } is + memory.sel_poseidon2_read[3] { + memory.clk, memory.space_id, + memory.address, memory.value, + memory.tag, memory.rw }; //////////////////////////////////////////////// // Error 
Handling - Invalid Input Tag (not FF) //////////////////////////////////////////////// pol commit sel_invalid_tag_err; + sel_invalid_tag_err * (1 - sel_invalid_tag_err) = 0; pol INPUT_TAG_DIFF_0 = input_tag[0] - constants.MEM_TAG_FF; pol INPUT_TAG_DIFF_1 = input_tag[1] - constants.MEM_TAG_FF; pol INPUT_TAG_DIFF_2 = input_tag[2] - constants.MEM_TAG_FF; @@ -141,7 +139,7 @@ namespace poseidon2_perm_mem; + 2**6 * INPUT_TAG_DIFF_2 + 2**9 * INPUT_TAG_DIFF_3; pol commit batch_tag_inv; - // BATCHED_TAG_CHECK != 0, sel_invalid_tag_err = 1 + // BATCHED_TAG_CHECK != 0, sel_invalid_tag_err = 1 #[BATCH_ZERO_CHECK] BATCHED_TAG_CHECK * ((1 - sel_invalid_tag_err) * (1 - batch_tag_inv) + batch_tag_inv) - sel_invalid_tag_err = 0; @@ -172,51 +170,51 @@ namespace poseidon2_perm_mem; //////////////////////////////////////////////// // TODO: These need to be changed to permutations once we have the custom permutation selectors #[POS_WRITE_MEM_0] - sel_should_exec { - execution_clk, write_address[0], - output[0], /*FF_mem_tag*/ precomputed.zero, - space_id, /*rw=1*/ sel_should_exec - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_should_exec { + execution_clk, space_id, + write_address[0], output[0], + /*FF_mem_tag*/ precomputed.zero, /*rw=1*/ sel_should_exec + } is + memory.sel_poseidon2_write[0] { + memory.clk, memory.space_id, + memory.address, memory.value, + memory.tag, memory.rw }; #[POS_WRITE_MEM_1] - sel_should_exec { - execution_clk, write_address[1], - output[1], /*FF_mem_tag*/ precomputed.zero, - space_id, /*rw=1*/ sel_should_exec - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_should_exec { + execution_clk, space_id, + write_address[1], output[1], + /*FF_mem_tag*/ precomputed.zero, /*rw=1*/ sel_should_exec + } is + memory.sel_poseidon2_write[1] { + memory.clk, memory.space_id, + memory.address, memory.value, + memory.tag, memory.rw }; 
#[POS_WRITE_MEM_2] - sel_should_exec { - execution_clk, write_address[2], - output[2], /*FF_mem_tag*/ precomputed.zero, - space_id, /*rw=1*/ sel_should_exec - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_should_exec { + execution_clk, space_id, + write_address[2], output[2], + /*FF_mem_tag*/ precomputed.zero, /*rw=1*/ sel_should_exec + } is + memory.sel_poseidon2_write[2] { + memory.clk, memory.space_id, + memory.address, memory.value, + memory.tag, memory.rw }; #[POS_WRITE_MEM_3] - sel_should_exec { - execution_clk, write_address[3], - output[3], /*FF_mem_tag*/ precomputed.zero, - space_id, /*rw=1*/ sel_should_exec - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_should_exec { + execution_clk, space_id, + write_address[3], output[3], + /*FF_mem_tag*/ precomputed.zero, /*rw=1*/ sel_should_exec + } is + memory.sel_poseidon2_write[3] { + memory.clk, memory.space_id, + memory.address, memory.value, + memory.tag, memory.rw }; //////////////////////////////////////////////// diff --git a/barretenberg/cpp/pil/vm2/precomputed.pil b/barretenberg/cpp/pil/vm2/precomputed.pil index 72c67e524740..2e54c00e6470 100644 --- a/barretenberg/cpp/pil/vm2/precomputed.pil +++ b/barretenberg/cpp/pil/vm2/precomputed.pil @@ -151,6 +151,8 @@ pol constant sel_revertible_append_note_hash; pol constant sel_revertible_append_nullifier; pol constant sel_revertible_append_l2_l1_msg; pol constant sel_collect_fee; +pol constant sel_tree_padding; +pol constant sel_cleanup; pol constant sel_can_emit_note_hash; pol constant sel_can_emit_nullifier; pol constant sel_can_write_public_data; @@ -184,3 +186,8 @@ pol constant is_valid_member_enum; pol constant is_deployer; pol constant is_class_id; pol constant is_init_hash; + +// Protocol Contract Set - Maps Canonical Address to Derived Address +// Canonical Address are represented by the execution clk +pol commit 
sel_protocol_contract; +pol commit protocol_contract_derived_address; diff --git a/barretenberg/cpp/pil/vm2/range_check.pil b/barretenberg/cpp/pil/vm2/range_check.pil index be0839d4a98f..f088adaa289d 100644 --- a/barretenberg/cpp/pil/vm2/range_check.pil +++ b/barretenberg/cpp/pil/vm2/range_check.pil @@ -1,7 +1,29 @@ +// Usage: +// +// sel { val, num_bits } in range_check.sel { range_check.value, range_check.rng_chk_bits }; +// +// Asserts that val < 2^num_bits. +// Supported for any 0 <= val < 2^128. Any val > 2^128 is not satisfiable (would fail #[CHECK_RECOMPOSITION] combined with +// the 16-bit range checks #[R0_IS_U16]...#[R7_IS_U16] on the limbs). +// Note that we use the unsatisfiability of val > 2^128 as part of an assumption in gt.pil. Should a modification +// here changes the assumption, the gt.pil needs to be modified as well. namespace range_check; // Range check selector pol commit sel; sel * (1 - sel) = 0; + + // This is used to decouple generation of inverses of lookups into this trace. + pol commit sel_keccak; + pol commit sel_gt; + pol commit sel_memory; + pol commit sel_alu; + sel_keccak * (1 - sel_keccak) = 0; + sel_gt * (1 - sel_gt) = 0; + sel_memory * (1 - sel_memory) = 0; + sel_alu * (1 - sel_alu) = 0; + // If any of the above selectors is 1, then sel must be 1. + (sel_keccak + sel_gt + sel_memory + sel_alu) * (1 - sel) = 0; + // No relations will be checked if this identity is satisfied. 
#[skippable_if] sel = 0; diff --git a/barretenberg/cpp/pil/vm2/sha256.pil b/barretenberg/cpp/pil/vm2/sha256.pil index 7087f14175d3..fa1551cb1f45 100644 --- a/barretenberg/cpp/pil/vm2/sha256.pil +++ b/barretenberg/cpp/pil/vm2/sha256.pil @@ -152,11 +152,11 @@ namespace sha256; #[RANGE_COMP_W_LHS] sel_compute_w { two_pow_32, computed_w_lhs, /*result = 1*/ sel_compute_w } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; #[RANGE_COMP_W_RHS] sel_compute_w { two_pow_32, computed_w_rhs, /*result = 1*/ sel_compute_w } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; // ========== Compute w_s0 =================== // w[i-15] `rotr` 7 @@ -171,7 +171,7 @@ namespace sha256; #[RANGE_RHS_W_7] sel_compute_w { two_pow_7, rhs_w_7, /*result = 1*/ sel_compute_w } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; ///// End Check rhs_w_7 // w[i-15] `rotr` 18 pol commit w_15_rotr_18; @@ -185,7 +185,7 @@ namespace sha256; #[RANGE_RHS_W_18] sel_compute_w { two_pow_18, rhs_w_18, /*result = 1*/ sel_compute_w } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; ///// End Check rhs_w_18 // w[i-15] `rightshift` 3 pol commit w_15_rshift_3; @@ -199,20 +199,20 @@ namespace sha256; #[RANGE_RHS_W_3] sel_compute_w { two_pow_3, rhs_w_3, /*result = 1*/sel_compute_w } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; ///// End Check rhs_w_3 // s0 := (w[i-15] `rotr` 7) `xor` (w[i-15] `rotr` 18) `xor` (w[i-15] `rightshift` 3) pol commit w_15_rotr_7_xor_w_15_rotr_18; #[W_S_0_XOR_0] sel_compute_w { w_15_rotr_7, w_15_rotr_18, w_15_rotr_7_xor_w_15_rotr_18, xor_sel, u32_tag } in - bitwise.start { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; + bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, 
bitwise.tag_a }; pol commit w_s_0; #[W_S_0_XOR_1] sel_compute_w { w_15_rotr_7_xor_w_15_rotr_18, w_15_rshift_3, w_s_0, xor_sel, u32_tag } in - bitwise.start { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; + bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; // ========== Compute w_s1 =================== // w[i-2] `rotr` 17 @@ -227,7 +227,7 @@ namespace sha256; #[RANGE_RHS_W_17] sel_compute_w { two_pow_17, rhs_w_17, /*result = 1*/ sel_compute_w } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; ///// End Check rhs_w_17 // w[i-2] `rotr` 19 pol commit w_2_rotr_19; @@ -241,7 +241,7 @@ namespace sha256; #[RANGE_RHS_W_19] sel_compute_w { two_pow_19, rhs_w_19, /*result = 1*/ sel_compute_w } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; ///// End Check rhs_w_19 // w[i-2] `rightshift` 10 pol commit w_2_rshift_10; @@ -255,20 +255,20 @@ namespace sha256; #[RANGE_RHS_W_10] sel_compute_w { two_pow_10, rhs_w_10, /*result = 1*/ sel_compute_w } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; ///// End Check rhs_w_10 // s1 := (w[i-2] `rotr` 17) `xor` (w[i-2] `rotr` 19) `xor` (w[i-2] `rightshift` 10) pol commit w_2_rotr_17_xor_w_2_rotr_19; #[W_S_1_XOR_0] sel_compute_w { w_2_rotr_17, w_2_rotr_19, w_2_rotr_17_xor_w_2_rotr_19, xor_sel, u32_tag } in - bitwise.start { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; + bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; pol commit w_s_1; #[W_S_1_XOR_1] sel_compute_w { w_2_rotr_17_xor_w_2_rotr_19, w_2_rshift_10, w_s_1, xor_sel, u32_tag, u32_tag } in - bitwise.start { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a, bitwise.tag_a }; + bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, 
bitwise.acc_ic, bitwise.op_id, bitwise.tag_a, bitwise.tag_a }; // ========== START OF COMPRESSION BLOCK ================== @@ -285,7 +285,7 @@ namespace sha256; #[RANGE_RHS_E_6] perform_round { two_pow_6, rhs_e_6, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; ///// End Check rhs_e_6 // e `rotr` 11 pol commit e_rotr_11; @@ -299,7 +299,7 @@ namespace sha256; #[RANGE_RHS_E_11] perform_round { two_pow_11, rhs_e_11, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; ///// End Check rhs_e_11 // e `rotr` 25 pol commit e_rotr_25; @@ -313,7 +313,7 @@ namespace sha256; #[RANGE_RHS_E_25] perform_round { two_pow_25, rhs_e_25, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; ///// End Check rhs_e_25 // pol S_1 = (E_0 `rotr` 6) `xor` (E_0 `rotr` 11) `xor` (E_0 `rotr` 25); @@ -321,13 +321,13 @@ namespace sha256; #[S_1_XOR_0] perform_round { e_rotr_6, e_rotr_11, e_rotr_6_xor_e_rotr_11, xor_sel, u32_tag } in - bitwise.start { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; + bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; pol commit s_1; #[S_1_XOR_1] perform_round { e_rotr_6_xor_e_rotr_11, e_rotr_25, s_1, xor_sel, u32_tag } in - bitwise.start { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; + bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; // ==== COMPUTING CH =========== // pol CH_0 = (E_0 `and` F_0) `xor` ((`not` E_0) `and` G_0); @@ -336,7 +336,7 @@ namespace sha256; #[CH_AND_0] perform_round { e, f, e_and_f, and_sel, u32_tag } in - bitwise.start { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; + bitwise.start_sha256 { 
bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; pol commit not_e; perform_round * (e + not_e - (2**32 - 1)) = 0; @@ -345,13 +345,13 @@ namespace sha256; #[CH_AND_1] perform_round { not_e, g, not_e_and_g, and_sel, u32_tag } in - bitwise.start { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; + bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; pol commit ch; #[CH_XOR] perform_round { e_and_f, not_e_and_g, ch, xor_sel, u32_tag } in - bitwise.start { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; + bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; // ===== COMPUTING TMP 1 =========== // Lookup round constants @@ -377,7 +377,7 @@ namespace sha256; #[RANGE_RHS_A_2] perform_round { two_pow_2, rhs_a_2, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; ///// End Check rhs_a_2 // a `rotr` 13 pol commit a_rotr_13; @@ -391,7 +391,7 @@ namespace sha256; #[RANGE_RHS_A_13] perform_round { two_pow_13, rhs_a_13, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; ///// End Check rhs_a_13 // a `rotr` 22 pol commit a_rotr_22; @@ -405,20 +405,20 @@ namespace sha256; #[RANGE_RHS_A_22] perform_round { two_pow_22, rhs_a_22, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; ///// End Check rhs_a_22 // (A_0 `rotr` 2) `xor` (A_0 `rotr` 13) pol commit a_rotr_2_xor_a_rotr_13; #[S_0_XOR_0] perform_round { a_rotr_2, a_rotr_13, a_rotr_2_xor_a_rotr_13, xor_sel, u32_tag } in - bitwise.start { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; + bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, 
bitwise.tag_a }; pol commit s_0; #[S_0_XOR_1] perform_round { a_rotr_2_xor_a_rotr_13, a_rotr_22, s_0, xor_sel, u32_tag } in - bitwise.start { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; + bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; // ====== Computing Maj ========= // pol MAJ_0 = (A_0 `and` B_0) `xor` (A_0 `and` C_0) `xor` (B_0 `and` C_0); @@ -426,31 +426,31 @@ namespace sha256; #[MAJ_AND_0] perform_round { a, b, a_and_b, and_sel, u32_tag } in - bitwise.start { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; + bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; pol commit a_and_c; #[MAJ_AND_1] perform_round { a, c, a_and_c, and_sel, u32_tag } in - bitwise.start { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; + bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; pol commit b_and_c; #[MAJ_AND_2] perform_round { b, c, b_and_c, and_sel, u32_tag } in - bitwise.start { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; + bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; pol commit a_and_b_xor_a_and_c; #[MAJ_XOR_0] perform_round { a_and_b, a_and_c, a_and_b_xor_a_and_c, xor_sel, u32_tag } in - bitwise.start { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; + bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; pol commit maj; #[MAJ_XOR_1] perform_round { a_and_b_xor_a_and_c, b_and_c, maj, xor_sel, u32_tag } in - bitwise.start { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; + bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a }; // ==== Compute TMP 2 ==== pol NEXT_A = s_0 + 
maj + TMP_1; @@ -462,11 +462,11 @@ namespace sha256; #[RANGE_COMP_NEXT_A_LHS] perform_round { two_pow_32, next_a_lhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; #[RANGE_COMP_NEXT_A_RHS] perform_round { two_pow_32, next_a_rhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; pol NEXT_E = d + TMP_1; @@ -477,11 +477,11 @@ namespace sha256; #[RANGE_COMP_NEXT_E_LHS] perform_round { two_pow_32, next_e_lhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; #[RANGE_COMP_NEXT_E_RHS] perform_round { two_pow_32, next_e_rhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; perform_round * (a' - next_a_rhs) = 0; perform_round * (b' - a) = 0; @@ -514,64 +514,64 @@ namespace sha256; #[RANGE_COMP_A_LHS] perform_round { two_pow_32, output_a_lhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; #[RANGE_COMP_A_RHS] perform_round { two_pow_32, output_a_rhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; #[RANGE_COMP_B_LHS] perform_round { two_pow_32, output_b_lhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; #[RANGE_COMP_B_RHS] perform_round { two_pow_32, output_b_rhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; #[RANGE_COMP_C_LHS] perform_round { two_pow_32, output_c_lhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; #[RANGE_COMP_C_RHS] 
perform_round { two_pow_32, output_c_rhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; #[RANGE_COMP_D_LHS] perform_round { two_pow_32, output_d_lhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; #[RANGE_COMP_D_RHS] perform_round { two_pow_32, output_d_rhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; #[RANGE_COMP_E_LHS] perform_round { two_pow_32, output_e_lhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; #[RANGE_COMP_E_RHS] perform_round { two_pow_32, output_e_rhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; #[RANGE_COMP_F_LHS] perform_round { two_pow_32, output_f_lhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; #[RANGE_COMP_F_RHS] perform_round { two_pow_32, output_f_rhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; #[RANGE_COMP_G_LHS] perform_round { two_pow_32, output_g_lhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; #[RANGE_COMP_G_RHS] perform_round { two_pow_32, output_g_rhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; #[RANGE_COMP_H_LHS] perform_round { two_pow_32, output_h_lhs, /*result = 1*/ perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; #[RANGE_COMP_H_RHS] perform_round { two_pow_32, output_h_rhs, /*result = 1*/ 
perform_round } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_sha256 { gt.input_a, gt.input_b, gt.res }; diff --git a/barretenberg/cpp/pil/vm2/sha256_mem.pil b/barretenberg/cpp/pil/vm2/sha256_mem.pil index 1515f5cc41a5..dfd7d54b30e2 100644 --- a/barretenberg/cpp/pil/vm2/sha256_mem.pil +++ b/barretenberg/cpp/pil/vm2/sha256_mem.pil @@ -15,12 +15,12 @@ include "execution.pil"; * sha256.start { sha256.execution_clk, sha256.space_id, sha256.output_addr, sha256.state_addr, sha256.input_addr, sha256.err }; * * This trace reads and writes the following: - * (1) Read State: From { state_addr, state_addr + 1, ..., state_addr + 7 } + * (1) Read State: From { state_addr, state_addr + 1, ..., state_addr + 7 } * (2) Read Input: From { input_addr, input_addr + 1, ..., input_addr + 15 } * (3) Write Output: From { output_addr, output_addr + 1, ..., output_addr + 7 } * All values operated on in this subtrace are validated to be U32. * This subtrace has a mix of a vertical and horizontal memory accesses. - * The State and Outputs are written horizontally (i.e. there are 8 columns), this means that + * The State and Outputs are written horizontally (i.e. there are 8 columns), this means that * they are loaded in "parallel" and therefore they form their own temporality groups (for the purpose of any errors) * The Input is "mixed" into the hash at each round (from round 1 to 16), this means the inputs are loaded in a single column and the i-th input * is loaded in the i-th row of computation. Therefore, a single load is a temporality group (for the purpose of errors). 
@@ -90,7 +90,7 @@ include "execution.pil"; */ namespace sha256; - + pol commit sel; sel * (1 - sel) = 0; @@ -117,7 +117,7 @@ namespace sha256; #[LATCH_HAS_SEL_ON] // sel = 1 when latch = 1, sel = 0 at first row because of shift relations latch * (1 - sel) = 0; // This is now guaranteed to be mututally exclusive because of the above condition and since sel = 0 at row = 0 (since it has shifts) - pol LATCH_CONDITION = latch + precomputed.first_row; + pol LATCH_CONDITION = latch + precomputed.first_row; #[START_AFTER_LAST] sel' * (start' - LATCH_CONDITION) = 0; @@ -154,21 +154,21 @@ namespace sha256; #[CHECK_STATE_ADDR_IN_RANGE] start { max_state_addr, max_mem_addr, sel_state_out_of_range_err } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_others { gt.input_a, gt.input_b, gt.res }; pol commit max_input_addr; max_input_addr = start * (input_addr + 15); // We read 16 elements for inputs #[CHECK_INPUT_ADDR_IN_RANGE] start { max_input_addr, max_mem_addr, sel_input_out_of_range_err } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_others { gt.input_a, gt.input_b, gt.res }; pol commit max_output_addr; max_output_addr = start * (output_addr + 7); // We write 8 elements for the output #[CHECK_OUTPUT_ADDR_IN_RANGE] start { max_output_addr, max_mem_addr, sel_output_out_of_range_err } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_others { gt.input_a, gt.input_b, gt.res }; pol commit mem_out_of_range_err; mem_out_of_range_err = 1 - (1 - sel_state_out_of_range_err) * (1 - sel_input_out_of_range_err) * (1 - sel_output_out_of_range_err); @@ -177,7 +177,7 @@ namespace sha256; // Memory Operations: State and Output //////////////////////////////////////////////////////// // Since reading the hash state and writing the hash outputs have similar "shapes" (i.e. 
8 columns) - // and occur at different "times" (the start and end (latch) respectively), we define a set of + // and occur at different "times" (the start and end (latch) respectively), we define a set of // shared columns that we can re-use between the two. This saves us ~24 columns and 8 permutations. pol commit memory_address[8]; // Addresses to read from or write to @@ -230,101 +230,100 @@ namespace sha256; // flag that accounts for this pol commit rw; rw = OUTPUT_WRITE_CONDITION; - // TODO: These need to be changed to permutations once we have the custom permutation selectors impl #[MEM_OP_0] - sel_mem_state_or_output { - execution_clk, memory_address[0], - memory_register[0], memory_tag[0], - space_id, /*rw=*/ rw - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_mem_state_or_output { + execution_clk, space_id, + memory_address[0], memory_register[0], + memory_tag[0], /*rw=*/ rw + } is + memory.sel_sha256_op[0] { + memory.clk, memory.space_id, + memory.address, memory.value, + memory.tag, memory.rw }; #[MEM_OP_1] - sel_mem_state_or_output { - execution_clk, memory_address[1], - memory_register[1], memory_tag[1], - space_id, /*rw=*/ rw - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_mem_state_or_output { + execution_clk, space_id, + memory_address[1], memory_register[1], + memory_tag[1], /*rw=*/ rw + } is + memory.sel_sha256_op[1] { + memory.clk, memory.space_id, + memory.address, memory.value, + memory.tag, memory.rw }; #[MEM_OP_2] - sel_mem_state_or_output { - execution_clk, memory_address[2], - memory_register[2], memory_tag[2], - space_id, /*rw=*/ rw - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_mem_state_or_output { + execution_clk, space_id, + memory_address[2], memory_register[2], + memory_tag[2], /*rw=*/ rw + } is + memory.sel_sha256_op[2] { + memory.clk, 
memory.space_id, + memory.address, memory.value, + memory.tag, memory.rw }; #[MEM_OP_3] - sel_mem_state_or_output { - execution_clk, memory_address[3], - memory_register[3], memory_tag[3], - space_id, /*rw=*/ rw - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_mem_state_or_output { + execution_clk, space_id, + memory_address[3], memory_register[3], + memory_tag[3], /*rw=*/ rw + } is + memory.sel_sha256_op[3] { + memory.clk, memory.space_id, + memory.address, memory.value, + memory.tag, memory.rw }; #[MEM_OP_4] - sel_mem_state_or_output { - execution_clk, memory_address[4], - memory_register[4], memory_tag[4], - space_id, /*rw=*/ rw - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_mem_state_or_output { + execution_clk, space_id, + memory_address[4], memory_register[4], + memory_tag[4], /*rw=*/ rw + } is + memory.sel_sha256_op[4] { + memory.clk, memory.space_id, + memory.address, memory.value, + memory.tag, memory.rw }; #[MEM_OP_5] - sel_mem_state_or_output { - execution_clk, memory_address[5], - memory_register[5], memory_tag[5], - space_id, /*rw=*/ rw - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_mem_state_or_output { + execution_clk, space_id, + memory_address[5], memory_register[5], + memory_tag[5], /*rw=*/ rw + } is + memory.sel_sha256_op[5] { + memory.clk, memory.space_id, + memory.address, memory.value, + memory.tag, memory.rw }; #[MEM_OP_6] - sel_mem_state_or_output { - execution_clk, memory_address[6], - memory_register[6], memory_tag[6], - space_id, /*rw=*/ rw - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_mem_state_or_output { + execution_clk, space_id, + memory_address[6], memory_register[6], + memory_tag[6], /*rw=*/ rw + } is + memory.sel_sha256_op[6] { + memory.clk, memory.space_id, + 
memory.address, memory.value, + memory.tag, memory.rw }; #[MEM_OP_7] - sel_mem_state_or_output { - execution_clk, memory_address[7], - memory_register[7], memory_tag[7], - space_id, /*rw=*/ rw - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_mem_state_or_output { + execution_clk, space_id, + memory_address[7], memory_register[7], + memory_tag[7], /*rw=*/ rw + } is + memory.sel_sha256_op[7] { + memory.clk, memory.space_id, + memory.address, memory.value, + memory.tag, memory.rw }; //////////////////////////////////////////////// @@ -345,7 +344,7 @@ namespace sha256; pol BATCHED_TAG_CHECK = 2**0 * STATE_TAG_DIFF_0 + 2**3 * STATE_TAG_DIFF_1 + 2**6 * STATE_TAG_DIFF_2 + 2**9 * STATE_TAG_DIFF_3 + 2**12 * STATE_TAG_DIFF_4 + 2**15 * STATE_TAG_DIFF_5 - + 2**18 * STATE_TAG_DIFF_6 + 2**21 * STATE_TAG_DIFF_7; + + 2**18 * STATE_TAG_DIFF_6 + 2**21 * STATE_TAG_DIFF_7; pol commit batch_tag_inv; #[BATCH_ZERO_CHECK_READ] @@ -385,7 +384,7 @@ namespace sha256; sel * (1 - LATCH_CONDITION) * (input_rounds_rem' - (input_rounds_rem - sel_is_input_round)) = 0; // Read input if we have not had an error from the previous temporality groups and this is an input round - pol commit sel_read_input_from_memory; + pol commit sel_read_input_from_memory; sel_read_input_from_memory = sel * (1 - mem_out_of_range_err) * sel_is_input_round * (1 - sel_invalid_state_tag_err); // Put the result of the input loads in to the corresponding w value @@ -396,15 +395,15 @@ namespace sha256; pol commit input_tag; #[MEM_INPUT_READ] - sel_read_input_from_memory { - execution_clk, input_addr, - input, input_tag, - space_id, /*rw=*/ precomputed.zero - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_read_input_from_memory { + execution_clk, space_id, + input_addr, input, + input_tag, /*rw=*/ precomputed.zero + } is + memory.sel_sha256_read { + memory.clk, memory.space_id, + memory.address, 
memory.value, + memory.tag, memory.rw }; //////////////////////////////////////////////// @@ -418,7 +417,7 @@ namespace sha256; pol commit sel_invalid_input_row_tag_err; sel_invalid_input_row_tag_err * (1 - sel_invalid_input_row_tag_err) = 0; - pol INPUT_TAG_DIFF = sel_read_input_from_memory * (input_tag - constants.MEM_TAG_U32); + pol INPUT_TAG_DIFF = sel_read_input_from_memory * (input_tag - constants.MEM_TAG_U32); pol commit input_tag_diff_inv; // Iff INPUT_TAG_DIFF != 0, sel_invalid_input_row_tag_err = 1 #[INPUT_TAG_DIFF_CHECK] diff --git a/barretenberg/cpp/pil/vm2/to_radix.pil b/barretenberg/cpp/pil/vm2/to_radix.pil index 007d2af6d1c5..3304087cbbe0 100644 --- a/barretenberg/cpp/pil/vm2/to_radix.pil +++ b/barretenberg/cpp/pil/vm2/to_radix.pil @@ -193,6 +193,10 @@ namespace to_radix; limb_lt_p * (LIMB_LT_P - limb_p_diff) = 0; sel * (1 - limb_lt_p) * ((LIMB_EQ_P - LIMB_GT_P) * limb_eq_p + LIMB_GT_P - limb_p_diff) = 0; + // Remark: We do not want to migrate this to "gt gadget" for multiple reasons: + // - Current 8-bit range check is small. (gt uses at least a 16-bit range check internally) + // - There is a special trick above to capture the EQ case. + // - gt gadget would introduce circuit leakage in simulation. #[LIMB_P_DIFF_RANGE] not_padding_limb { limb_p_diff } in diff --git a/barretenberg/cpp/pil/vm2/to_radix_mem.pil b/barretenberg/cpp/pil/vm2/to_radix_mem.pil index 78a8c56cb51c..3bb3cc4e3520 100644 --- a/barretenberg/cpp/pil/vm2/to_radix_mem.pil +++ b/barretenberg/cpp/pil/vm2/to_radix_mem.pil @@ -8,7 +8,7 @@ include "precomputed.pil"; * The reads are loaded and tag validated by the registers in the execution trace. * This trace writes a sequence of U1s or U8s (depending on the boolean is_output_bits) to the addresses * {dst, dst + 1, ... , dst + num_limbs - 1 }. - * + * * This trace has a vertical layout, where a single write is performed per row. This is due * to the dynamic amount of writes that can be performed. 
This has a benefit as we only require * a single permutation to the memory trace. @@ -40,17 +40,21 @@ include "precomputed.pil"; * (2) INVALID_NUM_LIMBS: If num_limbs = 0 while value_to_decompose != 0. * (3) DST_OUT_OF_BOUNDS_ACCESS: If the writes would access a memory address outside * of the max AVM memory address (AVM_HIGHEST_MEM_ADDRESS). + * (4) TRUNCATION_ERROR: If the value can't be fully decomposed in the given number of limbs. * * N.B The radix is checked to be <= 256 in the execution trace (for dynamic gas computation) * we do not currently take advantage of any partial checks done but is a future optimisation - * Finally, if is num_limbs = 0 && value_to_decompose = 0 (which are valid inputs), then no memory writes are performed + * Also, if num_limbs = 0 && value_to_decompose = 0 (which are valid inputs), then no memory writes are performed + * Finally, we check that the value has been fully decomposed in the given number of limbs. In order to do this, we + * check in the start row that the value has been found in the TORADIX subtrace. * * This subtrace is connected to the TORADIX subtrace via a lookup. TORADIX is used by - * other subtraces internally (e.g., scalar mul ). - * + * other subtraces internally (e.g., scalar mul). + * * NOTE: The TORADIX subtrace performs the decomposition in LITTLE ENDIAN. This is more optimal * for the internal gadget use. Therefore this subtrace needs to reverse the output of TORADIX - * since the opcode requires BIG ENDIAN. + * since the opcode requires BIG ENDIAN. This allows us to perform the check for truncation error in the start row, + * since the start row of this trace is the last limb in the little endian decomposition.
*/ namespace to_radix_mem; @@ -77,12 +81,12 @@ namespace to_radix_mem; pol commit last; // Constrained in the Control flow section as depends on error last * (1 - last) = 0; - // Since sel has a shifted column (see CONTINUITY checks), sel must equal 0 in the first row + // Since sel has a shifted column (see CONTINUITY checks), sel must equal 0 in the first row // Therefore, last must equal 0 in the first row as well. #[LAST_HAS_SEL_ON] // sel = 1 when last = 1 last * (1 - sel) = 0; // This is now guaranteed to be mututally exclusive because of the above condition - pol LATCH_CONDITION = last + precomputed.first_row; + pol LATCH_CONDITION = last + precomputed.first_row; // If the next row is active and the current row is last or first, then start is active // on the next row. #[START_AFTER_LAST] @@ -106,8 +110,7 @@ namespace to_radix_mem; //////////////////////////////////////////////// // Error Handling - Out of Range Memory Access //////////////////////////////////////////////// - pol commit sel_dst_out_of_range_err; - sel_dst_out_of_range_err * (1 - sel_dst_out_of_range_err) = 0; + pol commit sel_dst_out_of_range_err; // Constrained to be boolean by the lookup into gt. (provided that start == 1). // Use the comparison gadget to check that the max addresses are within range // The comparison gadget provides the ability to test GreaterThan so we check @@ -121,18 +124,16 @@ namespace to_radix_mem; #[CHECK_DST_ADDR_IN_RANGE] start { max_write_addr, max_mem_addr, sel_dst_out_of_range_err } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_others { gt.input_a, gt.input_b, gt.res }; //////////////////////////////////////////////// // Error Handling - Check Radix is valid //////////////////////////////////////////////// // To check that 2 <= radix <= 256, we raise error flags if 2 > radix or radix > 256. // We formulate them like this since we have access to a GT gadget. 
- pol commit sel_radix_lt_2_err; // Radix is < 2 - sel_radix_lt_2_err * (1 - sel_radix_lt_2_err) = 0; + pol commit sel_radix_lt_2_err; // Radix is < 2 (Constrained to be boolean by the lookup into gt. (provided that start == 1).) - pol commit sel_radix_gt_256_err; // Radix is > 256 - sel_radix_gt_256_err * (1 - sel_radix_gt_256_err) = 0; + pol commit sel_radix_gt_256_err; // Radix is > 256 (Constrained to be boolean by the lookup into gt. (provided that start == 1).) pol commit sel_invalid_bitwise_radix; // Radix != 2 if output_bits = 1 sel_invalid_bitwise_radix * (1 - sel_invalid_bitwise_radix) = 0; @@ -145,12 +146,12 @@ namespace to_radix_mem; #[CHECK_RADIX_LT_2] start { two, radix, sel_radix_lt_2_err } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_others { gt.input_a, gt.input_b, gt.res }; #[CHECK_RADIX_GT_256] start { radix, two_five_six, sel_radix_gt_256_err } in - gt.sel { gt.input_a, gt.input_b, gt.res }; + gt.sel_others { gt.input_a, gt.input_b, gt.res }; // is_output_bits already guaranteed to be boolean by execution trace tag validation #[IS_OUTPUT_BITS_IMPLY_RADIX_2] @@ -175,11 +176,55 @@ namespace to_radix_mem; pol commit sel_invalid_num_limbs_err; // err = 1 if num_limbs == 0 while value != 0 sel_invalid_num_limbs_err = sel_num_limbs_is_zero * (1 - sel_value_is_zero); - pol commit err; // Consolidated error flag - err = 1 - (1 - sel_dst_out_of_range_err) * (1 - sel_radix_lt_2_err) + + ///////////////////////////////////////////////////// + // Dispatch inputs to to_radix + ///////////////////////////////////////////////////// + pol commit input_validation_error; + input_validation_error = 1 - (1 - sel_dst_out_of_range_err) * (1 - sel_radix_lt_2_err) * (1 - sel_radix_gt_256_err) * (1 - sel_invalid_bitwise_radix) * (1 - sel_invalid_num_limbs_err); + pol commit limb_value; + // In the low level to_radix gadget, found will be on when the number has been reconstructed with the current limbs. 
+ // Refer to the table in the low level to_radix gadget for a visual example. + pol commit value_found; + pol commit sel_should_decompose; + sel_should_decompose * (1 - sel_should_decompose) = 0; + + // On the start row, we define sel_should_decompose as !input_validation_error && !num_limbs_is_zero + // We can't inline input_validation_error because of the degree. + start * ((1 - input_validation_error) * (1 - sel_num_limbs_is_zero) - sel_should_decompose) = 0; + + // On following rows, we propagate sel_should_decompose. + #[SEL_SHOULD_DECOMPOSE_CONTINUITY] + NOT_LAST * (sel_should_decompose' - sel_should_decompose) = 0; + + pol commit limb_index_to_lookup; // Need this since we want Big-Endian but the gadget is Little-Endian + limb_index_to_lookup = sel_should_decompose * (num_limbs - 1); + + #[INPUT_OUTPUT_TO_RADIX] + sel_should_decompose { value_to_decompose, limb_index_to_lookup, radix, limb_value, value_found } + in + to_radix.sel {to_radix.value, to_radix.limb_index, to_radix.radix, to_radix.limb, to_radix.found }; + + ///////////////////////////////////////////////////// + // Error Handling - Check no truncation error + ///////////////////////////////////////////////////// + pol commit sel_truncation_error; + sel_truncation_error * (1 - sel_truncation_error) = 0; + // A truncation error happens if in the start row, we look up the to_radix gadget and value_found is off. + // The to_radix gadget is little endian, so the first row that we lookup is the last limb. If it's not found in the last limb, + // it means that the number is truncated with the given number of limbs.
+ #[TRUNCATION_ERROR] + sel_truncation_error = start * sel_should_decompose * (1 - value_found); + + ///////////////////////////////////////////////////// + // Error Handling - Consolidated error flag + ///////////////////////////////////////////////////// + pol commit err; + err = start * (1 - (1 - input_validation_error) * (1 - sel_truncation_error)); + ///////////////////////////////////////////////////// // Control flow management and terminating trace ///////////////////////////////////////////////////// @@ -190,59 +235,53 @@ namespace to_radix_mem; #[INCR_DST_ADDRESS] NOT_LAST * (dst_addr' - (dst_addr + 1)) = 0; - // Constraining last - the computation is terminated when: num_limbs = 1 or err = 1 - pol SEL_NO_ERR = sel * (1 - err); - pol NUM_LIMBS_MINUS_ONE = num_limbs - 1; - pol commit num_limbs_minus_one_inv; - + // Constraining last - the computation is terminated when one of the following conditions is met: + // 1) err == 1 + // 2) num_limbs == 0 + // 3) num_limbs == 1 + // Note: These conditions are not mutually exclusive. - // todo(ilyas); there must be a nicer way to make these relations cleaner. 
Last needs to be set based on: - // 1) num_limbs - 1 = 0; part of a normal computation - // 2) num_limbs = 0; if the user-supplied num_limbs = 0 && value_to_decompose = 0 - // 3) err = 1; - #[LAST_ROW_VALID_COMPUTATION] // last = 1 if num_limbs = 1 && err = 0 && num_limbs != 0 - SEL_NO_ERR * (1 - sel_num_limbs_is_zero) * (NUM_LIMBS_MINUS_ONE * (last * (1 - num_limbs_minus_one_inv) + num_limbs_minus_one_inv) - 1 + last) = 0; - - #[LAST_ROW_NUM_LIMBS_ZERO] // last = 1 if num_limbs = 0 - sel * sel_num_limbs_is_zero * (last - 1) = 0; - - #[LAST_ROW_ERR_COMPUTATION] // last = 1 if err = 1 && num_limbs != 0 - sel * err * (1 - sel_num_limbs_is_zero) * (last - 1) = 0; + #[LAST_ROW_ERR_COMPUTATION] // last == 1 if err == 1 (can only occur on the start row) + start * err * (last - 1) = 0; // The implication of the above condition is that LATCH_CONDITION = 1 if there is an error - ///////////////////////////////////////////////////// - // Dispatch inputs to to_radix and retrieve outputs - ///////////////////////////////////////////////////// - pol commit output_limb_value; - pol commit sel_should_exec; - // If the num limbs are zero, we don't dispatch to the gadget or write to memory. - sel_should_exec = sel * (1 - err) * (1 - sel_num_limbs_is_zero); + #[LAST_ROW_NUM_LIMBS_ZERO] // last == 1 if num_limbs == 0 (can only occur on the start row) + start * sel_num_limbs_is_zero * (last - 1) = 0; - pol commit limb_index_to_lookup; // Need this since we want Big-Endian but the gadget is Little-Endian - limb_index_to_lookup = sel_should_exec * (num_limbs - 1); - #[INPUT_OUTPUT_TO_RADIX] - sel_should_exec { value_to_decompose, limb_index_to_lookup, radix, output_limb_value } - in - to_radix.sel {to_radix.value, to_radix.limb_index, to_radix.radix, to_radix.limb }; + // Introduce a selector covering all active rows except the start row with an error or the start row + // with num_limbs == 0. 
+ pol NO_ERR_NOR_NUM_LIMBS_ZERO = start * (1 - err) * (1 - sel_num_limbs_is_zero) + (1 - start) * sel; // Error can only occur on the start row. + pol NUM_LIMBS_MINUS_ONE = num_limbs - 1; + pol commit num_limbs_minus_one_inv; + + // If [start == 1 && err == 0 && num_limbs != 0] || [start == 0], we constrain: last = 1 iff num_limbs = 1 + #[LAST_ROW_VALID_COMPUTATION] + NO_ERR_NOR_NUM_LIMBS_ZERO * (NUM_LIMBS_MINUS_ONE * (last * (1 - num_limbs_minus_one_inv) + num_limbs_minus_one_inv) - 1 + last) = 0; //////////////////////////////////////////////// // Write output to memory //////////////////////////////////////////////// - // TODO: These need to be changed to permutations once we have the custom permutation selectors impl + pol commit sel_should_write_mem; + // We compute sel_should_write_mem in the start row, as no error at all and num_limbs != 0. + start * ((1 - err) * (1 - sel_num_limbs_is_zero) - sel_should_write_mem) = 0; + // On following rows, we propagate sel_should_write_mem. + #[SEL_SHOULD_WRITE_MEM_CONTINUITY] + NOT_LAST * (sel_should_write_mem' - sel_should_write_mem) = 0; + pol commit output_tag; // Conditional Assignment: is_output_bits ?
U1 : U8 - output_tag = sel_should_exec * ((constants.MEM_TAG_U1 - constants.MEM_TAG_U8) * is_output_bits + constants.MEM_TAG_U8); + output_tag = sel_should_write_mem * ((constants.MEM_TAG_U1 - constants.MEM_TAG_U8) * is_output_bits + constants.MEM_TAG_U8); #[WRITE_MEM] - sel_should_exec { - execution_clk, dst_addr, - output_limb_value, /*mem_tag*/ output_tag, - space_id, /*rw=1*/ sel_should_exec - } in - memory.sel { - memory.clk, memory.address, - memory.value, memory.tag, - memory.space_id, memory.rw + sel_should_write_mem { + execution_clk, space_id, + dst_addr, limb_value, + /*mem_tag*/ output_tag, /*rw=1*/ sel_should_write_mem + } is + memory.sel_to_radix_write { + memory.clk, memory.space_id, + memory.address, memory.value, + memory.tag, memory.rw }; diff --git a/barretenberg/cpp/pil/vm2/trees/l1_to_l2_message_tree_check.pil b/barretenberg/cpp/pil/vm2/trees/l1_to_l2_message_tree_check.pil index 11449836f972..251852bca6cc 100644 --- a/barretenberg/cpp/pil/vm2/trees/l1_to_l2_message_tree_check.pil +++ b/barretenberg/cpp/pil/vm2/trees/l1_to_l2_message_tree_check.pil @@ -1,6 +1,5 @@ include "merkle_check.pil"; include "../constants_gen.pil"; -include "../poseidon2_hash.pil"; /** * This gadget checks if a message exists in the L1 to L2 message tree. 
diff --git a/barretenberg/cpp/pil/vm2/trees/merkle_check.pil b/barretenberg/cpp/pil/vm2/trees/merkle_check.pil index 1422aba26108..5b804f00b8aa 100644 --- a/barretenberg/cpp/pil/vm2/trees/merkle_check.pil +++ b/barretenberg/cpp/pil/vm2/trees/merkle_check.pil @@ -161,17 +161,14 @@ namespace merkle_check; pol commit read_output_hash; pol commit write_output_hash; - pol commit constant_2; - sel * (constant_2 - 2) = 0; - // Lookup to the full poseidon2 gadget #[MERKLE_POSEIDON2_READ] - sel { read_left_node, read_right_node, /*input_2=*/ precomputed.zero, /*input_len=*/ constant_2, read_output_hash } - in poseidon2_hash.end { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.input_len, poseidon2_hash.output }; + sel { read_left_node, read_right_node, /*input_2=*/ precomputed.zero, /*start=1=*/ sel, read_output_hash } + in poseidon2_hash.end { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.start, poseidon2_hash.output }; #[MERKLE_POSEIDON2_WRITE] - write { write_left_node, write_right_node, /*input_2=*/ precomputed.zero, /*input_len=*/ constant_2, write_output_hash } - in poseidon2_hash.end { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.input_len, poseidon2_hash.output }; + write { write_left_node, write_right_node, /*input_2=*/ precomputed.zero, /*start=1=*/ sel, write_output_hash } + in poseidon2_hash.end { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.start, poseidon2_hash.output }; // If we are not done, this row's output_hash is the next row's node #[OUTPUT_HASH_IS_NEXT_ROWS_READ_NODE] diff --git a/barretenberg/cpp/pil/vm2/trees/nullifier_check.pil b/barretenberg/cpp/pil/vm2/trees/nullifier_check.pil index 18833b547eb9..d78f43036cb8 100644 --- a/barretenberg/cpp/pil/vm2/trees/nullifier_check.pil +++ b/barretenberg/cpp/pil/vm2/trees/nullifier_check.pil @@ -131,14 +131,14 @@ namespace nullifier_check; sel * 
(tree_height - constants.NULLIFIER_TREE_HEIGHT) = 0; #[LOW_LEAF_POSEIDON2] - sel { low_leaf_nullifier, low_leaf_next_nullifier, low_leaf_next_index, low_leaf_hash } - in poseidon2_hash.end { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + sel { sel /* =1 */, low_leaf_nullifier, low_leaf_next_nullifier, low_leaf_next_index, low_leaf_hash } + in poseidon2_hash.end { poseidon2_hash.start, poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; pol commit updated_low_leaf_hash; #[UPDATED_LOW_LEAF_POSEIDON2] - should_insert { low_leaf_nullifier, updated_low_leaf_next_nullifier, updated_low_leaf_next_index, updated_low_leaf_hash } - in poseidon2_hash.end { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + should_insert { sel /* =1 */, low_leaf_nullifier, updated_low_leaf_next_nullifier, updated_low_leaf_next_index, updated_low_leaf_hash } + in poseidon2_hash.end { poseidon2_hash.start, poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; #[LOW_LEAF_MERKLE_CHECK] sel { should_insert, low_leaf_hash, updated_low_leaf_hash, @@ -182,8 +182,8 @@ namespace nullifier_check; pol commit new_leaf_hash; #[NEW_LEAF_POSEIDON2] - should_insert { siloed_nullifier, low_leaf_next_nullifier, low_leaf_next_index, new_leaf_hash } - in poseidon2_hash.end { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + should_insert { sel /* =1 */, siloed_nullifier, low_leaf_next_nullifier, low_leaf_next_index, new_leaf_hash } + in poseidon2_hash.end { poseidon2_hash.start, poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; #[NEW_LEAF_MERKLE_CHECK] should_insert { sel, precomputed.zero, new_leaf_hash, diff --git a/barretenberg/cpp/pil/vm2/trees/public_data_check.pil b/barretenberg/cpp/pil/vm2/trees/public_data_check.pil index 
c929b2475498..51636009bc0c 100644 --- a/barretenberg/cpp/pil/vm2/trees/public_data_check.pil +++ b/barretenberg/cpp/pil/vm2/trees/public_data_check.pil @@ -50,6 +50,7 @@ include "public_data_squash.pil"; * value, * address, * slot, + * discard, * prev_public_data_tree_root, * next_public_data_tree_root, * prev_public_data_tree_size, @@ -59,6 +60,7 @@ include "public_data_squash.pil"; * public_data_check.value, * public_data_check.address, * public_data_check.slot, + * public_data_check.discard, * public_data_check.root, * public_data_check.write_root, * public_data_check.tree_size_before_write, @@ -97,6 +99,13 @@ namespace public_data_check; write * (1 - write) = 0; // If write is on, sel must be on too. write * (1 - sel) = 0; + // Whether the write is a protocol or non-protocol write (permutation selectors (sstore.pil, tx.pil)) + pol commit protocol_write; + protocol_write * (1 - protocol_write) = 0; + pol commit non_protocol_write; + non_protocol_write * (1 - non_protocol_write) = 0; + #[PROTOCOL_WRITE_CHECK] + protocol_write + non_protocol_write = write; pol commit clk; pol commit discard; @@ -114,17 +123,20 @@ namespace public_data_check; // ========= SORTING ========= // If not the last row, clock must not decrease - pol commit clk_diff; - clk_diff = not_end * (clk' - clk); + pol CLK_DIFF = not_end * (clk' - clk); - // TODO: Commited because lookups don't support constants - pol commit constant_32; - sel * (32 - constant_32) = 0; + pol commit clk_diff_lo; + pol commit clk_diff_hi; + #[CLK_DIFF_DECOMP] + CLK_DIFF = clk_diff_lo + 2**16 * clk_diff_hi; + + #[CLK_DIFF_RANGE_LO] + not_end { clk_diff_lo } + in precomputed.sel_range_16 { precomputed.clk }; - // Disabled sorting lookups for now - // #[CLK_DIFF_RANGE] - // not_end { clk_diff, constant_32 } - // in range_check.sel { range_check.value, range_check.rng_chk_bits }; + #[CLK_DIFF_RANGE_HI] + not_end { clk_diff_hi } + in precomputed.sel_range_16 { precomputed.clk }; // We enforce reads, which have 
unconstrained clk, to have clk = 0 // So they have to be at the start of the trace @@ -187,13 +199,15 @@ namespace public_data_check; pol commit low_leaf_hash; // The intermediate root is the root of the tree after the low leaf update but before the new leaf is inserted. pol commit intermediate_root; - // TODO: We need this temporarily while we do not allow for aliases in the lookup tuple + // TODO: We need these temporarily while we do not allow for aliases in the lookup tuple pol commit tree_height; sel * (tree_height - constants.PUBLIC_DATA_TREE_HEIGHT) = 0; + pol commit const_two; + sel * (const_two - 2) = 0; #[LOW_LEAF_POSEIDON2_0] - sel { low_leaf_slot, low_leaf_value, low_leaf_next_index, low_leaf_hash } - in poseidon2_hash.start { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + sel { low_leaf_slot, low_leaf_value, low_leaf_next_index, low_leaf_hash, const_two } + in poseidon2_hash.start { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output, poseidon2_hash.num_perm_rounds_rem }; #[LOW_LEAF_POSEIDON2_1] sel { low_leaf_next_slot, precomputed.zero, precomputed.zero, low_leaf_hash } @@ -202,8 +216,8 @@ namespace public_data_check; pol commit updated_low_leaf_hash; #[UPDATED_LOW_LEAF_POSEIDON2_0] - write { low_leaf_slot, updated_low_leaf_value, updated_low_leaf_next_index, updated_low_leaf_hash } - in poseidon2_hash.start { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + write { low_leaf_slot, updated_low_leaf_value, updated_low_leaf_next_index, updated_low_leaf_hash, const_two } + in poseidon2_hash.start { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output, poseidon2_hash.num_perm_rounds_rem }; #[UPDATED_LOW_LEAF_POSEIDON2_1] write { updated_low_leaf_next_slot, precomputed.zero, precomputed.zero, updated_low_leaf_hash } @@ -228,8 +242,8 @@ namespace public_data_check; pol commit 
new_leaf_hash; #[NEW_LEAF_POSEIDON2_0] - should_insert { leaf_slot, value, low_leaf_next_index, new_leaf_hash } - in poseidon2_hash.start { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + should_insert { leaf_slot, value, low_leaf_next_index, new_leaf_hash, const_two } + in poseidon2_hash.start { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output, poseidon2_hash.num_perm_rounds_rem }; #[NEW_LEAF_POSEIDON2_1] should_insert { low_leaf_next_slot, precomputed.zero, precomputed.zero, new_leaf_hash } in poseidon2_hash.end { poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; @@ -256,16 +270,16 @@ namespace public_data_check; (1 - sel) * sel' * (constants.AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_PUBLIC_DATA_WRITES_ROW_IDX - write_idx') = 0; // If a write with discard = 0, we permute to the public data squash trace to get write to public inputs - pol commit nondiscaded_write; - nondiscaded_write = write * (1 - discard); + pol commit non_discarded_write; + non_discarded_write = write * (1 - discard); pol commit should_write_to_public_inputs; // If this is a read or a discarded write, write to public inputs must be zero - (1 - nondiscaded_write) * should_write_to_public_inputs = 0; + (1 - non_discarded_write) * should_write_to_public_inputs = 0; pol commit final_value; #[SQUASHING] - nondiscaded_write { + non_discarded_write { leaf_slot, clk, should_write_to_public_inputs, @@ -289,15 +303,12 @@ namespace public_data_check; write_idx, leaf_slot, final_value - }in public_inputs.sel { + } in public_inputs.sel { precomputed.clk, public_inputs.cols[0], public_inputs.cols[1] }; - // TODO: On end, lookup the length of public data writes in public inputs. - // Consider not doing it here if we implement the solution for accurate write counting during execution. 
- // ========= ASSERT FINAL LENGTH ========= // Every transaction we prove must have at least one public data write, the fee one. // Thanks to that, we can assert here that the length of the public data writes vector in public inputs @@ -310,12 +321,11 @@ namespace public_data_check; pol commit length_pi_idx; sel * (constants.AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_ARRAY_LENGTHS_PUBLIC_DATA_WRITES_ROW_IDX - length_pi_idx) = 0; - // TODO: Commented out for now, to make the bulk test pass before all opcodes are implemented. - // #[WRITE_WRITES_LENGTH_TO_PUBLIC_INPUTS] - // end { - // length_pi_idx, - // public_data_writes_length - // }in public_inputs.sel { - // precomputed.clk, - // public_inputs.cols[0] - // }; + #[WRITE_WRITES_LENGTH_TO_PUBLIC_INPUTS] + end { + length_pi_idx, + public_data_writes_length + } in public_inputs.sel { + precomputed.clk, + public_inputs.cols[0] + }; diff --git a/barretenberg/cpp/pil/vm2/trees/public_data_squash.pil b/barretenberg/cpp/pil/vm2/trees/public_data_squash.pil index 8bdd6a485440..754ff98a231d 100644 --- a/barretenberg/cpp/pil/vm2/trees/public_data_squash.pil +++ b/barretenberg/cpp/pil/vm2/trees/public_data_squash.pil @@ -6,7 +6,7 @@ include "../precomputed.pil"; // Write to public inputs is true in the first write to a leaf slot. // The final value is the value written last to the given leaf slot. 
// +-----+-----------+-------+-----+-------------+----------------------+-------------+----------+-------------+ -// | sel | leaf_slot | value | clk | write_to_pi | should_increase_leaf | check_clock | clk_diff | final_value | +// | sel | leaf_slot | value | clk | write_to_pi | leaf_slot_increase | check_clock | clk_diff | final_value | // +-----+-----------+-------+-----+-------------+----------------------+-------------+----------+-------------+ // | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | // | 1 | 27 | 1 | 5 | 1 | 0 | 1 | 7 | 2 | @@ -45,26 +45,28 @@ namespace public_data_squash; #[CHECK_SAME_LEAF_SLOT] NOT_END * (1 - leaf_slot_increase) * (leaf_slot - leaf_slot') = 0; - // TODO: Disabled sorting lookups for now - // #[LEAF_SLOT_INCREASE_FF_GT] - // leaf_slot_increase { leaf_slot', leaf_slot, sel } - // in ff_gt.sel_gt { ff_gt.a, ff_gt.b, ff_gt.result }; + #[LEAF_SLOT_INCREASE_FF_GT] + leaf_slot_increase { leaf_slot', leaf_slot, sel } + in ff_gt.sel_gt { ff_gt.a, ff_gt.b, ff_gt.result }; // Our 32 bit clock must not decrease for the same leaf slot pol commit check_clock; check_clock = NOT_END * (1 - leaf_slot_increase); - pol commit clk_diff; - clk_diff = check_clock * (clk' - clk); + pol CLK_DIFF = check_clock * (clk' - clk); - // TODO: Commited because lookups don't support constants - pol commit constant_32; - sel * (32 - constant_32) = 0; + pol commit clk_diff_lo; + pol commit clk_diff_hi; + #[CLK_DIFF_DECOMP] + CLK_DIFF = clk_diff_lo + 2**16 * clk_diff_hi; - // TODO: Disabled sorting lookups for now - // #[CLK_DIFF_RANGE] - // check_clock { clk_diff, constant_32 } - // in range_check.sel { range_check.value, range_check.rng_chk_bits }; + #[CLK_DIFF_RANGE_LO] + check_clock { clk_diff_lo } + in precomputed.sel_range_16 { precomputed.clk }; + + #[CLK_DIFF_RANGE_HI] + check_clock { clk_diff_hi } + in precomputed.sel_range_16 { precomputed.clk }; // ====== SQUASHING ====== // We write to public inputs the first ocurrence of a leaf slot, but with the last ocurrence 
value diff --git a/barretenberg/cpp/pil/vm2/trees/retrieved_bytecodes_tree_check.pil b/barretenberg/cpp/pil/vm2/trees/retrieved_bytecodes_tree_check.pil new file mode 100644 index 000000000000..5a1d2cd02b68 --- /dev/null +++ b/barretenberg/cpp/pil/vm2/trees/retrieved_bytecodes_tree_check.pil @@ -0,0 +1,150 @@ +include "merkle_check.pil"; + +include "../ff_gt.pil"; +include "../poseidon2_hash.pil"; +include "../constants_gen.pil"; +include "../precomputed.pil"; + +// This gadget is used to track the unique class ids that have been retrieved in the TX. +// It's a transient indexed tree that starts empty (with a prefill) on every transaction, +// and it's discarded at the end of the transaction execution. +// The leaves only contain the class id and the indexed tree pointers. +// +// Read usage: +// sel { +// class_id, +// leaf_not_exists, +// retrieved_bytecodes_tree_root +// } in retrieved_bytecodes_tree_check.sel { +// retrieved_bytecodes_tree_check.class_id, +// retrieved_bytecodes_tree_check.leaf_not_exists, +// retrieved_bytecodes_tree_check.root +// }; +// +// Write usage: +// sel { +// class_id, +// sel, // 1 +// prev_retrieved_bytecodes_tree_root, +// prev_retrieved_bytecodes_tree_size, +// next_retrieved_bytecodes_tree_root, +// next_retrieved_bytecodes_tree_size +// } in retrieved_bytecodes_tree_check.sel { +// retrieved_bytecodes_tree_check.class_id, +// retrieved_bytecodes_tree_check.write, +// retrieved_bytecodes_tree_check.root, +// retrieved_bytecodes_tree_check.tree_size_before_write, +// retrieved_bytecodes_tree_check.write_root, +// retrieved_bytecodes_tree_check.tree_size_after_write +// }; + +namespace retrieved_bytecodes_tree_check; + pol commit sel; + sel * (1 - sel) = 0; + + #[skippable_if] + sel = 0; + + // Inputs to the gadget + pol commit write; + write * (1 - write) = 0; + pol READ = 1 - write; + + pol commit class_id; + pol commit root; + pol commit leaf_not_exists; + leaf_not_exists * (1 - leaf_not_exists) = 0; + pol EXISTS = 1 - 
leaf_not_exists; + + // Write specific inputs + pol commit write_root; + pol commit tree_size_before_write; + pol commit tree_size_after_write; + + // Hints + pol commit low_leaf_class_id; + pol commit low_leaf_next_index; + pol commit low_leaf_next_class_id; + + pol commit updated_low_leaf_next_index; + pol commit updated_low_leaf_next_class_id; + + pol commit low_leaf_index; + + // ========= HANDLE REDUNDANT WRITES ========= + pol commit should_insert; + should_insert = write * leaf_not_exists; + // On a failing write, the root must not change + write * EXISTS * (root - write_root) = 0; + + tree_size_after_write = tree_size_before_write + should_insert; + + // ========= COMPUTE LOW LEAF UPDATE ========= + should_insert * (tree_size_before_write - updated_low_leaf_next_index) = 0; + should_insert * (class_id - updated_low_leaf_next_class_id) = 0; + + // ========= LOW LEAF MERKLE CHECK ========= + pol commit low_leaf_hash; + // The intermediate root is the root of the tree after the low leaf update but before the new leaf is inserted. 
+ pol commit intermediate_root; + // TODO: We need this temporarily while we do not allow for aliases in the lookup tuple + pol commit tree_height; + sel * (constants.AVM_RETRIEVED_BYTECODES_TREE_HEIGHT - tree_height) = 0; + + #[LOW_LEAF_POSEIDON2] + sel { sel, low_leaf_class_id, low_leaf_next_class_id, low_leaf_next_index, low_leaf_hash } + in poseidon2_hash.end { poseidon2_hash.start, poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + + pol commit updated_low_leaf_hash; + + #[UPDATED_LOW_LEAF_POSEIDON2] + should_insert { sel, low_leaf_class_id, updated_low_leaf_next_class_id, updated_low_leaf_next_index, updated_low_leaf_hash } + in poseidon2_hash.end { poseidon2_hash.start, poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + + #[LOW_LEAF_MERKLE_CHECK] + sel { should_insert, low_leaf_hash, updated_low_leaf_hash, + low_leaf_index, tree_height, root, intermediate_root } + in merkle_check.start { merkle_check.write, merkle_check.read_node, merkle_check.write_node, + merkle_check.index, merkle_check.path_len, merkle_check.read_root, merkle_check.write_root }; + + // ========= LOW LEAF VALIDATION ========= + pol commit class_id_low_leaf_class_id_diff_inv; + pol CLASS_ID_LOW_LEAF_CLASS_ID_DIFF = class_id - low_leaf_class_id; + + // CLASS_ID_LOW_LEAF_CLASS_ID_DIFF == 0 <==> EXISTS == 1 + #[EXISTS_CHECK] + sel * (CLASS_ID_LOW_LEAF_CLASS_ID_DIFF * (EXISTS * (1 - class_id_low_leaf_class_id_diff_inv) + class_id_low_leaf_class_id_diff_inv) - 1 + EXISTS) = 0; + + // If the leaf doesn't exist, we need to validate that the class id is greater than the low leaf class id + + #[LOW_LEAF_CLASS_ID_VALIDATION] + leaf_not_exists { class_id, low_leaf_class_id, sel } + in ff_gt.sel_gt { ff_gt.a, ff_gt.b, ff_gt.result }; + + // If next class id is not zero (which would be infinity), it has to be greater than the class id. 
+ // We commit next_class_id_is_nonzero instead of next_class_id_is_zero since it'll be used as a selector for a lookup + pol commit next_class_id_is_nonzero; + next_class_id_is_nonzero * (1 - next_class_id_is_nonzero) = 0; + pol NEXT_CLASS_ID_IS_ZERO = 1 - next_class_id_is_nonzero; + + pol commit next_class_id_inv; + #[NEXT_CLASS_ID_IS_ZERO_CHECK] + leaf_not_exists * (low_leaf_next_class_id * (NEXT_CLASS_ID_IS_ZERO * (1 - next_class_id_inv) + next_class_id_inv) - 1 + NEXT_CLASS_ID_IS_ZERO) = 0; + + #[LOW_LEAF_NEXT_CLASS_ID_VALIDATION] + next_class_id_is_nonzero { low_leaf_next_class_id, class_id, sel } + in ff_gt.sel_gt { ff_gt.a, ff_gt.b, ff_gt.result }; + + // ========= NEW LEAF INSERTION ========= + pol commit new_leaf_hash; + + #[NEW_LEAF_POSEIDON2] + should_insert { sel, class_id, low_leaf_next_class_id, low_leaf_next_index, new_leaf_hash } + in poseidon2_hash.end { poseidon2_hash.start, poseidon2_hash.input_0, poseidon2_hash.input_1, poseidon2_hash.input_2, poseidon2_hash.output }; + + #[NEW_LEAF_MERKLE_CHECK] + should_insert { sel, precomputed.zero, new_leaf_hash, + tree_size_before_write, tree_height, intermediate_root, write_root } + in merkle_check.start { merkle_check.write, merkle_check.read_node, merkle_check.write_node, + merkle_check.index, merkle_check.path_len, merkle_check.read_root, merkle_check.write_root }; + diff --git a/barretenberg/cpp/pil/vm2/tx.pil b/barretenberg/cpp/pil/vm2/tx.pil index faaa036dfb0d..d8b6268c3cdc 100644 --- a/barretenberg/cpp/pil/vm2/tx.pil +++ b/barretenberg/cpp/pil/vm2/tx.pil @@ -3,7 +3,9 @@ include "execution.pil"; include "precomputed.pil"; include "trees/note_hash_tree_check.pil"; include "poseidon2_hash.pil"; +include "calldata_hashing.pil"; include "tx_context.pil"; +include "tx_discard.pil"; // Refer to https://excalidraw.com/#json=XcT7u7Ak5rZhWqT51KwTW,VPI-Q1D7hW8_lYhf6V4bNg for a visual guide to the tx trace. // The tx trace manages the various phases that a transaction will undergo while it is being executed. 
@@ -33,9 +35,14 @@ namespace tx; #[SEL_ON_FIRST_ROW] precomputed.first_row * (1 - sel') = 0; - // Sel can only become off after the last phase (fee collection) + // Sel can only become off after the last phase (cleanup) #[NO_EARLY_END] - sel * (1 - sel') * (1 - is_collect_fee) = 0; + sel * (1 - sel') * (1 - is_cleanup) = 0; + + // Most of transaction initial constraints are in tx_context.pil + pol commit start_tx; + #[START_WITH_SEL] + start_tx' = (1 - sel) * sel'; pol commit phase_value; // if is_padded = 1, this is a padded row @@ -61,9 +68,12 @@ namespace tx; // end_phase = 1, remaining_phase - 1 = 0; pol commit is_revertible; pol commit reverted; + reverted * (1 - reverted) = 0; + #[END_PHASE_ON_REVERT] + sel * reverted * (1 - end_phase) = 0; // phase_value stays the same unless we are at end_phase or reverted #[PHASE_VALUE_CONTINUITY] - NOT_PHASE_END * (1 - reverted) * (1 - precomputed.first_row) * (phase_value' - phase_value) = 0; + NOT_PHASE_END * (1 - precomputed.first_row) * (phase_value' - phase_value) = 0; // At the start of a new phase, we need to increment the phase value #[INCR_PHASE_VALUE_ON_END] NOT_LAST * (1 - reverted) * end_phase * (phase_value' - (phase_value + 1)) = 0; @@ -71,9 +81,6 @@ namespace tx; // If reverted == 1, is_revertible must be 1 reverted * (1 - is_revertible) = 0; - // TODO: Constrain - pol commit discard; - // Control flow pol commit read_pi_offset; // index in public inputs to read data from -> Revisit! 
pol commit read_pi_length_offset; // index in public inputs to read lengths from @@ -91,7 +98,7 @@ namespace tx; pol REM_COUNT_MINUS_1 = remaining_phase_counter - 1; // If remaining_phase_counter == 1, end_phase = 1 #[REM_COUNT_IS_ONE] - sel * (1 - is_padded) * (REM_COUNT_MINUS_1 * (end_phase * (1 - remaining_phase_minus_one_inv) + remaining_phase_minus_one_inv) - 1 + end_phase) = 0; + sel * (1 - reverted) * (1 - is_padded) * (REM_COUNT_MINUS_1 * (end_phase * (1 - remaining_phase_minus_one_inv) + remaining_phase_minus_one_inv) - 1 + end_phase) = 0; // TODO: Add constraints to propagate down the things read from precomputed // while NOT_PHASE_END @@ -99,8 +106,9 @@ namespace tx; start_phase { phase_value, is_public_call_request, - /* Tree Padding Phase */ is_collect_fee, + is_tree_padding, + is_cleanup, is_revertible, read_pi_offset, @@ -121,8 +129,9 @@ namespace tx; precomputed.sel_phase { precomputed.phase_value, precomputed.is_public_call_request_phase, - /* Tree Padding Phase */ precomputed.sel_collect_fee, + precomputed.sel_tree_padding, + precomputed.sel_cleanup, precomputed.is_revertible, precomputed.read_public_input_offset, @@ -148,7 +157,7 @@ namespace tx; // === Phase Lengths === // Read the number of steps in the phase from public inputs - pol IS_ONE_SHOT_PHASE = is_collect_fee; + pol IS_ONE_SHOT_PHASE = is_collect_fee + is_tree_padding + is_cleanup; pol commit sel_read_phase_length; // Read length if start phase and not one shot phase @@ -175,8 +184,21 @@ namespace tx; pol commit msg_sender; pol commit contract_addr; pol commit is_static; + pol commit calldata_size; pol commit calldata_hash; + // TODO: Consider if a permutation is preferable here, and whether that work work with gas: + #[READ_CALLDATA_HASH] + should_process_call_request { + calldata_hash, + calldata_size, + next_context_id + } in calldata_hashing.latch { + calldata_hashing.output_hash, + calldata_hashing.calldata_size, + calldata_hashing.context_id + }; + // Read information relating to 
the public call request from public inputs #[READ_PUBLIC_CALL_REQUEST_PHASE] is_public_call_request { @@ -194,8 +216,9 @@ namespace tx; public_inputs.cols[3] }; - pol commit context_id; // TODO: Constrain - pol commit next_context_id; // TODO: Constrain + pol commit should_process_call_request; + should_process_call_request = is_public_call_request * (1 - is_padded); + pol commit is_teardown_phase; // TODO: Constrain is_teardown_phase * (1 - is_teardown_phase) = 0; @@ -205,123 +228,133 @@ namespace tx; pol commit next_da_gas_used_sent_to_enqueued_call; // prev_gas_used_sent_to_enqueued_call = is_teardown_phase ? 0 : prev_gas_used - is_public_call_request * ((0 - prev_l2_gas_used) * is_teardown_phase + prev_l2_gas_used - prev_l2_gas_used_sent_to_enqueued_call) = 0; - is_public_call_request * ((0 - prev_da_gas_used) * is_teardown_phase + prev_da_gas_used - prev_da_gas_used_sent_to_enqueued_call) = 0; + should_process_call_request * ((0 - prev_l2_gas_used) * is_teardown_phase + prev_l2_gas_used - prev_l2_gas_used_sent_to_enqueued_call) = 0; + should_process_call_request * ((0 - prev_da_gas_used) * is_teardown_phase + prev_da_gas_used - prev_da_gas_used_sent_to_enqueued_call) = 0; // next_gas_used = is_teardown_gas_phase ? 
prev_gas_used : next_gas_used_sent_to_enqueued_call // In the teardown phase, next_gas_used_sent_to_enqueued_call is unconstrained on purpose: we don't care about gas usage in teardown - is_public_call_request * ((prev_l2_gas_used - next_l2_gas_used_sent_to_enqueued_call) * is_teardown_phase + next_l2_gas_used_sent_to_enqueued_call - next_l2_gas_used) = 0; - is_public_call_request * ((prev_da_gas_used - next_da_gas_used_sent_to_enqueued_call) * is_teardown_phase + next_da_gas_used_sent_to_enqueued_call - next_da_gas_used) = 0; - - // TODO: Constrain the evolution of the gas limits in the trace + should_process_call_request * ((prev_l2_gas_used - next_l2_gas_used_sent_to_enqueued_call) * is_teardown_phase + next_l2_gas_used_sent_to_enqueued_call - next_l2_gas_used) = 0; + should_process_call_request * ((prev_da_gas_used - next_da_gas_used_sent_to_enqueued_call) * is_teardown_phase + next_da_gas_used_sent_to_enqueued_call - next_da_gas_used) = 0; // Public Call Requests are dispatch to the execution trace // We match the values to the start of an enqueued call in the execution trace - // Commented out for now since we dont have enqueued call flags in execution set - // #[DISPATCH_EXEC_START] - // is_public_call_request { - // context_id, - // next_context_id, - // msg_sender, - // contract_addr, - // fee, - // is_static, - // // Tree State - // prev_note_hash_tree_root, - // prev_note_hash_tree_size, - // prev_num_note_hashes_emitted, - // prev_nullifier_tree_size, - // prev_num_nullifiers_emitted, - // prev_public_data_tree_root, - // prev_public_data_tree_size, - // prev_written_public_data_slots_tree_root, - // prev_written_public_data_slots_tree_size, - // l1_l2_tree_root, - // // Side Effect States - // prev_num_unencrypted_logs, - // prev_num_l2_to_l1_messages, - // // Gas Info - // prev_l2_gas_used_sent_to_enqueued_call, - // prev_da_gas_used_sent_to_enqueued_call, - // l2_gas_limit, - // da_gas_limit - // } in - // execution.enqueued_call_start { - // 
execution.context_id, - // execution.next_context_id, - // execution.msg_sender, - // execution.contract_address, - // execution.transaction_fee, - // execution.is_static, - // // Tree State - // execution.prev_note_hash_tree_root, - // execution.prev_note_hash_tree_size, - // execution.prev_num_note_hashes_emitted, - // execution.prev_nullifier_tree_size, - // execution.prev_num_nullifiers_emitted, - // execution.prev_public_data_tree_root, - // execution.prev_public_data_tree_size, - // execution.prev_written_public_data_slots_tree_root, - // execution.prev_written_public_data_slots_tree_size, - // execution.l1_l2_tree_root, - // // Side Effect States - // execution.prev_num_unencrypted_logs, - // execution.prev_num_l2_to_l1_messages, - // // Gas Info - // execution.prev_l2_gas_used_sent_to_enqueued_call, - // execution.prev_da_gas_used_sent_to_enqueued_call, - // execution.l2_gas_limit, - // execution.da_gas_limit - // }; + #[DISPATCH_EXEC_START] + should_process_call_request { + next_context_id, + discard, + msg_sender, + contract_addr, + fee, + is_static, + calldata_size, + // Tree State + prev_note_hash_tree_root, + prev_note_hash_tree_size, + prev_num_note_hashes_emitted, + prev_nullifier_tree_root, + prev_nullifier_tree_size, + prev_num_nullifiers_emitted, + prev_public_data_tree_root, + prev_public_data_tree_size, + prev_written_public_data_slots_tree_root, + prev_written_public_data_slots_tree_size, + l1_l2_tree_root, + prev_retrieved_bytecodes_tree_root, + prev_retrieved_bytecodes_tree_size, + // Side Effect States + prev_num_unencrypted_log_fields, + prev_num_l2_to_l1_messages, + // Gas Info + prev_l2_gas_used_sent_to_enqueued_call, + prev_da_gas_used_sent_to_enqueued_call, + l2_gas_limit, + da_gas_limit + } in + execution.enqueued_call_start { + execution.context_id, // next_context_id must be constrained in the execution trace on enqueued_call_start + execution.discard, + execution.msg_sender, + execution.contract_address, + execution.transaction_fee, 
+ execution.is_static, + execution.parent_calldata_size, + // Tree State + execution.prev_note_hash_tree_root, + execution.prev_note_hash_tree_size, + execution.prev_num_note_hashes_emitted, + execution.prev_nullifier_tree_root, + execution.prev_nullifier_tree_size, + execution.prev_num_nullifiers_emitted, + execution.prev_public_data_tree_root, + execution.prev_public_data_tree_size, + execution.prev_written_public_data_slots_tree_root, + execution.prev_written_public_data_slots_tree_size, + execution.l1_l2_tree_root, + execution.prev_retrieved_bytecodes_tree_root, + execution.prev_retrieved_bytecodes_tree_size, + // Side Effect States + execution.prev_num_unencrypted_log_fields, + execution.prev_num_l2_to_l1_messages, + // Gas Info + execution.prev_l2_gas_used, + execution.prev_da_gas_used, + execution.l2_gas_limit, + execution.da_gas_limit + }; // We retrieve the return values at the end of an enqueued call in the execution trace - // Commented out for now since we dont have enqueued call flags in execution set - //#[DISPATCH_EXEC_GET_REVERT] - //is_public_call_request { - // context_id, - // next_context_id, - // reverted, - // // Tree State - // next_note_hash_tree_root, - // next_note_hash_tree_size, - // next_num_note_hashes_emitted, - // next_nullifier_tree_root, - // next_nullifier_tree_size, - // next_num_nullifiers_emitted, - // next_public_data_tree_root, - // next_public_data_tree_size, - // next_written_public_data_slots_tree_root, - // next_written_public_data_slots_tree_size, - // l1_l2_tree_root, - // // Side Effect States - // next_num_unencrypted_logs, - // next_num_l2_to_l1_messages, - // // Gas Info - // next_l2_gas_used_sent_to_enqueued_call, - // next_da_gas_used_sent_to_enqueued_call - //} in - //execution.enqueued_call_end { - // execution.context_id, - // execution.next_context_id, - // execution.sel_error, - // // Tree State - // execution.note_hash_tree_root, - // execution.note_hash_tree_size, - // execution.num_note_hashes_emitted, - 
// execution.nullifier_tree_root, - // execution.nullifier_tree_size, - // execution.num_nullifiers_emitted, - // execution.public_data_tree_root, - // execution.public_data_tree_size, - // execution.written_public_data_slots_tree_root, - // execution.written_public_data_slots_tree_size, - // execution.l1_l2_tree_root, - // // Side Effect States - // execution.num_unencrypted_logs, - // execution.num_l2_to_l1_messages, - // // Gas Info - // execution.l2_gas_used_sent_to_enqueued_call, - // execution.da_gas_used_sent_to_enqueued_call - //}; + #[DISPATCH_EXEC_END] + should_process_call_request { + next_context_id, + next_context_id', // The context id of the next row must be the one after the execution of the enqueued call + reverted, + discard, + // Tree State + next_note_hash_tree_root, + next_note_hash_tree_size, + next_num_note_hashes_emitted, + next_nullifier_tree_root, + next_nullifier_tree_size, + next_num_nullifiers_emitted, + next_public_data_tree_root, + next_public_data_tree_size, + next_written_public_data_slots_tree_root, + next_written_public_data_slots_tree_size, + l1_l2_tree_root, + next_retrieved_bytecodes_tree_root, + next_retrieved_bytecodes_tree_size, + // Side Effect States + next_num_unencrypted_log_fields, + next_num_l2_to_l1_messages, + // Gas Info + next_l2_gas_used_sent_to_enqueued_call, + next_da_gas_used_sent_to_enqueued_call + } in + execution.enqueued_call_end { + execution.context_id, + execution.next_context_id, + execution.sel_failure, + execution.discard, + // Tree State + execution.note_hash_tree_root, + execution.note_hash_tree_size, + execution.num_note_hashes_emitted, + execution.nullifier_tree_root, + execution.nullifier_tree_size, + execution.num_nullifiers_emitted, + execution.public_data_tree_root, + execution.public_data_tree_size, + execution.written_public_data_slots_tree_root, + execution.written_public_data_slots_tree_size, + execution.l1_l2_tree_root, + execution.retrieved_bytecodes_tree_root, + 
execution.retrieved_bytecodes_tree_size, + // Side Effect States + execution.num_unencrypted_log_fields, + execution.num_l2_to_l1_messages, + // Gas Info + execution.l2_gas_used, + execution.da_gas_used + }; // === PRIVATE SIDE EFFECT INSERTIONS === pol commit sel_revertible_append_note_hash; @@ -366,7 +399,7 @@ namespace tx; precomputed.zero, sel_revertible_append_note_hash, prev_num_note_hashes_emitted, - discard, + discard, // from tx_discard.pil virtual trace next_note_hash_tree_root } in note_hash_tree_check.write { @@ -409,7 +442,7 @@ namespace tx; prev_nullifier_tree_root, next_nullifier_tree_root, prev_nullifier_tree_size, - discard, + discard, // from tx_discard.pil virtual trace prev_num_nullifiers_emitted, precomputed.zero } in @@ -450,6 +483,7 @@ namespace tx; pol commit should_l2_l1_msg_append; // A msg emit must be written to PI if it didn't revert, and is not discard. should_try_l2_l1_msg_append * ((1 - reverted) * (1 - discard) - should_l2_l1_msg_append) = 0; + // discard is from tx_discard.pil virtual trace should_l2_l1_msg_append * (constants.AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX + prev_num_l2_to_l1_messages - write_pi_offset) = 0; @@ -518,19 +552,18 @@ namespace tx; pol commit fee_payer_balance; - // TODO: Commented out for now, to make the bulk test pass before all opcodes are implemented. 
- // #[BALANCE_READ] - // is_collect_fee { - // fee_payer_balance, - // fee_juice_contract_address, - // fee_juice_balance_slot, - // prev_public_data_tree_root - // } in public_data_check.sel { - // public_data_check.value, - // public_data_check.address, - // public_data_check.slot, - // public_data_check.root - // }; + #[BALANCE_READ] + is_collect_fee { + fee_payer_balance, + fee_juice_contract_address, + fee_juice_balance_slot, + prev_public_data_tree_root + } in public_data_check.sel { + public_data_check.value, + public_data_check.address, + public_data_check.slot, + public_data_check.root + }; #[BALANCE_VALIDATION] is_collect_fee { fee, fee_payer_balance, precomputed.zero } @@ -542,53 +575,53 @@ namespace tx; pol commit uint32_max; is_collect_fee * (uint32_max - 0xffffffff) = 0; - // TODO: Commented out for now, to make the bulk test pass before all opcodes are implemented. - // TODO: Lookup for now: will need multipermutation - // #[BALANCE_UPDATE] - // is_collect_fee { - // fee_payer_new_balance, - // fee_juice_contract_address, - // fee_juice_balance_slot, - // prev_public_data_tree_root, - // next_public_data_tree_root, - // prev_public_data_tree_size, - // next_public_data_tree_size, - // uint32_max - // } in public_data_check.write { - // public_data_check.value, - // public_data_check.address, - // public_data_check.slot, - // public_data_check.root, - // public_data_check.write_root, - // public_data_check.tree_size_before_write, - // public_data_check.tree_size_after_write, - // public_data_check.clk - // }; + #[BALANCE_UPDATE] + is_collect_fee { + fee_payer_new_balance, + fee_juice_contract_address, + fee_juice_balance_slot, + discard, + prev_public_data_tree_root, + next_public_data_tree_root, + prev_public_data_tree_size, + next_public_data_tree_size, + uint32_max + } is public_data_check.protocol_write { + public_data_check.value, + public_data_check.address, + public_data_check.slot, + public_data_check.discard, + public_data_check.root, + 
public_data_check.write_root, + public_data_check.tree_size_before_write, + public_data_check.tree_size_after_write, + public_data_check.clk + }; is_collect_fee * (constants.AVM_PUBLIC_INPUTS_TRANSACTION_FEE_ROW_IDX - write_pi_offset) = 0; - // TODO: Commented out for now, to make the bulk test pass before all opcodes are implemented. - // #[WRITE_FEE_PUBLIC_INPUTS] - // is_collect_fee { write_pi_offset, fee } - // in public_inputs.sel { precomputed.clk, public_inputs.cols[0] }; - - pol commit end_gas_used_pi_offset; // TODO: remove when we can use constants in lookups - end_gas_used_pi_offset = is_collect_fee * constants.AVM_PUBLIC_INPUTS_END_GAS_USED_ROW_IDX; - - // TODO: Commented out for now, to make the bulk test pass before all opcodes are implemented. - // #[WRITE_END_GAS_USED_PUBLIC_INPUTS] - // is_collect_fee { - // end_gas_used_pi_offset, - // prev_da_gas_used, - // prev_l2_gas_used - // } in - // public_inputs.sel { - // precomputed.clk, - // public_inputs.cols[0], - // public_inputs.cols[1] - // }; - - // TODO: Constrain evolution: Needs to be read from PI on the first row, then teardown can update - pol commit l2_gas_limit; - pol commit da_gas_limit; - + #[WRITE_FEE_PUBLIC_INPUTS] + is_collect_fee { write_pi_offset, fee } + in public_inputs.sel { precomputed.clk, public_inputs.cols[0] }; + + // ===== TREE PADDING ===== + pol commit is_tree_padding; + + // Roots and num emitted don't change, only size changes. + // Necessary since we are allowed in this phase to change tree roots, sizes and emitted counts. 
+ #[NOTE_HASH_TREE_ROOT_IMMUTABLE_IN_PADDING] + is_tree_padding * (prev_note_hash_tree_root - next_note_hash_tree_root) = 0; + #[PAD_NOTE_HASH_TREE] + is_tree_padding * ((prev_note_hash_tree_size + constants.MAX_NOTE_HASHES_PER_TX - prev_num_note_hashes_emitted) - next_note_hash_tree_size) = 0; + #[NOTE_HASHES_EMITTED_IMMUTABLE_IN_PADDING] + is_tree_padding * (prev_num_note_hashes_emitted - next_num_note_hashes_emitted) = 0; + #[NULLIFIER_TREE_ROOT_IMMUTABLE_IN_PADDING] + is_tree_padding * (prev_nullifier_tree_root - next_nullifier_tree_root) = 0; + #[PAD_NULLIFIER_TREE] + is_tree_padding * ((prev_nullifier_tree_size + constants.MAX_NULLIFIERS_PER_TX - prev_num_nullifiers_emitted) - next_nullifier_tree_size) = 0; + #[NULLIFIERS_EMITTED_IMMUTABLE_IN_PADDING] + is_tree_padding * (prev_num_nullifiers_emitted - next_num_nullifiers_emitted) = 0; + + // ===== CLEANUP ===== + // Most constraints for this phase are located in tx_context.pil + pol commit is_cleanup; diff --git a/barretenberg/cpp/pil/vm2/tx_context.pil b/barretenberg/cpp/pil/vm2/tx_context.pil index b4f5fd163375..1543662759ce 100644 --- a/barretenberg/cpp/pil/vm2/tx_context.pil +++ b/barretenberg/cpp/pil/vm2/tx_context.pil @@ -23,6 +23,155 @@ namespace tx; // L1 to L2 tree doesn't evolve during execution of the AVM pol commit l1_l2_tree_root; + pol commit prev_retrieved_bytecodes_tree_root; + pol commit prev_retrieved_bytecodes_tree_size; + + // TODO: Constrain first row read from PI + pol commit prev_l2_gas_used; + pol commit prev_da_gas_used; + + // Gas Info + pol commit l2_gas_limit; + pol commit da_gas_limit; + + // Prev side effect states + pol commit prev_num_unencrypted_log_fields; + pol commit prev_num_l2_to_l1_messages; + + // ==== READ START/END STATE FROM PI ==== + // TODO: Consider making this mechanism generic - might not be worth it in a short trace like the tx one. 
+ + // Read start/end state of the note hash tree from PI + pol commit note_hash_pi_offset; + start_tx * (constants.AVM_PUBLIC_INPUTS_START_TREE_SNAPSHOTS_NOTE_HASH_TREE_ROW_IDX - note_hash_pi_offset) = 0; + is_cleanup * (constants.AVM_PUBLIC_INPUTS_END_TREE_SNAPSHOTS_NOTE_HASH_TREE_ROW_IDX - note_hash_pi_offset) = 0; + + pol commit should_read_note_hash_tree; + should_read_note_hash_tree = start_tx + is_cleanup; + + #[PUBLIC_INPUTS_NOTE_HASH_TREE] + should_read_note_hash_tree { + note_hash_pi_offset, + prev_note_hash_tree_root, + prev_note_hash_tree_size + } in public_inputs.sel { + precomputed.clk, + public_inputs.cols[0], + public_inputs.cols[1] + }; + + start_tx * prev_num_note_hashes_emitted = 0; + + // Read start/end state of the nullifier tree from PI + pol commit nullifier_pi_offset; + start_tx * (constants.AVM_PUBLIC_INPUTS_START_TREE_SNAPSHOTS_NULLIFIER_TREE_ROW_IDX - nullifier_pi_offset) = 0; + is_cleanup * (constants.AVM_PUBLIC_INPUTS_END_TREE_SNAPSHOTS_NULLIFIER_TREE_ROW_IDX - nullifier_pi_offset) = 0; + + pol commit should_read_nullifier_tree; + should_read_nullifier_tree = start_tx + is_cleanup; + + #[PUBLIC_INPUTS_NULLIFIER_TREE] + should_read_nullifier_tree { + nullifier_pi_offset, + prev_nullifier_tree_root, + prev_nullifier_tree_size + } in public_inputs.sel { + precomputed.clk, + public_inputs.cols[0], + public_inputs.cols[1] + }; + + start_tx * prev_num_nullifiers_emitted = 0; + + // Read start/end state of the public data tree from PI + pol commit public_data_pi_offset; + start_tx * (constants.AVM_PUBLIC_INPUTS_START_TREE_SNAPSHOTS_PUBLIC_DATA_TREE_ROW_IDX - public_data_pi_offset) = 0; + is_cleanup * (constants.AVM_PUBLIC_INPUTS_END_TREE_SNAPSHOTS_PUBLIC_DATA_TREE_ROW_IDX - public_data_pi_offset) = 0; + + pol commit should_read_public_data_tree; + should_read_public_data_tree = start_tx + is_cleanup; + + #[PUBLIC_INPUTS_PUBLIC_DATA_TREE] + should_read_public_data_tree { + public_data_pi_offset, + prev_public_data_tree_root, + 
prev_public_data_tree_size + } in public_inputs.sel { + precomputed.clk, + public_inputs.cols[0], + public_inputs.cols[1] + }; + + // Ensure the written public data slots tree starts with the expected state on every tx execution. + start_tx * (constants.AVM_WRITTEN_PUBLIC_DATA_SLOTS_TREE_INITIAL_ROOT - prev_written_public_data_slots_tree_root) = 0; + start_tx * (constants.AVM_WRITTEN_PUBLIC_DATA_SLOTS_TREE_INITIAL_SIZE - prev_written_public_data_slots_tree_size) = 0; + + // Read the state of the l1 to l2 message tree from PI + pol commit l1_l2_pi_offset; + start_tx * (constants.AVM_PUBLIC_INPUTS_START_TREE_SNAPSHOTS_L1_TO_L2_MESSAGE_TREE_ROW_IDX - l1_l2_pi_offset) = 0; + is_cleanup * (constants.AVM_PUBLIC_INPUTS_END_TREE_SNAPSHOTS_L1_TO_L2_MESSAGE_TREE_ROW_IDX - l1_l2_pi_offset) = 0; + + pol commit should_read_l1_l2_tree; + should_read_l1_l2_tree = start_tx + is_cleanup; + + #[PUBLIC_INPUTS_L1_L2_TREE] + should_read_l1_l2_tree { + l1_l2_pi_offset, + l1_l2_tree_root + } in public_inputs.sel { + precomputed.clk, + public_inputs.cols[0] + }; + + // Ensure the retrieved bytecodes tree starts with the expected state on every tx execution. 
+ start_tx * (constants.AVM_RETRIEVED_BYTECODES_TREE_INITIAL_ROOT - prev_retrieved_bytecodes_tree_root) = 0; + start_tx * (constants.AVM_RETRIEVED_BYTECODES_TREE_INITIAL_SIZE - prev_retrieved_bytecodes_tree_size) = 0; + + // Read start/end state of gas used from PI + pol commit gas_used_pi_offset; + start_tx * (constants.AVM_PUBLIC_INPUTS_START_GAS_USED_ROW_IDX - gas_used_pi_offset) = 0; + is_cleanup * (constants.AVM_PUBLIC_INPUTS_END_GAS_USED_ROW_IDX - gas_used_pi_offset) = 0; + + pol commit should_read_gas_used; + should_read_gas_used = start_tx + is_cleanup; + + #[PUBLIC_INPUTS_GAS_USED] + should_read_gas_used { + gas_used_pi_offset, + prev_da_gas_used, + prev_l2_gas_used + } in public_inputs.sel { + precomputed.clk, + public_inputs.cols[0], + public_inputs.cols[1] + }; + + // Read gas limit and teardown gas limit from PI + + pol commit gas_limit_pi_offset; + start_tx * (constants.AVM_PUBLIC_INPUTS_GAS_SETTINGS_GAS_LIMITS_ROW_IDX - gas_limit_pi_offset) = 0; + is_teardown_phase * (constants.AVM_PUBLIC_INPUTS_GAS_SETTINGS_TEARDOWN_GAS_LIMITS_ROW_IDX - gas_limit_pi_offset) = 0; + + // Gas limit is read twice, but not on start and end, but rather on start and teardown. + // On start we read the general tx gas limit, and on teardown we read the teardown gas limit. 
+ pol commit should_read_gas_limit; + should_read_gas_limit = start_tx + is_teardown_phase; + + #[PUBLIC_INPUTS_READ_GAS_LIMIT] + should_read_gas_limit { + gas_limit_pi_offset, + da_gas_limit, + l2_gas_limit + } in public_inputs.sel { + precomputed.clk, + public_inputs.cols[0], + public_inputs.cols[1] + }; + + // Reset side effect states on start + start_tx * prev_num_unencrypted_log_fields = 0; + start_tx * prev_num_l2_to_l1_messages = 0; + // Next Tree State pol commit next_note_hash_tree_root; pol commit next_note_hash_tree_size; @@ -38,11 +187,15 @@ namespace tx; pol commit next_written_public_data_slots_tree_root; pol commit next_written_public_data_slots_tree_size; - // Prev side effect states - pol commit prev_num_unencrypted_logs; - pol commit prev_num_l2_to_l1_messages; + pol commit next_retrieved_bytecodes_tree_root; + pol commit next_retrieved_bytecodes_tree_size; + + // Gas Info + pol commit next_l2_gas_used; + pol commit next_da_gas_used; + // Next side effect states - pol commit next_num_unencrypted_logs; + pol commit next_num_unencrypted_log_fields; pol commit next_num_l2_to_l1_messages; pol NOT_LAST_ROW = sel * sel'; @@ -70,49 +223,62 @@ namespace tx; NOT_LAST_ROW * (1 - reverted) * (next_written_public_data_slots_tree_size - prev_written_public_data_slots_tree_size') = 0; #[L1_L2_TREE_ROOT_CONTINUITY] NOT_LAST_ROW * (l1_l2_tree_root - l1_l2_tree_root') = 0; + #[RETRIEVED_BYTECODES_TREE_ROOT_CONTINUITY] + NOT_LAST_ROW * (next_retrieved_bytecodes_tree_root - prev_retrieved_bytecodes_tree_root') = 0; + #[RETRIEVED_BYTECODES_TREE_SIZE_CONTINUITY] + NOT_LAST_ROW * (next_retrieved_bytecodes_tree_size - prev_retrieved_bytecodes_tree_size') = 0; #[NUM_UNENCRYPTED_LOGS_CONTINUITY] - NOT_LAST_ROW * (1 - reverted) * (next_num_unencrypted_logs - prev_num_unencrypted_logs') = 0; + NOT_LAST_ROW * (1 - reverted) * (next_num_unencrypted_log_fields - prev_num_unencrypted_log_fields') = 0; #[NUM_L2_TO_L1_MESSAGES_CONTINUITY] NOT_LAST_ROW * (1 - reverted) * 
(next_num_l2_to_l1_messages - prev_num_l2_to_l1_messages') = 0; pol commit setup_phase_value; sel * (4 - setup_phase_value) = 0; - // TODO: Commented out for now, to make the bulk test pass before all opcodes are implemented. - // #[RESTORE_STATE_ON_REVERT] - // reverted { - // setup_phase_value, - // reverted, // end_phase = 1 - // prev_note_hash_tree_root', - // prev_note_hash_tree_size', - // prev_num_note_hashes_emitted', - // prev_nullifier_tree_root', - // prev_nullifier_tree_size', - // prev_num_nullifiers_emitted', - // prev_public_data_tree_root', - // prev_public_data_tree_size', - // prev_written_public_data_slots_tree_root', - // prev_written_public_data_slots_tree_size', - // prev_num_unencrypted_logs', - // prev_num_l2_to_l1_messages' - // } in - // tx.sel { - // phase_value, - // end_phase, - // next_note_hash_tree_root, - // next_note_hash_tree_size, - // next_num_note_hashes_emitted, - // next_nullifier_tree_root, - // next_nullifier_tree_size, - // next_num_nullifiers_emitted, - // next_public_data_tree_root, - // next_public_data_tree_size, - // next_written_public_data_slots_tree_root, - // next_written_public_data_slots_tree_size, - // next_num_unencrypted_logs, - // next_num_l2_to_l1_messages - // }; + #[RESTORE_STATE_ON_REVERT] + reverted { + setup_phase_value, + reverted, // end_phase = 1 + prev_note_hash_tree_root', + prev_note_hash_tree_size', + prev_num_note_hashes_emitted', + prev_nullifier_tree_root', + prev_nullifier_tree_size', + prev_num_nullifiers_emitted', + prev_public_data_tree_root', + prev_public_data_tree_size', + prev_written_public_data_slots_tree_root', + prev_written_public_data_slots_tree_size', + prev_num_unencrypted_log_fields', + prev_num_l2_to_l1_messages' + } in + tx.sel { + phase_value, + end_phase, + next_note_hash_tree_root, + next_note_hash_tree_size, + next_num_note_hashes_emitted, + next_nullifier_tree_root, + next_nullifier_tree_size, + next_num_nullifiers_emitted, + next_public_data_tree_root, + 
next_public_data_tree_size, + next_written_public_data_slots_tree_root, + next_written_public_data_slots_tree_size, + next_num_unencrypted_log_fields, + next_num_l2_to_l1_messages + }; + + #[PROPAGATE_L2_GAS_USED] + NOT_LAST_ROW * (next_l2_gas_used - prev_l2_gas_used') = 0; + #[PROPAGATE_DA_GAS_USED] + NOT_LAST_ROW * (next_da_gas_used - prev_da_gas_used') = 0; + + #[PROPAGATE_L2_GAS_LIMIT] + NOT_LAST_ROW * (1 - is_teardown_phase') * (l2_gas_limit - l2_gas_limit') = 0; + #[PROPAGATE_DA_GAS_LIMIT] + NOT_LAST_ROW * (1 - is_teardown_phase') * (da_gas_limit - da_gas_limit') = 0; // Selectors to allow prev => next state changes on the different phases pol commit sel_can_emit_note_hash; @@ -146,11 +312,16 @@ namespace tx; sel * (1 - sel_can_write_public_data) * (prev_written_public_data_slots_tree_size - next_written_public_data_slots_tree_size) = 0; #[UNENCRYPTED_LOG_COUNT_IMMUTABILITY] - sel * (1 - sel_can_emit_unencrypted_log) * (prev_num_unencrypted_logs - next_num_unencrypted_logs) = 0; + sel * (1 - sel_can_emit_unencrypted_log) * (prev_num_unencrypted_log_fields - next_num_unencrypted_log_fields) = 0; #[L2_TO_L1_MESSAGE_COUNT_IMMUTABILITY] sel * (1 - sel_can_emit_l2_l1_msg) * (prev_num_l2_to_l1_messages - next_num_l2_to_l1_messages) = 0; + #[RETRIEVED_BYTECODES_TREE_ROOT_IMMUTABILITY] + sel * (1 - should_process_call_request) * (prev_retrieved_bytecodes_tree_root - next_retrieved_bytecodes_tree_root) = 0; + #[RETRIEVED_BYTECODES_TREE_SIZE_IMMUTABILITY] + sel * (1 - should_process_call_request) * (prev_retrieved_bytecodes_tree_size - next_retrieved_bytecodes_tree_size) = 0; + // Padded rows are not allowed to change the state #[NOTE_HASH_ROOT_PADDED_IMMUTABILITY] sel * is_padded * (prev_note_hash_tree_root - next_note_hash_tree_root) = 0; @@ -175,21 +346,88 @@ namespace tx; #[WRITTEN_PUBLIC_DATA_SLOTS_SIZE_PADDED_IMMUTABILITY] sel * is_padded * (prev_written_public_data_slots_tree_size - next_written_public_data_slots_tree_size) = 0; + // The retrieved bytecodes tree 
is already immutable except for nonpadded call request rows + #[UNENCRYPTED_LOG_COUNT_PADDED_IMMUTABILITY] - sel * is_padded * (prev_num_unencrypted_logs - next_num_unencrypted_logs) = 0; + sel * is_padded * (prev_num_unencrypted_log_fields - next_num_unencrypted_log_fields) = 0; #[L2_TO_L1_MESSAGE_COUNT_PADDED_IMMUTABILITY] sel * is_padded * (prev_num_l2_to_l1_messages - next_num_l2_to_l1_messages) = 0; - // Gas Info - // TODO: Constrain first row read from PI - pol commit prev_l2_gas_used; - pol commit prev_da_gas_used; - - pol commit next_l2_gas_used; - pol commit next_da_gas_used; - - #[PROPAGATE_L2_GAS_USED] - NOT_LAST_ROW * (next_l2_gas_used - prev_l2_gas_used') = 0; - #[PROPAGATE_DA_GAS_USED] - NOT_LAST_ROW * (next_da_gas_used - prev_da_gas_used') = 0; - + #[L2_GAS_USED_IMMUTABILITY] + sel * (1 - should_process_call_request) * (prev_l2_gas_used - next_l2_gas_used) = 0; + #[DA_GAS_USED_IMMUTABILITY] + sel * (1 - should_process_call_request) * (prev_da_gas_used - next_da_gas_used) = 0; + + // Write end counters to PI + + // Write note hash count to PI + pol commit array_length_note_hashes_pi_offset; + + is_cleanup * (constants.AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_ARRAY_LENGTHS_NOTE_HASHES_ROW_IDX - array_length_note_hashes_pi_offset) = 0; + + #[PUBLIC_INPUTS_WRITE_NOTE_HASH_COUNT] + is_cleanup { + array_length_note_hashes_pi_offset, + prev_num_note_hashes_emitted + } in public_inputs.sel { + precomputed.clk, + public_inputs.cols[0] + }; + + // Write nullifier count to PI + pol commit array_length_nullifiers_pi_offset; + + is_cleanup * (constants.AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_ARRAY_LENGTHS_NULLIFIERS_ROW_IDX - array_length_nullifiers_pi_offset) = 0; + + #[PUBLIC_INPUTS_WRITE_NULLIFIER_COUNT] + is_cleanup { + array_length_nullifiers_pi_offset, + prev_num_nullifiers_emitted + } in public_inputs.sel { + precomputed.clk, + public_inputs.cols[0] + }; + + // Public data write counter is handled by the public data check trace due to squashing. 
+ + // Write l2 to l1 message count to PI + pol commit array_length_l2_to_l1_messages_pi_offset; + + is_cleanup * (constants.AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_ARRAY_LENGTHS_L2_TO_L1_MSGS_ROW_IDX - array_length_l2_to_l1_messages_pi_offset) = 0; + + #[PUBLIC_INPUTS_WRITE_L2_TO_L1_MESSAGE_COUNT] + is_cleanup { + array_length_l2_to_l1_messages_pi_offset, + prev_num_l2_to_l1_messages + } in public_inputs.sel { + precomputed.clk, + public_inputs.cols[0] + }; + + // Write unencrypted log fields count to PI + pol commit fields_length_unencrypted_logs_pi_offset; + + // The structure of public logs is [fields_length, ...[log_0_size, log_0_addr, ...log_0_fields, ...]] + is_cleanup * (constants.AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_PUBLIC_LOGS_ROW_IDX - fields_length_unencrypted_logs_pi_offset) = 0; + + #[PUBLIC_INPUTS_WRITE_UNENCRYPTED_LOG_COUNT] + is_cleanup { + fields_length_unencrypted_logs_pi_offset, + prev_num_unencrypted_log_fields + } in public_inputs.sel { + precomputed.clk, + public_inputs.cols[0] + }; + + // ===== EXECUTION CONTEXT ID ===== + + // The next available context id. When peforming an enqueued call, this is the context id used by it. + pol commit next_context_id; + // Initial next context id is 1 + #[NEXT_CONTEXT_ID_INITIAL_VALUE] + start_tx * (1 - next_context_id) = 0; + + // We normally propagate the next context id, unless a call is issued, where the execution trace + // controls the next row's next context id. + #[NEXT_CONTEXT_ID_CONTINUITY] + NOT_LAST * (1 - should_process_call_request) * (next_context_id' - next_context_id) = 0; diff --git a/barretenberg/cpp/pil/vm2/tx_discard.pil b/barretenberg/cpp/pil/vm2/tx_discard.pil new file mode 100644 index 000000000000..bdef5510aaa1 --- /dev/null +++ b/barretenberg/cpp/pil/vm2/tx_discard.pil @@ -0,0 +1,48 @@ +/** + * Discarding on error for tx-level operations. 
+ * + * Design Document: https://docs.google.com/document/d/1xz5sZSxTu841K8uvnT8U-nO2X5ZjY8o0TjOYKfT9b6o + * + * This subtrace is focused on managing the changes to the discard column. + * It is a virtual gadget, which is part of the execution trace. + * + * 1. Propagate discard by default. + * 2. Discard and reverted must be 0 for all rows of startup, non-revertibles, setup, fee-payment/tree-padding/cleanup. + * 3. If reverted is 1, discard must be 1. + * 4. Lift/relax propagation only in either of the following two scenarios: + * * A revert is encountered. + * * End of setup is encountered. + */ +namespace tx; // virtual to tx.pil + // No relations will be checked if this identity is satisfied. + #[skippable_if] + sel = 0; // from tx.pil. + + pol commit discard; + discard * (1 - discard) = 0; + + // If discard == 1, is_revertible must be 1 + // Can ONLY discard during revertible phases (revertible insertions, app-logic, teardown) + #[CAN_ONLY_DISCARD_IN_REVERTIBLE_PHASES] + discard * (1 - is_revertible) = 0; + + // If failure == 1, discard must be 1 + #[FAILURE_MUST_DISCARD] + reverted * (1 - discard) = 0; + + // By default, discard's value is propagated to the next row. + // Lift/relax propagation of discard to the next row if: + // 1. A failure (reverted == 1) occurs in the current row. + // 2. This row is the last row of SETUP. + // + // We can know that this row is the "last row of setup" iff: + // a. The current row is NOT revertible: `is_revertible == 0` + // b. AND the next row _is_ revertible: `is_revertible' == 1` + + pol LAST_ROW_OF_SETUP = (1 - is_revertible) * (is_revertible'); + pol PROPAGATE_DISCARD = (1 - LAST_ROW_OF_SETUP) * (1 - reverted); + + // If propagate_discard == 1, discard' = discard. 
+ #[DISCARD_PROPAGATION] + sel * PROPAGATE_DISCARD * (discard' - discard) = 0; + diff --git a/barretenberg/cpp/scripts/README.md b/barretenberg/cpp/scripts/README.md index 8f9174bd02ab..bff4a3deeb4a 100644 --- a/barretenberg/cpp/scripts/README.md +++ b/barretenberg/cpp/scripts/README.md @@ -29,6 +29,6 @@ There are scripts that: ## How - `./scripts/benchmark_client_ivc.sh` lets you run `client_ivc_bench` remotely and analyze the results. -- `./scripts/benchmark_example_ivc_flow_remote.sh` copies the example flow input you'd like to run to the remote machine, runs `bb_cli_bench`, and analyze the results. +- `./scripts/benchmark_example_ivc_flow_remote.sh` copies the example flow input you'd like to run to the remote machine, runs `bb prove`, and analyze the results. - For the script to work you need to have the example flows downloaded locally, by `AZTEC_CACHE_COMMIT=origin/next~3 FORCE_CACHE_DOWNLOAD=1 yarn-project/end-to-end/bootstrap.sh build_bench` - If you have other special needs, look inside the above scripts and see what parameters you can give, or use `./scripts/benchmark_remote.sh`. 
diff --git a/barretenberg/cpp/scripts/analyze_client_ivc_bench.py b/barretenberg/cpp/scripts/analyze_client_ivc_bench.py deleted file mode 100755 index a61af0fa4522..000000000000 --- a/barretenberg/cpp/scripts/analyze_client_ivc_bench.py +++ /dev/null @@ -1,144 +0,0 @@ -import json -import argparse -from pathlib import Path - -# Define command-line arguments with defaults -parser = argparse.ArgumentParser(description="Analyze benchmark JSON data.") -parser.add_argument("--json", type=Path, default=Path("client_ivc_bench.json"), help="Benchmark JSON file name.") -parser.add_argument("--benchmark", type=str, default="ClientIVCBench/Full/6", help="Benchmark name to analyze.") -parser.add_argument("--prefix", type=Path, default=Path("build-op-count-time"), help="Prefix path for benchmark files.") -args = parser.parse_args() - -IVC_BENCH_JSON = args.json -BENCHMARK = args.benchmark -PREFIX = args.prefix - -# Single out an independent set of functions accounting for most of BENCHMARK's real_time -to_keep = [ - "construct_circuits(t)", - "create_circuit(t)", - "DeciderProvingKey(Circuit&)(t)", - "ProtogalaxyProver::prove(t)", - "Decider::construct_proof(t)", - "ECCVMProver(CircuitBuilder&)(t)", - "ECCVMProver::construct_proof(t)", - "TranslatorProver::construct_proof(t)", - "Goblin::merge(t)", - "parse(t)", - "load_and_decompress(t)" -] - -with open(PREFIX / IVC_BENCH_JSON, "r") as read_file: - read_result = json.load(read_file) - for _bench in read_result["benchmarks"]: - if _bench["name"] == BENCHMARK or BENCHMARK == "": - bench = _bench - -bench_components = dict(filter(lambda x: x[0] in to_keep, bench.items())) - -# For each kept time, get the proportion over all kept times. 
-sum_of_kept_times_ms = sum(float(time) for _, time in bench_components.items()) / 1e6 - -max_label_length = max(len(label) for label in to_keep) -column = {"function": "function", "ms": "ms", "%": "% sum"} -print("\nClientIVC Benchmark Analysis: \n") -print(f"{column['function']:<{max_label_length}}{column['ms']:>8} {column['%']:>8}") - -for key in to_keep: - if key not in bench: - time_ms = 0 - else: - time_ms = bench[key] / 1e6 - print(f"{key:<{max_label_length}}{time_ms:>8.0f} {time_ms/sum_of_kept_times_ms:>8.2%}") - -# Validate that kept times account for most of the total measured time. -total_time_ms = bench["real_time"] -totals = '\nTotal time accounted for: {:.0f}ms/{:.0f}ms = {:.2%}' -totals = totals.format( - sum_of_kept_times_ms, total_time_ms, sum_of_kept_times_ms/total_time_ms) -print(totals) - -print("\nMajor contributors:") -print( - f"{column['function']:<{max_label_length}}{column['ms']:>8} {column['%']:>7}") -for key in ['commit(t)', 'compute_combiner(t)', 'compute_perturbator(t)', 'compute_univariate(t)']: - if key not in bench: - time_ms = 0 - else: - time_ms = bench[key]/1e6 - print(f"{key:<{max_label_length}}{time_ms:>8.0f} {time_ms/sum_of_kept_times_ms:>8.2%}") - -print('\nBreakdown of ProtogalaxyProver::prove:') -protogalaxy_round_labels = [ - "ProtogalaxyProver_::run_oink_prover_on_each_incomplete_key(t)", - "ProtogalaxyProver_::perturbator_round(t)", - "ProtogalaxyProver_::combiner_quotient_round(t)", - "ProtogalaxyProver_::update_target_sum_and_fold(t)" -] -max_label_length = max(len(label) for label in protogalaxy_round_labels) -for key in protogalaxy_round_labels: - if key not in bench: - time_ms = 0 - else: - time_ms = bench[key]/1e6 - total_time_ms = bench["ProtogalaxyProver::prove(t)"]/1e6 - print(f"{key:<{max_label_length}}{time_ms:>8.0f} {time_ms/total_time_ms:>8.2%}") - -# Extract a set of components from the benchmark data and display timings and relative percentages -def print_contributions(prefix, ivc_bench_json, bench_name, 
components): - - # Read JSON file and extract benchmark - try: - with open(prefix / ivc_bench_json, "r") as read_file: - read_result = json.load(read_file) - bench = next((_bench for _bench in read_result["benchmarks"] if _bench["name"] == bench_name or bench_name == ""), None) - if not bench: - raise ValueError(f"Benchmark '{bench_name}' not found in the JSON file.") - except FileNotFoundError: - print(f"File not found: {prefix / ivc_bench_json}") - return - - # Filter and sum up kept times - bench_components = {key: bench[key] for key in components if key in bench} - sum_of_kept_times_ms = sum(float(time) for time in bench_components.values()) / 1e6 - print(f"Total time accounted for (ms): {sum_of_kept_times_ms:>8.0f}") - - # Print results - max_label_length = max(len(label) for label in components) - column_headers = {"operation": "operation", "ms": "ms", "%": "% sum"} - print(f"{column_headers['operation']:<{max_label_length}}{column_headers['ms']:>8} {column_headers['%']:>8}") - - for key in components: - time_ms = bench_components.get(key, 0) / 1e6 - percentage = time_ms / sum_of_kept_times_ms if sum_of_kept_times_ms > 0 else 0 - print(f"{key:<{max_label_length}}{time_ms:>8.0f} {percentage:>8.2%}") - -relations = [ - "Arithmetic::accumulate(t)", - "Permutation::accumulate(t)", - "Lookup::accumulate(t)", - "DeltaRange::accumulate(t)", - "Elliptic::accumulate(t)", - "Memory::accumulate(t)", - "NonNativeField::accumulate(t)", - "EccOp::accumulate(t)", - "DatabusRead::accumulate(t)", - "PoseidonExt::accumulate(t)", - "PoseidonInt::accumulate(t)", -] - -print('\nRelation contributions (times to be interpreted relatively):') -print_contributions(PREFIX, IVC_BENCH_JSON, BENCHMARK, relations) - -commitments = [ - "COMMIT::wires(t)", - "COMMIT::z_perm(t)", - "COMMIT::databus(t)", - "COMMIT::ecc_op_wires(t)", - "COMMIT::lookup_inverses(t)", - "COMMIT::databus_inverses(t)", - "COMMIT::lookup_counts_tags(t)", -] - -print('\nCommitment contributions:') 
-print_contributions(PREFIX, IVC_BENCH_JSON, BENCHMARK, commitments) diff --git a/barretenberg/cpp/scripts/analyze_protogalaxy_bench.py b/barretenberg/cpp/scripts/analyze_protogalaxy_bench.py deleted file mode 100755 index bb1906678eb1..000000000000 --- a/barretenberg/cpp/scripts/analyze_protogalaxy_bench.py +++ /dev/null @@ -1,61 +0,0 @@ -import json -from pathlib import Path - -PREFIX = Path("build-op-count-time") -PROTOGALAXY_BENCH_JSON = Path("protogalaxy_bench.json") -BENCHMARK = "fold_k/16" - -# Single out an independent set of functions accounting for most of BENCHMARK's real_time -to_keep = [ - "ProtogalaxyProver::prove(t)", -] -with open(PREFIX/PROTOGALAXY_BENCH_JSON, "r") as read_file: - read_result = json.load(read_file) - for _bench in read_result["benchmarks"]: - print(_bench) - if _bench["name"] == BENCHMARK: - bench = _bench -bench_components = dict(filter(lambda x: x[0] in to_keep, bench.items())) - -# For each kept time, get the proportion over all kept times. -sum_of_kept_times_ms = sum(float(time) - for _, time in bench_components.items())/1e6 -max_label_length = max(len(label) for label in to_keep) -column = {"function": "function", "ms": "ms", "%": "% sum"} -print( - f"{column['function']:<{max_label_length}}{column['ms']:>8} {column['%']:>8}") -for key in to_keep: - time_ms = bench[key]/1e6 - print(f"{key:<{max_label_length}}{time_ms:>8.0f} {time_ms/sum_of_kept_times_ms:>8.2%}") - -# Validate that kept times account for most of the total measured time. 
-total_time_ms = bench["real_time"] -totals = '\nTotal time accounted for: {:.0f}ms/{:.0f}ms = {:.2%}' -totals = totals.format( - sum_of_kept_times_ms, total_time_ms, sum_of_kept_times_ms/total_time_ms) -print(totals) - -print("\nMajor contributors:") -print( - f"{column['function']:<{max_label_length}}{column['ms']:>8} {column['%']:>7}") -for key in ['commit(t)', 'compute_combiner(t)', 'compute_perturbator(t)', 'compute_univariate(t)']: - if key not in bench: - time_ms = 0 - else: - time_ms = bench[key]/1e6 - print(f"{key:<{max_label_length}}{time_ms:>8.0f} {time_ms/sum_of_kept_times_ms:>8.2%}") - -print('\nBreakdown of ProtogalaxyProver::prove:') -protogalaxy_round_labels = [ - "ProtogalaxyProver_::run_oink_prover_on_each_incomplete_key(t)", - "ProtogalaxyProver_::perturbator_round(t)", - "ProtogalaxyProver_::combiner_quotient_round(t)", - "ProtogalaxyProver_::update_target_sum_and_fold(t)" -] -max_label_length = max(len(label) for label in protogalaxy_round_labels) -for key in protogalaxy_round_labels: - time_ms = bench[key]/1e6 - total_time_ms = bench["ProtogalaxyProver::prove(t)"]/1e6 - print(f"{key:<{max_label_length}}{time_ms:>8.0f} {time_ms/total_time_ms:>8.2%}") - - diff --git a/barretenberg/cpp/scripts/analyze_vm_compile_time.sh b/barretenberg/cpp/scripts/analyze_vm_compile_time.sh index 894edfc80662..3e0da46f99c2 100755 --- a/barretenberg/cpp/scripts/analyze_vm_compile_time.sh +++ b/barretenberg/cpp/scripts/analyze_vm_compile_time.sh @@ -1,13 +1,13 @@ #!/usr/bin/env bash # This script summarises the compilation time for the vm # The summary json file is outputted to $BUILD_DIR/avm_compilation_summary.json -# it takes in two params the preset(e.g. clang16, clang16-dbg) and a target (e.g. bb, vm) -# it can be called like this => ./analyze_vm_compile_time.sh clang16 bb +# it takes in two params the preset(e.g. clang20, debug) and a target (e.g. 
bb, vm) +# it can be called like this => ./analyze_vm_compile_time.sh clang20 bb set -eu # So we can glob recursively shopt -s globstar -export PRESET="${1:-clang16}" +export PRESET="${1:-clang20}" export TARGET="${2:-vm2}" BUILD_DIR=build-$PRESET-compiler-profile diff --git a/barretenberg/cpp/scripts/bench_cpu_scaling_local.sh b/barretenberg/cpp/scripts/bench_cpu_scaling_local.sh new file mode 100755 index 000000000000..9579510d1f33 --- /dev/null +++ b/barretenberg/cpp/scripts/bench_cpu_scaling_local.sh @@ -0,0 +1,281 @@ +#!/bin/bash + +# CPU scaling benchmark that runs benchmarks locally +# This script runs a command multiple times with different HARDWARE_CONCURRENCY values +# and tracks the scaling performance of specific BB_BENCH entries +# Uses --bench_out flag to get JSON output for accurate timing extraction + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +CYAN='\033[0;36m' +MAGENTA='\033[0;35m' +NC='\033[0m' # No Color + +# Parse arguments +if [ $# -lt 2 ]; then + echo -e "${RED}Usage: $0 \"benchmark_name\" \"command\" [cpu_counts]${NC}" + echo -e "Example: $0 \"ClientIvcProve\" \"./build/bin/bb prove --ivc_inputs_path input.msgpack --scheme client_ivc\"" + echo -e "Example: $0 \"construct_mock_function_circuit\" \"./build/bin/ultra_honk_bench --benchmark_filter=.*power_of_2.*/15\" \"1,2,4,8\"" + exit 1 +fi + +BENCH_NAME="$1" +COMMAND="$2" +CPU_LIST="${3:-1,2,4,8,16}" + +# Convert comma-separated list to array +IFS=',' read -ra CPU_COUNTS <<< "$CPU_LIST" + +# Create output directory with timestamp +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +OUTPUT_DIR="bench_scaling_local_${TIMESTAMP}" +mkdir -p "$OUTPUT_DIR" + +# Results file +RESULTS_FILE="$OUTPUT_DIR/scaling_results.txt" +CSV_FILE="$OUTPUT_DIR/scaling_results.csv" + +echo -e "${GREEN}╔════════════════════════════════════════════════════════════════╗${NC}" +echo -e "${GREEN}║ CPU Scaling Benchmark (Local Execution) ║${NC}" +echo -e 
"${GREEN}╚════════════════════════════════════════════════════════════════╝${NC}" +echo "" +echo -e "${CYAN}Benchmark Entry:${NC} ${YELLOW}$BENCH_NAME${NC}" +echo -e "${CYAN}Command:${NC} $COMMAND" +echo -e "${CYAN}CPU Counts:${NC} ${CPU_COUNTS[@]}" +echo -e "${CYAN}Machine:${NC} $(hostname)" +echo -e "${CYAN}Output Directory:${NC} $OUTPUT_DIR" +echo "" + +# Initialize results file +echo "CPU Scaling Benchmark: $BENCH_NAME" > "$RESULTS_FILE" +echo "Command: $COMMAND" >> "$RESULTS_FILE" +echo "Machine: $(hostname)" >> "$RESULTS_FILE" +echo "Date: $(date)" >> "$RESULTS_FILE" +echo "================================================" >> "$RESULTS_FILE" +echo "" >> "$RESULTS_FILE" + +# Initialize CSV file +echo "CPUs,Time_ms,Time_s,Speedup,Efficiency" > "$CSV_FILE" + +# Function to extract time for specific benchmark entry from JSON +extract_bench_time() { + local json_file=$1 + local bench_name=$2 + + # Extract time from JSON file using grep and sed + # JSON format is: {"benchmark_name": time_in_nanoseconds, ...} + local time_ns="" + + if [ -f "$json_file" ]; then + # Extract the value for the specific benchmark name from JSON + time_ns=$(grep -oP "\"${bench_name//\\/\\\\}\":\s*\K\d+" "$json_file" 2>/dev/null | head -1) + fi + + # If JSON extraction failed, try to extract from log file (fallback) + if [ -z "$time_ns" ] && [ -f "${json_file%/bench.json}/output.log" ]; then + local log_file="${json_file%/bench.json}/output.log" + # Try to extract from hierarchical BB_BENCH output + # Look for pattern like: " ├─ ClientIvcProve ... 
28.13s" + local time_s=$(grep -E "├─.*${bench_name}" "$log_file" | grep -oP '\d+\.\d+s' | grep -oP '\d+\.\d+' | head -1) + if [ -n "$time_s" ]; then + # Convert seconds to nanoseconds + time_ns=$(awk -v s="$time_s" 'BEGIN{printf "%.0f", s * 1000000000}') + fi + fi + + echo "$time_ns" +} + +# Store baseline time for speedup calculation +BASELINE_TIME="" + +# Arrays to store results +declare -a ALL_CPUS=() +declare -a ALL_TIMES=() +declare -a ALL_SPEEDUPS=() + +echo -e "${BLUE}Starting benchmark runs locally...${NC}" +echo "" + +# Run benchmark for each CPU count +for cpu_count in "${CPU_COUNTS[@]}"; do + echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo -e "${YELLOW}Running with ${cpu_count} CPU(s)...${NC}" + + # Create output subdirectory + run_dir="$OUTPUT_DIR/run_${cpu_count}cpus" + mkdir -p "$run_dir" + log_file="$run_dir/output.log" + bench_json_file="$run_dir/bench.json" + + # Run command locally with specified CPU count + echo -e "${CYAN}Executing locally...${NC}" + start_time=$(date +%s.%N) + + # Clean up any stale benchmark file + rm -f "$bench_json_file" + + # Execute the command locally with HARDWARE_CONCURRENCY environment variable + # Add --bench_out flag to get JSON output + HARDWARE_CONCURRENCY=$cpu_count eval "$COMMAND --bench_out $bench_json_file" 2>&1 | tee "$log_file" + + end_time=$(date +%s.%N) + wall_time=$(awk -v e="$end_time" -v s="$start_time" 'BEGIN{printf "%.2f", e-s}') + + # Extract the specific benchmark time from JSON file + bench_time_ns=$(extract_bench_time "$bench_json_file" "$BENCH_NAME") + + if [ -z "$bench_time_ns" ] || [ "$bench_time_ns" = "0" ]; then + echo -e "${RED}Warning: Could not extract timing for '$BENCH_NAME' from JSON${NC}" + echo -e "${YELLOW}Check the JSON file: $bench_json_file${NC}" + + # Show what's in the JSON file for debugging + if [ -f "$bench_json_file" ]; then + echo -e "${YELLOW}JSON content (first 500 chars):${NC}" + head -c 500 "$bench_json_file" + echo "" + fi + + 
echo "CPUs: $cpu_count - No timing data found" >> "$RESULTS_FILE" + continue + fi + + # Convert to milliseconds and seconds + bench_time_ms=$(awk -v ns="$bench_time_ns" 'BEGIN{printf "%.2f", ns / 1000000}') + bench_time_s=$(awk -v ns="$bench_time_ns" 'BEGIN{printf "%.3f", ns / 1000000000}') + + # Calculate speedup and efficiency + if [ -z "$BASELINE_TIME" ]; then + BASELINE_TIME="$bench_time_ns" + speedup="1.00" + efficiency="100.0" + else + speedup=$(awk -v base="$BASELINE_TIME" -v curr="$bench_time_ns" 'BEGIN{printf "%.2f", base / curr}') + efficiency=$(awk -v sp="$speedup" -v cpus="$cpu_count" 'BEGIN{printf "%.1f", (sp / cpus) * 100}') + fi + + # Store results + ALL_CPUS+=("$cpu_count") + ALL_TIMES+=("$bench_time_ms") + ALL_SPEEDUPS+=("$speedup") + + # Write to results file + echo "CPUs: $cpu_count" >> "$RESULTS_FILE" + echo " Time: ${bench_time_ms} ms (${bench_time_s} s)" >> "$RESULTS_FILE" + echo " Speedup: ${speedup}x" >> "$RESULTS_FILE" + echo " Efficiency: ${efficiency}%" >> "$RESULTS_FILE" + echo " Wall time: ${wall_time}s" >> "$RESULTS_FILE" + echo "" >> "$RESULTS_FILE" + + # Write to CSV + echo "$cpu_count,$bench_time_ms,$bench_time_s,$speedup,$efficiency" >> "$CSV_FILE" + + # Display results + echo -e "${GREEN}✓ Completed${NC}" + echo -e " ${CYAN}Time for '$BENCH_NAME':${NC} ${bench_time_ms} ms" + echo -e " ${CYAN}Speedup:${NC} ${speedup}x" + echo -e " ${CYAN}Efficiency:${NC} ${efficiency}%" + echo "" +done + +# Generate summary +echo -e "${GREEN}╔════════════════════════════════════════════════════════════════╗${NC}" +echo -e "${GREEN}║ SUMMARY ║${NC}" +echo -e "${GREEN}╚════════════════════════════════════════════════════════════════╝${NC}" +echo "" + +# Print table header +printf "${CYAN}%-8s %-15s %-12s %-12s${NC}\n" "CPUs" "Time (ms)" "Speedup" "Efficiency" +printf "${CYAN}%-8s %-15s %-12s %-12s${NC}\n" "────" "──────────" "───────" "──────────" + +# Print results table +for i in "${!ALL_CPUS[@]}"; do + cpu="${ALL_CPUS[$i]}" + 
time="${ALL_TIMES[$i]}" + speedup="${ALL_SPEEDUPS[$i]}" + + if [ "$i" -eq 0 ]; then + efficiency="100.0%" + else + efficiency=$(awk -v sp="$speedup" -v cpus="$cpu" 'BEGIN{printf "%.1f%%", (sp / cpus) * 100}') + fi + + # Color code based on efficiency + if [ "$i" -eq 0 ]; then + color="${GREEN}" + else + eff_val=$(echo "$efficiency" | sed 's/%//') + if awk -v eff="$eff_val" 'BEGIN {exit !(eff > 75)}'; then + color="${GREEN}" + elif awk -v eff="$eff_val" 'BEGIN {exit !(eff > 50)}'; then + color="${YELLOW}" + else + color="${RED}" + fi + fi + + printf "${color}%-8s %-15s %-12s %-12s${NC}\n" "$cpu" "$time" "${speedup}x" "$efficiency" +done + +echo "" +echo -e "${MAGENTA}═══════════════════════════════════════════════════════════════${NC}" +echo "" + +# Generate scaling plot (ASCII art) +echo -e "${CYAN}Scaling Visualization:${NC}" +echo "" + +if [ "${#ALL_TIMES[@]}" -gt 0 ]; then + # Find max time for scaling + max_time=$(printf '%s\n' "${ALL_TIMES[@]}" | sort -rn | head -1) + + # Create ASCII bar chart + for i in "${!ALL_CPUS[@]}"; do + cpu="${ALL_CPUS[$i]}" + time="${ALL_TIMES[$i]}" + + # Calculate bar length (max 50 chars) + bar_len=$(awk -v t="$time" -v m="$max_time" 'BEGIN{printf "%.0f", (t/m) * 50}') + + # Create bar + bar="" + for ((j=0; j "$RESULTS_FILE" +echo "Command: $COMMAND" >> "$RESULTS_FILE" +echo "Remote Host: $BB_SSH_INSTANCE" >> "$RESULTS_FILE" +echo "Date: $(date)" >> "$RESULTS_FILE" +echo "================================================" >> "$RESULTS_FILE" +echo "" >> "$RESULTS_FILE" + +# Initialize CSV file +echo "CPUs,Time_ms,Time_s,Speedup,Efficiency" > "$CSV_FILE" + +# Function to extract time for specific benchmark entry from JSON +extract_bench_time() { + local json_file=$1 + local bench_name=$2 + + # Extract time from JSON file using grep and sed + # JSON format is: {"benchmark_name": time_in_nanoseconds, ...} + local time_ns="" + + if [ -f "$json_file" ]; then + # Extract the value for the specific benchmark name from JSON + time_ns=$(grep 
-oP "\"${bench_name//\\/\\\\}\":\s*\K\d+" "$json_file" 2>/dev/null | head -1) + fi + + # If JSON extraction failed, try to extract from log file (fallback) + if [ -z "$time_ns" ] && [ -f "${json_file%/bench.json}/output.log" ]; then + local log_file="${json_file%/bench.json}/output.log" + # Try to extract from hierarchical BB_BENCH output + # Look for pattern like: " ├─ ClientIvcProve ... 28.13s" + local time_s=$(grep -E "├─.*${bench_name}" "$log_file" | grep -oP '\d+\.\d+s' | grep -oP '\d+\.\d+' | head -1) + if [ -n "$time_s" ]; then + # Convert seconds to nanoseconds + time_ns=$(awk -v s="$time_s" 'BEGIN{printf "%.0f", s * 1000000000}') + fi + fi + + echo "$time_ns" +} + +# Store baseline time for speedup calculation +BASELINE_TIME="" + +# Arrays to store results +declare -a ALL_CPUS=() +declare -a ALL_TIMES=() +declare -a ALL_SPEEDUPS=() + +echo -e "${BLUE}Starting benchmark runs on remote machine...${NC}" +echo "" + +# Run benchmark for each CPU count +for cpu_count in "${CPU_COUNTS[@]}"; do + echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo -e "${YELLOW}Running with ${cpu_count} CPU(s)...${NC}" + + # Create output subdirectory + run_dir="$OUTPUT_DIR/run_${cpu_count}cpus" + mkdir -p "$run_dir" + log_file="$run_dir/output.log" + + # Run command on remote machine with specified CPU count + echo -e "${CYAN}Executing on remote via benchmark_remote.sh...${NC}" + start_time=$(date +%s.%N) + + # Clean up any stale benchmark file from previous runs on remote + ssh $BB_SSH_KEY $BB_SSH_INSTANCE "rm -f /tmp/bench_${cpu_count}.json" 2>/dev/null + + # Use benchmark_remote.sh to execute on remote with --bench_out for JSON output + # The benchmark_remote.sh script handles locking and setup + # Use tee to show output in real-time AND save to log file + bench_json_file="$run_dir/bench.json" + ./scripts/benchmark_remote.sh bb "HARDWARE_CONCURRENCY=$cpu_count $COMMAND --bench_out /tmp/bench_${cpu_count}.json" 2>&1 | tee "$log_file" + + # 
Retrieve the JSON file from remote + ssh $BB_SSH_KEY $BB_SSH_INSTANCE "cat /tmp/bench_${cpu_count}.json" > "$bench_json_file" 2>/dev/null + + # Clean up the remote benchmark file after retrieval + ssh $BB_SSH_KEY $BB_SSH_INSTANCE "rm -f /tmp/bench_${cpu_count}.json" 2>/dev/null + + end_time=$(date +%s.%N) + wall_time=$(awk -v e="$end_time" -v s="$start_time" 'BEGIN{printf "%.2f", e-s}') + + # Extract the specific benchmark time from JSON file + bench_time_ns=$(extract_bench_time "$bench_json_file" "$BENCH_NAME") + + if [ -z "$bench_time_ns" ] || [ "$bench_time_ns" = "0" ]; then + echo -e "${RED}Warning: Could not extract timing for '$BENCH_NAME' from JSON${NC}" + echo -e "${YELLOW}Check the JSON file: $bench_json_file${NC}" + + # Show what's in the JSON file for debugging + if [ -f "$bench_json_file" ]; then + echo -e "${YELLOW}JSON content (first 500 chars):${NC}" + head -c 500 "$bench_json_file" + echo "" + fi + + echo "CPUs: $cpu_count - No timing data found" >> "$RESULTS_FILE" + continue + fi + + # Convert to milliseconds and seconds + bench_time_ms=$(awk -v ns="$bench_time_ns" 'BEGIN{printf "%.2f", ns / 1000000}') + bench_time_s=$(awk -v ns="$bench_time_ns" 'BEGIN{printf "%.3f", ns / 1000000000}') + + # Calculate speedup and efficiency + if [ -z "$BASELINE_TIME" ]; then + BASELINE_TIME="$bench_time_ns" + speedup="1.00" + efficiency="100.0" + else + speedup=$(awk -v base="$BASELINE_TIME" -v curr="$bench_time_ns" 'BEGIN{printf "%.2f", base / curr}') + efficiency=$(awk -v sp="$speedup" -v cpus="$cpu_count" 'BEGIN{printf "%.1f", (sp / cpus) * 100}') + fi + + # Store results + ALL_CPUS+=("$cpu_count") + ALL_TIMES+=("$bench_time_ms") + ALL_SPEEDUPS+=("$speedup") + + # Write to results file + echo "CPUs: $cpu_count" >> "$RESULTS_FILE" + echo " Time: ${bench_time_ms} ms (${bench_time_s} s)" >> "$RESULTS_FILE" + echo " Speedup: ${speedup}x" >> "$RESULTS_FILE" + echo " Efficiency: ${efficiency}%" >> "$RESULTS_FILE" + echo " Wall time: ${wall_time}s" >> "$RESULTS_FILE" + 
echo "" >> "$RESULTS_FILE" + + # Write to CSV + echo "$cpu_count,$bench_time_ms,$bench_time_s,$speedup,$efficiency" >> "$CSV_FILE" + + # Display results + echo -e "${GREEN}✓ Completed${NC}" + echo -e " ${CYAN}Time for '$BENCH_NAME':${NC} ${bench_time_ms} ms" + echo -e " ${CYAN}Speedup:${NC} ${speedup}x" + echo -e " ${CYAN}Efficiency:${NC} ${efficiency}%" + echo "" +done + +# Generate summary +echo -e "${GREEN}╔════════════════════════════════════════════════════════════════╗${NC}" +echo -e "${GREEN}║ SUMMARY ║${NC}" +echo -e "${GREEN}╚════════════════════════════════════════════════════════════════╝${NC}" +echo "" + +# Print table header +printf "${CYAN}%-8s %-15s %-12s %-12s${NC}\n" "CPUs" "Time (ms)" "Speedup" "Efficiency" +printf "${CYAN}%-8s %-15s %-12s %-12s${NC}\n" "────" "──────────" "───────" "──────────" + +# Print results table +for i in "${!ALL_CPUS[@]}"; do + cpu="${ALL_CPUS[$i]}" + time="${ALL_TIMES[$i]}" + speedup="${ALL_SPEEDUPS[$i]}" + + if [ "$i" -eq 0 ]; then + efficiency="100.0%" + else + efficiency=$(awk -v sp="$speedup" -v cpus="$cpu" 'BEGIN{printf "%.1f%%", (sp / cpus) * 100}') + fi + + # Color code based on efficiency + if [ "$i" -eq 0 ]; then + color="${GREEN}" + else + eff_val=$(echo "$efficiency" | sed 's/%//') + if awk -v eff="$eff_val" 'BEGIN {exit !(eff > 75)}'; then + color="${GREEN}" + elif awk -v eff="$eff_val" 'BEGIN {exit !(eff > 50)}'; then + color="${YELLOW}" + else + color="${RED}" + fi + fi + + printf "${color}%-8s %-15s %-12s %-12s${NC}\n" "$cpu" "$time" "${speedup}x" "$efficiency" +done + +echo "" +echo -e "${MAGENTA}═══════════════════════════════════════════════════════════════${NC}" +echo "" + +# Generate scaling plot (ASCII art) +echo -e "${CYAN}Scaling Visualization:${NC}" +echo "" + +if [ "${#ALL_TIMES[@]}" -gt 0 ]; then + # Find max time for scaling + max_time=$(printf '%s\n' "${ALL_TIMES[@]}" | sort -rn | head -1) + + # Create ASCII bar chart + for i in "${!ALL_CPUS[@]}"; do + cpu="${ALL_CPUS[$i]}" + 
time="${ALL_TIMES[$i]}" + + # Calculate bar length (max 50 chars) + bar_len=$(awk -v t="$time" -v m="$max_time" 'BEGIN{printf "%.0f", (t/m) * 50}') + + # Create bar + bar="" + for ((j=0; j/dev/null + +echo "" diff --git a/barretenberg/cpp/scripts/bench_hardware_concurrency.sh b/barretenberg/cpp/scripts/bench_hardware_concurrency.sh new file mode 100755 index 000000000000..13224b38e8aa --- /dev/null +++ b/barretenberg/cpp/scripts/bench_hardware_concurrency.sh @@ -0,0 +1,286 @@ +#!/usr/bin/env bash + +# Script to benchmark bb prove with different HARDWARE_CONCURRENCY values +# Usage: ./bench_hardware_concurrency.sh [concurrency_values...] +# Example: ./bench_hardware_concurrency.sh 1 2 4 8 16 32 +# +# To run on a remote machine with ci.sh shell-new: +# ./ci.sh shell-new "./ci3/cache_download bb-client-ivc-captures-ba1369853ed8670e.tar.gz ; \ +# mv example-app-ivc-inputs-out yarn-project/end-to-end 2>/dev/null ; \ +# DENOISE=1 DISABLE_AZTEC_VM=1 ./barretenberg/cpp/bootstrap.sh build_native ; \ +# DENOISE=1 ./barretenberg/cpp/scripts/bench_hardware_concurrency.sh" +# +# To save the output to a file: +# ./ci.sh shell-new "..." > cpu_scaling_report.md +# +# To run with specific CPU counts: +# ./ci.sh shell-new "... ./barretenberg/cpp/scripts/bench_hardware_concurrency.sh 1 2 4 8" + +REPO_ROOT=$(git rev-parse --show-toplevel) + +# Use ci3 script base. 
+source $REPO_ROOT/ci3/source_bootstrap + +# Use provided arguments or default values +if [ $# -eq 0 ]; then + CONCURRENCY_VALUES=(1 2 4 8 16 32) +else + CONCURRENCY_VALUES=("$@") +fi + +# Set DENOISE to 0 by default if not already set +DENOISE=${DENOISE:-0} + +# Test cases to run (0 and 1 recursions) +TEST_CASES=( + "ecdsar1+transfer_0_recursions+private_fpc" + "ecdsar1+transfer_1_recursions+private_fpc" +) + +# Function to run benchmark for a specific test case +run_benchmark() { + local test_case=$1 + local concurrency=$2 + + local input_path="$REPO_ROOT/yarn-project/end-to-end/example-app-ivc-inputs-out/$test_case/ivc-inputs.msgpack" + local bench_file="/tmp/bench_${test_case}_concurrency_${concurrency}.json" + + if [ ! -f "$input_path" ]; then + echo "Warning: Input file not found: $input_path" >&2 + return 1 + fi + + # Run the command with specified concurrency + local cmd="BB_BENCH=1 HARDWARE_CONCURRENCY=$concurrency $REPO_ROOT/barretenberg/cpp/build/bin/bb prove --scheme client_ivc --output_path /tmp --ivc_inputs_path $input_path --bench_out $bench_file" + + if [ "$DENOISE" = "1" ]; then + DENOISE=1 denoise "$cmd" >&2 + else + eval "$cmd" >&2 + fi + + echo "$bench_file" +} + +# Run all benchmarks +echo "Running benchmarks for HARDWARE_CONCURRENCY values: ${CONCURRENCY_VALUES[@]}" >&2 +echo "" >&2 + +for test_case in "${TEST_CASES[@]}"; do + echo "Running benchmarks for $test_case..." >&2 + for concurrency in "${CONCURRENCY_VALUES[@]}"; do + echo " HARDWARE_CONCURRENCY=$concurrency" >&2 + bench_file=$(run_benchmark "$test_case" "$concurrency") + if [ $? 
-ne 0 ]; then + echo " Failed to run benchmark" >&2 + fi + done + echo "" >&2 +done + +# Now generate the report to stdout +echo "# Barretenberg running time vs number of CPUs by component" +echo "" + +# Python script to process JSON files and generate markdown report +python3 << 'EOF' +import json +import os +import re +from collections import defaultdict +from pathlib import Path + +# Configuration +concurrency_values = [1, 2, 4, 8, 16, 32] +test_cases = [ + "ecdsar1+transfer_0_recursions+private_fpc", + "ecdsar1+transfer_1_recursions+private_fpc" +] + +# Function to load benchmark data +def load_benchmark_data(test_case, concurrency): + filename = f"/tmp/bench_{test_case}_concurrency_{concurrency}.json" + if not os.path.exists(filename): + return None + try: + with open(filename, 'r') as f: + return json.load(f) + except: + return None + +# Function to calculate speedup and efficiency +def calc_metrics(baseline_time, current_time, num_cpus): + if baseline_time == 0: + return "N/A", "N/A" + speedup = baseline_time / current_time + efficiency = (speedup / num_cpus) * 100 + return f"{speedup:.1f}x", f"{efficiency:.0f}%" + +# Function to format time +def format_time(ms): + if ms >= 1000: + return f"{ms/1000:.2f}s" + else: + return f"{ms:.2f}ms" + +# Function to extract data (convert nanoseconds to milliseconds) +def extract_data(data): + results = {} + for key, value in data.items(): + # Value is in nanoseconds, convert to milliseconds + results[key] = value / 1_000_000 + return results + +# Function to generate table row +def generate_table_row(label, times_by_cpu, available_cpus, count=None): + row = [f"**{label}**"] + + # Find the baseline CPU (1 if available, else minimum) + cpus_with_data = sorted([cpu for cpu in available_cpus if cpu in times_by_cpu]) + if not cpus_with_data: + return " | ".join(row + ["N/A"] * len(available_cpus)) + + baseline_cpu = 1 if 1 in times_by_cpu else min(cpus_with_data) + baseline = times_by_cpu.get(baseline_cpu, 0) + + for cpu in 
available_cpus: + if cpu not in times_by_cpu: + row.append("N/A") + continue + + time_ms = times_by_cpu[cpu] + time_str = format_time(time_ms) + + if cpu == baseline_cpu: + if baseline_cpu == 1: + row.append(f"{time_str}
(1.0x, 100%)") + else: + row.append(f"{time_str}
(baseline)") + else: + if baseline_cpu == 1: + speedup, efficiency = calc_metrics(baseline, time_ms, cpu) + else: + # When baseline is not 1 CPU, show relative speedup + speedup_factor = baseline / time_ms + speedup = f"{speedup_factor:.1f}x" + efficiency = f"{(speedup_factor / (cpu/baseline_cpu)) * 100:.0f}%" + + if count and count > 1: + per_iter = format_time(time_ms / count) + row.append(f"{time_str}
({speedup}, {efficiency})
{per_iter}×{count}") + else: + row.append(f"{time_str}
({speedup}, {efficiency})") + + return " | ".join(row) + +# Process each test case +for test_case in test_cases: + print(f"\n## {test_case}") + print() + + # Load all data for this test case + all_data = {} + for cpu in concurrency_values: + data = load_benchmark_data(test_case, cpu) + if data: + all_data[cpu] = extract_data(data) + + if not all_data: + print(f"No data available for {test_case}") + continue + + # Group metrics by component + components = defaultdict(lambda: defaultdict(dict)) + + for cpu, metrics in all_data.items(): + for metric_name, time_ms in metrics.items(): + # Categorize metrics based on name + if "ClientIvc" in metric_name or "ClientIVC" in metric_name: + components["Main"][metric_name][cpu] = time_ms + elif "ProtogalaxyProver" in metric_name: + components["ProtogalaxyProver"][metric_name][cpu] = time_ms + elif "OinkProver" in metric_name: + components["OinkProver"][metric_name][cpu] = time_ms + elif "Decider" in metric_name: + components["Decider"][metric_name][cpu] = time_ms + elif "Goblin" in metric_name: + components["Goblin"][metric_name][cpu] = time_ms + elif "ECCVM" in metric_name: + components["ECCVM"][metric_name][cpu] = time_ms + elif "Translator" in metric_name: + components["Translator"][metric_name][cpu] = time_ms + elif "sumcheck" in metric_name.lower(): + components["Sumcheck"][metric_name][cpu] = time_ms + elif "commit" in metric_name.lower() or "Commitment" in metric_name: + components["Commitment"][metric_name][cpu] = time_ms + elif any(x in metric_name for x in ["trace", "populate", "wire", "permutation", "lookup"]): + components["Circuit Building"][metric_name][cpu] = time_ms + else: + components["Other"][metric_name][cpu] = time_ms + + # Determine which CPU values actually have data + available_cpus = sorted(set(all_data.keys())) + + # Generate tables for each component + sections = [ + ("Main Components", "Main"), + ("ProtogalaxyProver Components", "ProtogalaxyProver"), + ("OinkProver", "OinkProver"), + ("Decider", 
"Decider"), + ("Goblin", "Goblin"), + ("ECCVM", "ECCVM"), + ("Translator", "Translator"), + ("Sumcheck", "Sumcheck"), + ("Commitment Operations", "Commitment"), + ("Circuit Building", "Circuit Building"), + ] + + for section_title, component_key in sections: + if component_key in components and components[component_key]: + print(f"\n### {section_title}") + print() + + # Table header - only show columns for CPUs we have data for + header = ["Function"] + [f"{cpu} CPU{'s' if cpu > 1 else ''}" for cpu in available_cpus] + print("| " + " | ".join(header) + " |") + print("|" + "|".join(["-" * (len(h) + 2) for h in header]) + "|") + + # Sort metrics by baseline time (descending) + sorted_metrics = sorted( + components[component_key].items(), + key=lambda x: x[1].get(1, 0), + reverse=True + ) + + # Generate rows + for metric_name, times in sorted_metrics[:20]: # Limit to top 20 + # Try to detect if this is a repeated operation + count_match = re.search(r'×(\d+)', metric_name) + count = int(count_match.group(1)) if count_match else None + + # Clean up metric name + clean_name = metric_name.replace('ProtogalaxyProver::', '').replace('OinkProver::', '') + + row = generate_table_row(clean_name, times, available_cpus, count) + print("| " + row + " |") + + # Add summary statistics + if "Main" in components and len(available_cpus) > 1: + print(f"\n### Summary") + print() + min_cpu = min(available_cpus) + max_cpu = max(available_cpus) + print(f"| Metric | {min_cpu} CPU{'s' if min_cpu > 1 else ''} | {max_cpu} CPUs | Speedup | Efficiency |") + print("|--------|-------|---------|---------|------------|") + + for metric_name, times in components["Main"].items(): + if min_cpu in times and max_cpu in times: + baseline = times[min_cpu] + final = times[max_cpu] + speedup, efficiency = calc_metrics(baseline, final, max_cpu/min_cpu if min_cpu != 1 else max_cpu) + print(f"| {metric_name} | {format_time(baseline)} | {format_time(final)} | {speedup} | {efficiency} |") + +EOF + +echo "" +echo 
"Report generation complete!" diff --git a/barretenberg/cpp/scripts/benchmark.sh b/barretenberg/cpp/scripts/benchmark.sh index 6b6758700f48..e8da49f8dc0e 100755 --- a/barretenberg/cpp/scripts/benchmark.sh +++ b/barretenberg/cpp/scripts/benchmark.sh @@ -3,7 +3,7 @@ set -eu BENCHMARK=${1:-goblin_bench} COMMAND=${2:-./$BENCHMARK} -PRESET=${3:-clang16} +PRESET=${3:-clang20} BUILD_DIR=${4:-build} @@ -12,9 +12,9 @@ cd $(dirname $0)/.. # Configure and build. cmake --preset $PRESET -cmake --build --preset $PRESET --target $BENCHMARK +cmake --build --preset $PRESET --target $BENCHMARK cd $BUILD_DIR # Consistency with _wasm.sh targets / shorter $COMMAND. cp ./bin/$BENCHMARK . -$COMMAND \ No newline at end of file +$COMMAND diff --git a/barretenberg/cpp/scripts/benchmark_client_ivc.sh b/barretenberg/cpp/scripts/benchmark_client_ivc.sh index baacc910ff34..92de32ff3747 100755 --- a/barretenberg/cpp/scripts/benchmark_client_ivc.sh +++ b/barretenberg/cpp/scripts/benchmark_client_ivc.sh @@ -2,8 +2,8 @@ set -eu TARGET=${1:-"client_ivc_bench"} -BENCHMARK="ClientIVCBench/Full/6" -BUILD_DIR="build-op-count-time" +BENCHMARK="ClientIVCBench/Full/5" +BUILD_DIR="build" FILTER="${BENCHMARK}$" # '$' to ensure only specified bench is run # Move above script dir. @@ -11,16 +11,12 @@ cd $(dirname $0)/.. # Measure the benchmarks with ops time counting ./scripts/benchmark_remote.sh "$TARGET"\ - "./$TARGET --benchmark_filter=$FILTER\ + "BB_BENCH=1 ./$TARGET --benchmark_filter=$FILTER\ --benchmark_out=$TARGET.json\ --benchmark_out_format=json"\ - op-count-time\ + clang20\ "$BUILD_DIR" # Retrieve output from benching instance cd $BUILD_DIR scp $BB_SSH_KEY $BB_SSH_INSTANCE:$BB_SSH_CPP_PATH/build/$TARGET.json . 
- -# Analyze the results -cd ../ -python3 ./scripts/analyze_client_ivc_bench.py --json "$TARGET.json" --benchmark "$BENCHMARK" --prefix "$BUILD_DIR" diff --git a/barretenberg/cpp/scripts/benchmark_example_ivc_flow_remote.sh b/barretenberg/cpp/scripts/benchmark_example_ivc_flow_remote.sh index a5e9dd968c0a..9e40ca57bec9 100755 --- a/barretenberg/cpp/scripts/benchmark_example_ivc_flow_remote.sh +++ b/barretenberg/cpp/scripts/benchmark_example_ivc_flow_remote.sh @@ -1,13 +1,13 @@ #!/usr/bin/env bash set -eu -TARGET=${1:-"bb_cli_bench"} +TARGET=${1:-"bb"} #FLOW=${2:-"ecdsar1+amm_add_liquidity_1_recursions+sponsored_fpc"} #FLOW=${2:-"ecdsar1+transfer_1_recursions+private_fpc"} #FLOW=${2:-"ecdsar1+transfer_1_recursions+sponsored_fpc"} #FLOW=${2:-"ecdsar1+transfer_1_recursions+sponsored_fpc"} FLOW=${2:-"schnorr+deploy_tokenContract_with_registration+sponsored_fpc"} -BUILD_DIR="build-op-count-time" +BUILD_DIR="build" # Move above script dir. cd $(dirname $0)/.. @@ -15,17 +15,9 @@ cd $(dirname $0)/.. scp $BB_SSH_KEY ../../yarn-project/end-to-end/example-app-ivc-inputs-out/$FLOW/ivc-inputs.msgpack $BB_SSH_INSTANCE:$BB_SSH_CPP_PATH/build/ # Measure the benchmarks with ops time counting + ./scripts/benchmark_remote.sh "$TARGET"\ - "MAIN_ARGS='prove -o output --ivc_inputs_path ivc-inputs.msgpack --scheme client_ivc'\ - ./$TARGET --benchmark_out=$TARGET.json\ - --benchmark_out_format=json"\ - op-count-time\ + "./$TARGET prove -o output --ivc_inputs_path ivc-inputs.msgpack --scheme client_ivc\ + --print_bench"\ + clang20\ "$BUILD_DIR" - -# Retrieve output from benching instance -cd $BUILD_DIR -scp $BB_SSH_KEY $BB_SSH_INSTANCE:$BB_SSH_CPP_PATH/build/$TARGET.json . 
- -# Analyze the results -cd ../ -python3 ./scripts/analyze_client_ivc_bench.py --json "$TARGET.json" --benchmark "" --prefix "$BUILD_DIR" diff --git a/barretenberg/cpp/scripts/benchmark_field_ops_percentage.sh b/barretenberg/cpp/scripts/benchmark_field_ops_percentage.sh index 0725b7a1cea8..a6def49911b0 100755 --- a/barretenberg/cpp/scripts/benchmark_field_ops_percentage.sh +++ b/barretenberg/cpp/scripts/benchmark_field_ops_percentage.sh @@ -25,8 +25,8 @@ FIELD_OP_COSTS=field_op_costs.json if [ ! -f $FIELD_OP_COSTS ]; then cd ../ FIELD_OPS_TARGET=fr_straight_bench - cmake --preset clang16 - cmake --build --preset clang16 --target $FIELD_OPS_TARGET + cmake --preset clang20 + cmake --build --preset clang20 --target $FIELD_OPS_TARGET cd build ./bin/$FIELD_OPS_TARGET --benchmark_out=../$BUILD_OP_COUNT_TRACK_DIR/$FIELD_OP_COSTS \ --benchmark_out_format=json diff --git a/barretenberg/cpp/scripts/benchmark_protogalaxy.sh b/barretenberg/cpp/scripts/benchmark_protogalaxy.sh index 8eb4d808c3f0..ceb009546cb0 100755 --- a/barretenberg/cpp/scripts/benchmark_protogalaxy.sh +++ b/barretenberg/cpp/scripts/benchmark_protogalaxy.sh @@ -3,18 +3,18 @@ set -eu TARGET="protogalaxy_bench" FILTER="/16$" -BUILD_DIR=build-op-count-time +BUILD_DIR=build # Move above script dir. cd $(dirname $0)/.. 
# Measure the benchmarks with ops time counting ./scripts/benchmark_remote.sh protogalaxy_bench\ - "./protogalaxy_bench --benchmark_filter=$FILTER\ + "BB_BENCH=1 ./protogalaxy_bench --benchmark_filter=$FILTER\ --benchmark_out=$TARGET.json\ --benchmark_out_format=json"\ - op-count-time\ - build-op-count-time + clang20\ + $BUILD_DIR # Retrieve output from benching instance cd $BUILD_DIR diff --git a/barretenberg/cpp/scripts/benchmark_remote.sh b/barretenberg/cpp/scripts/benchmark_remote.sh index 3c32180f1608..705dfe2e0f70 100755 --- a/barretenberg/cpp/scripts/benchmark_remote.sh +++ b/barretenberg/cpp/scripts/benchmark_remote.sh @@ -9,8 +9,8 @@ set -eu BENCHMARK=${1:-client_ivc_bench} COMMAND=${2:-./$BENCHMARK} -PRESET=${3:-clang16-no-avm} -BUILD_DIR=${4:-build} +PRESET=${3:-clang20-no-avm} +BUILD_DIR=${4:-build-no-avm} HARDWARE_CONCURRENCY=${HARDWARE_CONCURRENCY:-16} # Move above script dir. diff --git a/barretenberg/cpp/scripts/benchmarks.sh b/barretenberg/cpp/scripts/benchmarks.sh index f749554189cc..4d43ddf87a5d 100755 --- a/barretenberg/cpp/scripts/benchmarks.sh +++ b/barretenberg/cpp/scripts/benchmarks.sh @@ -5,8 +5,8 @@ set -eu cd $(dirname $0)/.. # Configure and build. -cmake --preset clang16 -cmake --build --preset clang16 +cmake --preset clang20 +cmake --build --preset clang20 cd build diff --git a/barretenberg/cpp/scripts/ci_benchmark_ivc_flows.sh b/barretenberg/cpp/scripts/ci_benchmark_ivc_flows.sh index 902fb773f722..9aa9888f90bb 100755 --- a/barretenberg/cpp/scripts/ci_benchmark_ivc_flows.sh +++ b/barretenberg/cpp/scripts/ci_benchmark_ivc_flows.sh @@ -4,12 +4,10 @@ source $(git rev-parse --show-toplevel)/ci3/source if [[ $# -ne 2 ]]; then - echo "Usage: $0 " + echo "Usage: $0 " exit 1 fi cd .. -export input_folder="$1" -benchmark_output="$2" echo_header "bb ivc flow bench" @@ -25,10 +23,10 @@ function verify_ivc_flow { # TODO(AD): Checking which one would be good, but there isn't too much that can go wrong here. set +e echo_stderr "Private verify." 
- "./$native_build_dir/bin/bb" verify --scheme client_ivc -p "$proof" -k ../../yarn-project/bb-prover/artifacts/private-civc-vk 1>&2 + "./$native_build_dir/bin/bb" verify --scheme client_ivc -p "$proof" -k ../../noir-projects/noir-protocol-circuits/target/keys/hiding_kernel_to_rollup.ivc.vk 1>&2 local private_result=$? echo_stderr "Private verify: $private_result." - "./$native_build_dir/bin/bb" verify --scheme client_ivc -p "$proof" -k ../../yarn-project/bb-prover/artifacts/public-civc-vk 1>&2 + "./$native_build_dir/bin/bb" verify --scheme client_ivc -p "$proof" -k ../../noir-projects/noir-protocol-circuits/target/keys/hiding_kernel_to_public.ivc.vk 1>&2 local public_result=$? echo_stderr "Public verify: $public_result." if [[ $private_result -eq $public_result ]]; then @@ -36,7 +34,7 @@ function verify_ivc_flow { exit 1 fi if [[ $private_result -ne 0 ]] && [[ $public_result -ne 0 ]]; then - echo_stderr "Verification failed for $flow. Did not verify with precalculated verification key - we may need to revisit how it is generated in yarn-project/bb-prover." + echo_stderr "Verification failed for $flow. Did not verify with precalculated verification key - we may need to revisit how it is generated in noir-projects/noir-protocol-circuits." 
exit 1 fi } @@ -44,23 +42,18 @@ function verify_ivc_flow { function run_bb_cli_bench { local runtime="$1" local output="$2" - local args="$3" - export MAIN_ARGS="$args" + shift 2 if [[ "$runtime" == "native" ]]; then - memusage "./$native_build_dir/bin/bb_cli_bench" \ - --benchmark_out=$output/op-counts.json \ - --benchmark_out_format=json || { - echo "bb_cli_bench native failed with args: $args" + memusage "./$native_build_dir/bin/bb" "$@" || { + echo "bb native failed with args: $@" exit 1 } else # wasm export WASMTIME_ALLOWED_DIRS="--dir=$flow_folder --dir=$output" # TODO support wasm op count time preset - memusage scripts/wasmtime.sh $WASMTIME_ALLOWED_DIRS ./build-wasm-threads/bin/bb_cli_bench \ - --benchmark_out=$output/op-counts.json \ - --benchmark_out_format=json || { - echo "bb_cli_bench wasm failed with args: $args" + memusage scripts/wasmtime.sh $WASMTIME_ALLOWED_DIRS ./build-wasm-threads/bin/bb "$@" || { + echo "bb wasm failed with args: $@" exit 1 } fi @@ -79,11 +72,7 @@ function client_ivc_flow { mkdir -p "$output" export MEMUSAGE_OUT="$output/peak-memory-mb.txt" - run_bb_cli_bench "$runtime" "$output" "prove -o $output --ivc_inputs_path $flow_folder/ivc-inputs.msgpack --scheme client_ivc -v" - - if [[ "${NATIVE_PRESET:-}" == op-count-time && "$runtime" != wasm ]]; then - python3 scripts/analyze_client_ivc_bench.py --prefix . --json $output/op-counts.json --benchmark "" - fi + run_bb_cli_bench "$runtime" "$output" prove -o $output --ivc_inputs_path $flow_folder/ivc-inputs.msgpack --scheme client_ivc -v --print_bench local end=$(date +%s%N) local elapsed_ns=$(( end - start )) diff --git a/barretenberg/cpp/scripts/collect_profile_information.sh b/barretenberg/cpp/scripts/collect_profile_information.sh index 1323e157b020..0acef0bb5fc2 100755 --- a/barretenberg/cpp/scripts/collect_profile_information.sh +++ b/barretenberg/cpp/scripts/collect_profile_information.sh @@ -43,7 +43,7 @@ function shorten_cpp_names() { } # Process benchmark file. 
-llvm-xray-16 stack xray-log.$EXECUTABLE.* \ +llvm-xray-20 stack xray-log.$EXECUTABLE.* \ --instr_map=./bin/$EXECUTABLE --stack-format=flame --aggregate-threads --aggregation-type=time --all-stacks \ | node ../scripts/llvm_xray_stack_flame_corrector.js \ | shorten_cpp_names \ diff --git a/barretenberg/cpp/scripts/compare_branch_vs_baseline.sh b/barretenberg/cpp/scripts/compare_branch_vs_baseline.sh index 717b7be7800b..4cf0995c1951 100755 --- a/barretenberg/cpp/scripts/compare_branch_vs_baseline.sh +++ b/barretenberg/cpp/scripts/compare_branch_vs_baseline.sh @@ -12,7 +12,7 @@ # Specify the benchmark suite and the "baseline" branch against which to compare BENCHMARK=${1:-client_ivc_bench} FILTER=${2:-""} -PRESET=${3:-clang16} +PRESET=${3:-clang20} BUILD_DIR=${4:-build} HARDWARE_CONCURRENCY=${HARDWARE_CONCURRENCY:-16} @@ -58,4 +58,4 @@ echo -e "\nRunning benchmark in feature branch.." $BENCH_TOOLS_DIR/compare.py benchmarks $BUILD_DIR/results_before.json $BUILD_DIR/results_after.json # Return to branch from which the script was called -git checkout - \ No newline at end of file +git checkout - diff --git a/barretenberg/cpp/scripts/compare_branch_vs_baseline_remote.sh b/barretenberg/cpp/scripts/compare_branch_vs_baseline_remote.sh index 50cf755990a5..b0034901e07e 100755 --- a/barretenberg/cpp/scripts/compare_branch_vs_baseline_remote.sh +++ b/barretenberg/cpp/scripts/compare_branch_vs_baseline_remote.sh @@ -12,7 +12,7 @@ # Specify the benchmark suite and the "baseline" branch against which to compare BENCHMARK=${1:-client_ivc_bench} FILTER=${2:-"*."} -PRESET=${3:-clang16} +PRESET=${3:-clang20} BUILD_DIR=${4:-build} HARDWARE_CONCURRENCY=${HARDWARE_CONCURRENCY:-16} @@ -57,4 +57,4 @@ scp $BB_SSH_KEY $BB_SSH_INSTANCE:$BB_SSH_CPP_PATH/build/results_before.json $BUI $BENCH_TOOLS_DIR/compare.py benchmarks $BUILD_DIR/results_before.json $BUILD_DIR/results_after.json # Return to branch from which the script was called -git checkout - \ No newline at end of file +git checkout - 
diff --git a/barretenberg/cpp/scripts/find-bb b/barretenberg/cpp/scripts/find-bb new file mode 100755 index 000000000000..2f66aff9db36 --- /dev/null +++ b/barretenberg/cpp/scripts/find-bb @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +source $(git rev-parse --show-toplevel)/ci3/source + +echo $root/barretenberg/cpp/$(./native-preset-build-dir)/bin/bb diff --git a/barretenberg/cpp/scripts/lmdblib_tests.sh b/barretenberg/cpp/scripts/lmdblib_tests.sh index 73b99d5bb13f..d130906d463f 100755 --- a/barretenberg/cpp/scripts/lmdblib_tests.sh +++ b/barretenberg/cpp/scripts/lmdblib_tests.sh @@ -7,7 +7,7 @@ cd $(dirname $0)/.. DEFAULT_TESTS=LMDBStoreTest.*:LMDBEnvironmentTest.* TEST=${1:-$DEFAULT_TESTS} -PRESET=${PRESET:-clang16} +PRESET=${PRESET:-clang20} cmake --build --preset $PRESET --target lmdblib_tests ./build/bin/lmdblib_tests --gtest_filter=$TEST diff --git a/barretenberg/cpp/scripts/merkle_tree_tests.sh b/barretenberg/cpp/scripts/merkle_tree_tests.sh index 2b7719b0cdb2..38479fb652da 100755 --- a/barretenberg/cpp/scripts/merkle_tree_tests.sh +++ b/barretenberg/cpp/scripts/merkle_tree_tests.sh @@ -7,7 +7,7 @@ cd $(dirname $0)/.. 
DEFAULT_TESTS=PersistedIndexedTreeTest.*:PersistedAppendOnlyTreeTest.*:LMDBTreeStoreTest.*:PersistedContentAddressedIndexedTreeTest.*:PersistedContentAddressedAppendOnlyTreeTest.*:ContentAddressedCacheTest.* TEST=${1:-$DEFAULT_TESTS} -PRESET=${PRESET:-clang16} +PRESET=${PRESET:-clang20} cmake --build --preset $PRESET --target crypto_merkle_tree_tests ./build/bin/crypto_merkle_tree_tests --gtest_filter=$TEST diff --git a/barretenberg/cpp/scripts/native-preset-build-dir b/barretenberg/cpp/scripts/native-preset-build-dir index 17f3692a6d9a..b61426592749 100755 --- a/barretenberg/cpp/scripts/native-preset-build-dir +++ b/barretenberg/cpp/scripts/native-preset-build-dir @@ -5,7 +5,7 @@ set -euo pipefail cd $(dirname $0) -preset=${NATIVE_PRESET:-clang16-assert} +preset=${NATIVE_PRESET:-clang20} declare -A preset_to_dir preset_to_inherits diff --git a/barretenberg/cpp/scripts/run_test.sh b/barretenberg/cpp/scripts/run_test.sh index 5f345b162502..c362de0b6aa8 100755 --- a/barretenberg/cpp/scripts/run_test.sh +++ b/barretenberg/cpp/scripts/run_test.sh @@ -4,7 +4,7 @@ # It means we can return a concise, easy to read, easy to run command for reproducing a test run. set -eu -export native_preset=${NATIVE_PRESET:-clang16-assert} +export native_preset=${NATIVE_PRESET:-clang20} cd $(dirname $0)/.. # E.g. build, build-debug or build-coverage diff --git a/barretenberg/cpp/scripts/test_civc_standalone_vks_havent_changed.sh b/barretenberg/cpp/scripts/test_civc_standalone_vks_havent_changed.sh index 38f84a5b765d..6422bc101769 100755 --- a/barretenberg/cpp/scripts/test_civc_standalone_vks_havent_changed.sh +++ b/barretenberg/cpp/scripts/test_civc_standalone_vks_havent_changed.sh @@ -11,7 +11,7 @@ cd .. # - Generate a hash for versioning: sha256sum bb-civc-inputs.tar.gz # - Upload the compressed results: aws s3 cp bb-civc-inputs.tar.gz s3://aztec-ci-artifacts/protocol/bb-civc-inputs-[hash(0:8)].tar.gz # Note: In case of the "Test suite failed to run ... 
Unexpected token 'with' " error, need to run: docker pull aztecprotocol/build:3.0 -pinned_short_hash="ca12eef6" +pinned_short_hash="93eaa5ec" pinned_civc_inputs_url="https://aztec-ci-artifacts.s3.us-east-2.amazonaws.com/protocol/bb-civc-inputs-${pinned_short_hash}.tar.gz" function compress_and_upload { diff --git a/barretenberg/cpp/scripts/test_coverage.sh b/barretenberg/cpp/scripts/test_coverage.sh index f60e45bed415..707cb046220c 100755 --- a/barretenberg/cpp/scripts/test_coverage.sh +++ b/barretenberg/cpp/scripts/test_coverage.sh @@ -6,7 +6,7 @@ cd .. # For more verbose logging export CI=1 # Affects meaning of 'native' in bootstrap and run_test.sh -export NATIVE_PRESET=clang16-coverage +export NATIVE_PRESET=clang20-coverage # target max time of 15 minutes, but timing out at all is painful so bump high export TIMEOUT=40m ./bootstrap.sh build_native @@ -20,7 +20,7 @@ function test_cmds { # ../acir_tests/bootstrap.sh test_cmds | grep -v main.js | grep -v browser # echo "disabled-cache NO_WASM=1 barretenberg/cpp/bootstrap.sh bench_ivc origin/master" } -(test_cmds || exit 1) | parallelise +(test_cmds || exit 1) | parallelize # Run llvm-profdata to merge raw profiles llvm-profdata-16 merge -sparse build-coverage/profdata/*.profraw -o build-coverage/coverage.profdata @@ -30,7 +30,7 @@ for bin in ./build-coverage/bin/*_tests; do done # Generate coverage report with llvm-cov -llvm-cov-16 show \ +llvm-cov-20 show \ -instr-profile=build-coverage/coverage.profdata \ -format=html \ -output-dir=build-coverage/coverage-report \ diff --git a/barretenberg/cpp/scripts/world_state_tests.sh b/barretenberg/cpp/scripts/world_state_tests.sh index 2eba93bbf626..917ffbc1b6cd 100755 --- a/barretenberg/cpp/scripts/world_state_tests.sh +++ b/barretenberg/cpp/scripts/world_state_tests.sh @@ -6,7 +6,7 @@ set -e cd $(dirname $0)/.. 
TEST=${1:-*} -PRESET=${PRESET:-clang16} +PRESET=${PRESET:-clang20} cmake --build --preset $PRESET --target world_state_tests ./build/bin/world_state_tests --gtest_filter=WorldStateTest.${TEST} diff --git a/barretenberg/cpp/scripts/zig-ar.sh b/barretenberg/cpp/scripts/zig-ar.sh new file mode 100755 index 000000000000..9bb97ee0d2c4 --- /dev/null +++ b/barretenberg/cpp/scripts/zig-ar.sh @@ -0,0 +1,2 @@ +#!/bin/bash +exec zig ar "$@" diff --git a/barretenberg/cpp/scripts/zig-ranlib.sh b/barretenberg/cpp/scripts/zig-ranlib.sh new file mode 100755 index 000000000000..774f6523dd58 --- /dev/null +++ b/barretenberg/cpp/scripts/zig-ranlib.sh @@ -0,0 +1,2 @@ +#!/bin/bash +exec zig ranlib "$@" diff --git a/barretenberg/cpp/src/CMakeLists.txt b/barretenberg/cpp/src/CMakeLists.txt index 864d466e2b18..e8305d00eca2 100644 --- a/barretenberg/cpp/src/CMakeLists.txt +++ b/barretenberg/cpp/src/CMakeLists.txt @@ -26,7 +26,7 @@ if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") endif() if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 18) - # We target clang18 and need this, eventually warning should be fixed or this will be unconditional. + # We target clang20 and need this, eventually warning should be fixed or this will be unconditional. add_compile_options(-Wno-vla-cxx-extension) # This gets in the way of a partial object initialization (i.e. 
MyClass my_class{ .my_member = init_value }) add_compile_options(-Wno-missing-field-initializers) @@ -49,7 +49,12 @@ if(WASM) add_link_options(-Wl,--export-memory,--import-memory,--stack-first,-z,stack-size=1048576,--max-memory=4294967296) endif() -include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${MSGPACK_INCLUDE} ${TRACY_INCLUDE} ${LMDB_INCLUDE} ${LIBDEFLATE_INCLUDE}) +include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${MSGPACK_INCLUDE} ${TRACY_INCLUDE} ${LMDB_INCLUDE} ${LIBDEFLATE_INCLUDE} ${HTTPLIB_INCLUDE} ${NLOHMANN_JSON_INCLUDE}) + +# Add avm-transpiler include path when library is provided +if(AVM_TRANSPILER_LIB) + include_directories(${AVM_TRANSPILER_INCLUDE}) +endif() # I feel this should be limited to ecc, however it's currently used in headers that go across libraries, # and there currently isn't an easy way to inherit the DDISABLE_ASM parameter. @@ -252,7 +257,7 @@ if(WASM) DEPENDS ${CMAKE_BINARY_DIR}/bin/barretenberg.wasm.gz ) - if(CHECK_CIRCUIT_STACKTRACES) + if(ENABLE_STACKTRACES) target_link_libraries( barretenberg.wasm PUBLIC diff --git a/barretenberg/cpp/src/barretenberg/acir_formal_proofs/README.md b/barretenberg/cpp/src/barretenberg/acir_formal_proofs/README.md index aaf34cd4d947..c0470e050ce7 100644 --- a/barretenberg/cpp/src/barretenberg/acir_formal_proofs/README.md +++ b/barretenberg/cpp/src/barretenberg/acir_formal_proofs/README.md @@ -1,4 +1,4 @@ -#Formal Verification of ACIR Instructions +# Formal Verification of ACIR Instructions This module provides formal verification capabilities for ACIR (Arithmetic Circuit Intermediate Representation) instructions generated from Noir SSA code. 
@@ -20,31 +20,31 @@ The verifier uses SMT (Satisfiability Modulo Theories) solving to formally verif | Opcode | Lhs type/size | Rhs type/size | Time/seconds | Memory/GB | Success | SMT Term Type | Reason | | ----------- | ------------- | ------------- | ------------ | --------- | ------- | ---------------- | ------------------------------------------------------------------------------------------------------------- | -| Binary::Add | Field | Field | 0.024 | - | ✓ -| TermType::FFTerm | | | Binary::Add | Unsigned_127 | Unsigned_127 | 2.8 | - | ✓ -| TermType::BVTerm | | | Binary::And | Unsigned_32 | Unsigned_32 | 6.7 | - | ✓ -| TermType::BVTerm | | | Binary::And | Unsigned_127 | Unsigned_127 | 7.5 | - | ✗ | TermType::BVTerm | [smt solver lookup doesnt support 2bits tables](https://github.com/AztecProtocol/aztec-packages/issues/11721) | -| Binary::Div | Field | Field | 0.024 | - | ✓ | TermType::FFTerm | | -| Binary::Div | Unsigned_126 | Unsigned_126 | 402.7 | 3.5 | ✗ | TermType::BVTerm | [Field and bitvector logic mixing](https://github.com/AztecProtocol/aztec-packages/issues/11722) | -| Binary::Div | Signed_126 | Signed_126 | >17 days | 5.1 | ✗ | TermType::BVTerm | [Field and bitvector logic mixing](https://github.com/AztecProtocol/aztec-packages/issues/11722) | -| Binary::Eq | Field | Field | 19.2 | - | ✓ | TermType::FFTerm | | -| Binary::Eq | Unsigned_127 | Unsigned_127 | 22.8 | - | ✓ | TermType::BVTerm | | -| Binary::Lt | Unsigned_127 | Unsigned_127 | 56.7 | - | ✓ | TermType::BVTerm | | -| Binary::Mod | Unsigned_127 | Unsigned_127 | - | 3.2 | ✗ | TermType::BVTerm | [Field and bitvector logic mixing](https://github.com/AztecProtocol/aztec-packages/issues/11722) | -| Binary::Mul | Field | Field | 0.024 | - | ✓ | TermType::FFTerm | | -| Binary::Mul | Unsigned_127 | Unsigned_127 | 10.0 | - | ✓ | TermType::BVTerm | | -| Binary::Or | Unsigned_32 | Unsigned_32 | 18.0 | - | ✓ | TermType::BVTerm | | +| Binary::Add | Field | Field | 0.024 | - | ✓ | TermType::FFTerm | - | +| 
Binary::Add | Unsigned_127 | Unsigned_127 | 2.8 | - | ✓ | TermType::BVTerm | - | +| Binary::And | Unsigned_32 | Unsigned_32 | 6.7 | - | ✓ | TermType::BVTerm | - | +| Binary::And | Unsigned_127 | Unsigned_127 | 7.5 | - | ✗ | TermType::BVTerm | [smt solver lookup doesnt support 2bits tables](https://github.com/AztecProtocol/aztec-packages/issues/11721) | +| Binary::Div | Field | Field | 0.024 | - | ✓ | TermType::FFTerm | - | +| Binary::Div | Unsigned_126 | Unsigned_126 | >130 days | 20 | ✗ | TermType::BVTerm | Test takes too long | +| Binary::Div | Signed_126 | Signed_126 | >130 days | 20 | ✗ | TermType::BVTerm | Test takes too long | +| Binary::Eq | Field | Field | 19.2 | - | ✓ | TermType::FFTerm | - | +| Binary::Eq | Unsigned_127 | Unsigned_127 | 22.8 | - | ✓ | TermType::BVTerm | - | +| Binary::Lt | Unsigned_127 | Unsigned_127 | 56.7 | - | ✓ | TermType::BVTerm | - | +| Binary::Mod | Unsigned_127 | Unsigned_127 | >130 days | 3.2 | ✗ | TermType::BVTerm | Test takes too long | +| Binary::Mul | Field | Field | 0.024 | - | ✓ | TermType::FFTerm | - | +| Binary::Mul | Unsigned_127 | Unsigned_127 | 10.0 | - | ✓ | TermType::BVTerm | - | +| Binary::Or | Unsigned_32 | Unsigned_32 | 18.0 | - | ✓ | TermType::BVTerm | - | | Binary::Or | Unsigned_127 | Unsigned_127 | 7.5 | - | ✗ | TermType::BVTerm | [smt solver lookup doesnt support 2bits tables](https://github.com/AztecProtocol/aztec-packages/issues/11721) | -| Binary::Shl | Unsigned_64 | Unsigned_8 | 42331.61 | 63.2 | ✓ | TermType::BVTerm | | -| Binary::Shl | Unsigned_32 | Unsigned_8 | 4574.0 | 30 | ✓ | TermType::BVTerm | | -| Binary::Shr | Unsigned_64 | Unsigned_8 | 3927.88 | 10 | ✓ | TermType::BVTerm | | -| Binary::Sub | Unsigned_127 | Unsigned_127 | 3.3 | - | ✓ | TermType::BVTerm | | -| Binary::Xor | Unsigned_32 | Unsigned_32 | 14.7 | - | ✓ | TermType::BVTerm | | +| Binary::Shl | Unsigned_64 | Unsigned_8 | 42331.61 | 63.2 | ✓ | TermType::BVTerm | - | +| Binary::Shl | Unsigned_32 | Unsigned_8 | 4574.0 | 30 | ✓ | 
TermType::BVTerm | - | +| Binary::Shr | Unsigned_64 | Unsigned_8 | 3927.88 | 10 | ✓ | TermType::BVTerm | - | +| Binary::Sub | Unsigned_127 | Unsigned_127 | 3.3 | - | ✓ | TermType::BVTerm | - | +| Binary::Xor | Unsigned_32 | Unsigned_32 | 14.7 | - | ✓ | TermType::BVTerm | - | | Binary::Xor | Unsigned_127 | Unsigned_127 | 7.5 | - | ✗ | TermType::BVTerm | [smt solver lookup doesnt support 2bits tables](https://github.com/AztecProtocol/aztec-packages/issues/11721) | -| Not | Unsigned_127 | - | 0.2 | - | ✓ | TermType::BVTerm | | -| Cast | Field | Unsigned_64 | 0.05 | - | ✓ | TermType::FFTerm | | -| Cast | Unsigned_64 | Unsigned_8 | 0.07 | - | ✓ | TermType::BVTerm | | -| Cast | Unsigned_8 | Unsigned_64 | 0.6 | - | ✓ | TermType::BVTerm | | +| Not | Unsigned_127 | - | 0.2 | - | ✓ | TermType::BVTerm | - | +| Cast | Field | Unsigned_64 | 0.05 | - | ✓ | TermType::FFTerm | - | +| Cast | Unsigned_64 | Unsigned_8 | 0.07 | - | ✓ | TermType::BVTerm | - | +| Cast | Unsigned_8 | Unsigned_64 | 0.6 | - | ✓ | TermType::BVTerm | - | Each test attempts to find counterexamples that violate the expected behavior. A passing test indicates the operation is correctly implemented, while a failing test reveals potential issues. 
diff --git a/barretenberg/cpp/src/barretenberg/api/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/api/CMakeLists.txt index 201c87f41c90..363208fef964 100644 --- a/barretenberg/cpp/src/barretenberg/api/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/api/CMakeLists.txt @@ -1 +1,6 @@ -barretenberg_module(api client_ivc bbapi dsl libdeflate::libdeflate_static) +barretenberg_module(api client_ivc bbapi dsl libdeflate::libdeflate_static nlohmann_json::nlohmann_json) + +# Link avm_transpiler when library is provided +if(AVM_TRANSPILER_LIB) + target_link_libraries(api_objects PRIVATE avm_transpiler) +endif() diff --git a/barretenberg/cpp/src/barretenberg/api/acir_format_getters.cpp b/barretenberg/cpp/src/barretenberg/api/acir_format_getters.cpp index e2a6916c1a8b..2089ddc325a0 100644 --- a/barretenberg/cpp/src/barretenberg/api/acir_format_getters.cpp +++ b/barretenberg/cpp/src/barretenberg/api/acir_format_getters.cpp @@ -1,5 +1,5 @@ #include "barretenberg/api/file_io.hpp" -#include "barretenberg/api/get_bytecode.hpp" +#include "barretenberg/common/get_bytecode.hpp" #include "barretenberg/dsl/acir_format/acir_format.hpp" #include "barretenberg/dsl/acir_format/acir_to_constraint_buf.hpp" diff --git a/barretenberg/cpp/src/barretenberg/api/api.hpp b/barretenberg/cpp/src/barretenberg/api/api.hpp index a8097be92071..5fbaafca505a 100644 --- a/barretenberg/cpp/src/barretenberg/api/api.hpp +++ b/barretenberg/cpp/src/barretenberg/api/api.hpp @@ -14,18 +14,18 @@ class API { // zero knowledge variant of the protocol by default std::filesystem::path crs_path{ "" }; // the location of reference strings for commitment schemes bool recursive{ false }; // deprecated flag indicating that a circuit is to be recursively verified - bool init_kzg_accumulator{ false }; // stripped down version fo `recursive` in the UltraHonk; also deprecated? - uint32_t honk_recursion{ 0 }; // flag that differentiates between different recursion modes; deprecated? 
bool ipa_accumulation{ false }; // indicate whether the command is doing IPA proof aggregation std::string scheme; // the proving system or IVC scheme std::string oracle_hash_type; // which hash function does the prover use as a random oracle? - std::string output_format; // output bytes, fields, both, or a msgpack buffer of fields std::string verifier_type; // is a verification key for use a single circuit verifier (e.g. a SNARK or folding // recursive verifier) or is it for an ivc verifier? bool write_vk{ false }; // should we addditionally write the verification key when writing the proof bool include_gates_per_opcode{ false }; // should we include gates_per_opcode in the gates command output bool slow_low_memory{ false }; // use file backed memory for polynomials - bool update_inputs{ false }; // use file backed memory for polynomials + std::string storage_budget; // storage budget for file backed memory (e.g. "500m", "2g") + bool update_inputs{ false }; // update inputs when check fails + + bool optimized_solidity_verifier{ false }; // should we use the optimized sol verifier? 
(temp) friend std::ostream& operator<<(std::ostream& os, const Flags& flags) { @@ -34,17 +34,14 @@ class API { << " debug: " << flags.debug << "\n" << " disable_zk: " << flags.disable_zk << "\n" << " crs_path: " << flags.crs_path << "\n" - << " recursive: " << flags.recursive << "\n" - << " init_kzg_accumulator: " << flags.init_kzg_accumulator << "\n" - << " honk_recursion: " << flags.honk_recursion << "\n" << " ipa_accumulation: " << flags.ipa_accumulation << "\n" << " scheme: " << flags.scheme << "\n" << " oracle_hash_type: " << flags.oracle_hash_type << "\n" - << " output_format: " << flags.output_format << "\n" << " verifier_type: " << flags.verifier_type << "\n" << " write_vk " << flags.write_vk << "\n" << " include_gates_per_opcode " << flags.include_gates_per_opcode << "\n" << " slow_low_memory " << flags.slow_low_memory << "\n" + << " storage_budget " << flags.storage_budget << "\n" << "]" << std::endl; return os; } diff --git a/barretenberg/cpp/src/barretenberg/api/api_avm.cpp b/barretenberg/cpp/src/barretenberg/api/api_avm.cpp index 6f7a848c3f64..d8cd25b8e9d1 100644 --- a/barretenberg/cpp/src/barretenberg/api/api_avm.cpp +++ b/barretenberg/cpp/src/barretenberg/api/api_avm.cpp @@ -43,7 +43,7 @@ void avm_prove(const std::filesystem::path& inputs_path, const std::filesystem:: bool res = avm.verify(proof, inputs.publicInputs, vk); info("verification: ", res ? "success" : "failure"); if (!res) { - throw std::runtime_error("Generated proof is invalid!1!!1"); + throw std::runtime_error("Generated proof is invalid!!!!!"); } } @@ -75,5 +75,17 @@ bool avm_verify(const std::filesystem::path& proof_path, return res; } +void avm_simulate(const std::filesystem::path& inputs_path) +{ + // This includes input deserialization as well. 
+ AVM_TRACK_TIME("command/avm_simulate", { + avm2::AvmAPI avm; + auto inputs = avm2::AvmAPI::ProvingInputs::from(read_file(inputs_path)); + avm.simulate(inputs.hints); + }); + + print_avm_stats(); +} + } // namespace bb #endif diff --git a/barretenberg/cpp/src/barretenberg/api/api_avm.hpp b/barretenberg/cpp/src/barretenberg/api/api_avm.hpp index cf8a4e8a5eb5..c183719d48eb 100644 --- a/barretenberg/cpp/src/barretenberg/api/api_avm.hpp +++ b/barretenberg/cpp/src/barretenberg/api/api_avm.hpp @@ -32,5 +32,14 @@ void avm_check_circuit(const std::filesystem::path& inputs_path); bool avm_verify(const std::filesystem::path& proof_path, const std::filesystem::path& public_inputs_path, const std::filesystem::path& vk_path); + +/** + * @brief Simulates an public transaction + * + * @param inputs_path Path to the file containing the serialised avm inputs + */ +// FIXME(fcarreiro): The inputs should not need to be the PROVING inputs. +void avm_simulate(const std::filesystem::path& inputs_path); + } // namespace bb #endif diff --git a/barretenberg/cpp/src/barretenberg/api/api_client_ivc.cpp b/barretenberg/cpp/src/barretenberg/api/api_client_ivc.cpp index 73284ece484c..4b1467d18616 100644 --- a/barretenberg/cpp/src/barretenberg/api/api_client_ivc.cpp +++ b/barretenberg/cpp/src/barretenberg/api/api_client_ivc.cpp @@ -1,17 +1,16 @@ #include "api_client_ivc.hpp" #include "barretenberg/api/file_io.hpp" -#include "barretenberg/api/get_bytecode.hpp" #include "barretenberg/api/log.hpp" -#include "barretenberg/api/write_prover_output.hpp" #include "barretenberg/bbapi/bbapi.hpp" #include "barretenberg/client_ivc/client_ivc.hpp" #include "barretenberg/client_ivc/mock_circuit_producer.hpp" #include "barretenberg/client_ivc/private_execution_steps.hpp" +#include "barretenberg/common/get_bytecode.hpp" #include "barretenberg/common/map.hpp" #include "barretenberg/common/throw_or_abort.hpp" #include "barretenberg/common/try_catch_shim.hpp" #include 
"barretenberg/dsl/acir_format/acir_to_constraint_buf.hpp" -#include "barretenberg/dsl/acir_format/ivc_recursion_constraint.hpp" +#include "barretenberg/dsl/acir_format/pg_recursion_constraint.hpp" #include "barretenberg/serialize/msgpack.hpp" #include "barretenberg/serialize/msgpack_check_eq.hpp" #include @@ -28,80 +27,30 @@ namespace { // anonymous namespace * * @param bytecode_path * @param witness_path + * @param use_structured_trace Whether to utilize structured trace when computing VK for circuit */ -void write_standalone_vk(const std::string& output_format, - const std::filesystem::path& bytecode_path, - const std::filesystem::path& output_path) +void write_standalone_vk(std::vector bytecode, + const std::filesystem::path& output_path, + bool use_structured_trace = true) { - auto bytecode = get_bytecode(bytecode_path); + auto trace_settings = use_structured_trace ? TraceSettings{ AZTEC_TRACE_STRUCTURE } : TraceSettings{}; auto response = bbapi::ClientIvcComputeStandaloneVk{ .circuit = { .name = "standalone_circuit", .bytecode = std::move(bytecode) } - }.execute(); + }.execute({ .trace_settings = trace_settings }); - bool wrote_file = false; bool is_stdout = output_path == "-"; - auto write_fn = [&](const std::filesystem::path& path, const auto& data) { - if (is_stdout) { - write_bytes_to_stdout(data); - } else { - write_file(path, data); - } - }; - if (output_format == "bytes_and_fields" && is_stdout) { - throw_or_abort("Cannot write to stdout in bytes_and_fields format."); - } - if (output_format == "bytes" || output_format == "bytes_and_fields") { - write_fn(output_path / "vk", response.bytes); - wrote_file = true; - } - if (output_format == "fields" || output_format == "bytes_and_fields") { - std::string json = field_elements_to_json(response.fields); - write_fn(output_path / "vk_fields.json", std::vector(json.begin(), json.end())); - wrote_file = true; - } - if (!wrote_file) { - throw_or_abort("Unsupported output format for standalone vk: " + 
output_format); - } -} - -void write_civc_vk(const std::string& output_format, - size_t num_public_inputs_in_final_circuit, - const std::filesystem::path& output_dir) -{ - if (output_format != "bytes") { - throw_or_abort("Unsupported output format for ClientIVC vk: " + output_format); - } - - // Since we need to specify the number of public inputs but ClientIvcComputeIvcVk derives it from bytecode, - // we need to create a mock circuit with the correct number of public inputs - // For now, we'll use the compute_civc_vk function directly as it was designed for this purpose - bbapi::BBApiRequest request; - auto vk = bbapi::compute_civc_vk(request, num_public_inputs_in_final_circuit); - const auto buf = to_buffer(vk); - - const bool output_to_stdout = output_dir == "-"; - - if (output_to_stdout) { - write_bytes_to_stdout(buf); + if (is_stdout) { + write_bytes_to_stdout(response.bytes); } else { - write_file(output_dir / "vk", buf); + write_file(output_path / "vk", response.bytes); } } - -void write_civc_vk(const std::string& output_data_type, - const std::string& bytecode_path, - const std::filesystem::path& output_dir) +void write_civc_vk(std::vector bytecode, const std::filesystem::path& output_dir) { - if (output_data_type != "bytes") { - throw_or_abort("Unsupported output format for ClientIVC vk: " + output_data_type); - } - - auto bytecode = get_bytecode(bytecode_path); - - auto response = bbapi::ClientIvcComputeIvcVk{ - .circuit = { .name = "final_circuit", .bytecode = std::move(bytecode) } - }.execute(); - + // compute the hiding kernel's vk + info("ClientIVC: computing IVC vk for hiding kernel circuit"); + auto response = + bbapi::ClientIvcComputeIvcVk{ .circuit{ .bytecode = std::move(bytecode) } }.execute({ .trace_settings = {} }); const bool output_to_stdout = output_dir == "-"; if (output_to_stdout) { write_bytes_to_stdout(response.bytes); @@ -115,20 +64,18 @@ void ClientIVCAPI::prove(const Flags& flags, const std::filesystem::path& input_path, const 
std::filesystem::path& output_dir) { - + BB_BENCH_NAME("ClientIVCAPI::prove"); bbapi::BBApiRequest request; std::vector raw_steps = PrivateExecutionStepRaw::load_and_decompress(input_path); bbapi::ClientIvcStart{ .num_circuits = raw_steps.size() }.execute(request); - - size_t loaded_circuit_public_inputs_size = 0; + info("ClientIVC: starting with ", raw_steps.size(), " circuits"); for (const auto& step : raw_steps) { bbapi::ClientIvcLoad{ .circuit = { .name = step.function_name, .bytecode = step.bytecode, .verification_key = step.vk } }.execute(request); // NOLINTNEXTLINE(bugprone-unchecked-optional-access): we know the optional has been set here. - loaded_circuit_public_inputs_size = request.loaded_circuit_constraints->public_inputs.size(); info("ClientIVC: accumulating " + step.function_name); bbapi::ClientIvcAccumulate{ .witness = step.witness }.execute(request); } @@ -140,13 +87,13 @@ void ClientIVCAPI::prove(const Flags& flags, const bool output_to_stdout = output_dir == "-"; const auto write_proof = [&]() { - const auto buf = to_buffer(proof); + const auto buf = to_buffer(proof.to_field_elements()); if (output_to_stdout) { vinfo("writing ClientIVC proof to stdout"); write_bytes_to_stdout(buf); } else { vinfo("writing ClientIVC proof in directory ", output_dir); - proof.to_file_msgpack(output_dir / "proof"); + write_file(output_dir / "proof", buf); } }; @@ -154,7 +101,8 @@ void ClientIVCAPI::prove(const Flags& flags, if (flags.write_vk) { vinfo("writing ClientIVC vk in directory ", output_dir); - write_civc_vk("bytes", loaded_circuit_public_inputs_size, output_dir); + // write CIVC vk using the bytecode of the hiding circuit (the last step of the execution) + write_civc_vk(raw_steps[raw_steps.size() - 1].bytecode, output_dir); } } @@ -163,8 +111,12 @@ bool ClientIVCAPI::verify([[maybe_unused]] const Flags& flags, const std::filesystem::path& proof_path, const std::filesystem::path& vk_path) { - auto proof = ClientIVC::Proof::from_file_msgpack(proof_path); + 
BB_BENCH_NAME("ClientIVCAPI::verify"); + auto proof_fields = many_from_buffer(read_file(proof_path)); + auto proof = ClientIVC::Proof::from_field_elements(proof_fields); + auto vk_buffer = read_file(vk_path); + auto response = bbapi::ClientIvcVerify{ .proof = std::move(proof), .vk = std::move(vk_buffer) }.execute(); return response.valid; } @@ -172,20 +124,20 @@ bool ClientIVCAPI::verify([[maybe_unused]] const Flags& flags, // WORKTODO(bbapi) remove this bool ClientIVCAPI::prove_and_verify(const std::filesystem::path& input_path) { - PrivateExecutionSteps steps; steps.parse(PrivateExecutionStepRaw::load_and_decompress(input_path)); std::shared_ptr ivc = steps.accumulate(); // Construct the hiding kernel as the final step of the IVC - ClientIVC::ClientCircuit circuit{ ivc->goblin.op_queue }; - ivc->complete_kernel_circuit_logic(circuit); - const bool verified = ivc->prove_and_verify(); + + auto proof = ivc->prove(); + const bool verified = ClientIVC::verify(proof, ivc->get_vk()); return verified; } void ClientIVCAPI::gates(const Flags& flags, const std::filesystem::path& bytecode_path) { + BB_BENCH_NAME("ClientIVCAPI::gates"); gate_count_for_ivc(bytecode_path, flags.include_gates_per_opcode); } @@ -193,11 +145,13 @@ void ClientIVCAPI::write_solidity_verifier([[maybe_unused]] const Flags& flags, [[maybe_unused]] const std::filesystem::path& output_path, [[maybe_unused]] const std::filesystem::path& vk_path) { + BB_BENCH_NAME("ClientIVCAPI::write_solidity_verifier"); throw_or_abort("API function contract not implemented"); } bool ClientIVCAPI::check_precomputed_vks(const Flags& flags, const std::filesystem::path& input_path) { + BB_BENCH_NAME("ClientIVCAPI::check_precomputed_vks"); bbapi::BBApiRequest request; std::vector raw_steps = PrivateExecutionStepRaw::load_and_decompress(input_path); @@ -230,11 +184,15 @@ void ClientIVCAPI::write_vk(const Flags& flags, const std::filesystem::path& bytecode_path, const std::filesystem::path& output_path) { - + 
BB_BENCH_NAME("ClientIVCAPI::write_vk"); + auto bytecode = get_bytecode(bytecode_path); if (flags.verifier_type == "ivc") { - write_civc_vk(flags.output_format, bytecode_path, output_path); + write_civc_vk(bytecode, output_path); } else if (flags.verifier_type == "standalone") { - write_standalone_vk(flags.output_format, bytecode_path, output_path); + write_standalone_vk(bytecode, output_path); + } else if (flags.verifier_type == "standalone_hiding") { + // write the VK for the hiding kernel which DOES NOT utilize a structured trace + write_standalone_vk(bytecode, output_path, false); } else { const std::string msg = std::string("Can't write vk for verifier type ") + flags.verifier_type; throw_or_abort(msg); @@ -251,13 +209,14 @@ bool ClientIVCAPI::check([[maybe_unused]] const Flags& flags, void gate_count_for_ivc(const std::string& bytecode_path, bool include_gates_per_opcode) { + BB_BENCH_NAME("gate_count_for_ivc"); // All circuit reports will be built into the std::string below std::string functions_string = "{\"functions\": [\n "; bbapi::BBApiRequest request{ .trace_settings = { AZTEC_TRACE_STRUCTURE } }; auto bytecode = get_bytecode(bytecode_path); - auto response = bbapi::ClientIvcGates{ .circuit = { .name = "ivc_circuit", .bytecode = std::move(bytecode) }, + auto response = bbapi::ClientIvcStats{ .circuit = { .name = "ivc_circuit", .bytecode = std::move(bytecode) }, .include_gates_per_opcode = include_gates_per_opcode } .execute(request); @@ -283,25 +242,4 @@ void gate_count_for_ivc(const std::string& bytecode_path, bool include_gates_per std::cout << format(functions_string, "\n]}"); } -void write_arbitrary_valid_client_ivc_proof_and_vk_to_file(const std::filesystem::path& output_dir) -{ - - const size_t NUM_CIRCUITS = 2; - ClientIVC ivc{ NUM_CIRCUITS, { AZTEC_TRACE_STRUCTURE } }; - - // Construct and accumulate a series of mocked private function execution circuits - PrivateFunctionExecutionMockCircuitProducer circuit_producer; - for (size_t idx = 0; idx < 
NUM_CIRCUITS; ++idx) { - circuit_producer.construct_and_accumulate_next_circuit(ivc); - } - - ClientIVC::Proof proof = ivc.prove(); - - // Write the proof and verification keys into the working directory in 'binary' format - vinfo("writing ClientIVC proof and vk..."); - proof.to_file_msgpack(output_dir / "proof"); - - write_file(output_dir / "vk", to_buffer(ivc.get_vk())); -} - } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/api/api_client_ivc.hpp b/barretenberg/cpp/src/barretenberg/api/api_client_ivc.hpp index 7ff8f9f8aade..e5fdfa9b8af1 100644 --- a/barretenberg/cpp/src/barretenberg/api/api_client_ivc.hpp +++ b/barretenberg/cpp/src/barretenberg/api/api_client_ivc.hpp @@ -42,8 +42,6 @@ class ClientIVCAPI : public API { void gate_count_for_ivc(const std::string& bytecode_path, bool include_gates_per_opcode); -void write_arbitrary_valid_client_ivc_proof_and_vk_to_file(const std::filesystem::path& output_dir); - std::vector decompress(const void* bytes, size_t size); } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/api/api_client_ivc.test.cpp b/barretenberg/cpp/src/barretenberg/api/api_client_ivc.test.cpp index cf86586c4095..05c8b18f12f7 100644 --- a/barretenberg/cpp/src/barretenberg/api/api_client_ivc.test.cpp +++ b/barretenberg/cpp/src/barretenberg/api/api_client_ivc.test.cpp @@ -37,6 +37,8 @@ std::filesystem::path get_test_dir(const std::string_view& test_name) return temp_dir / test_name; } +// TODO(https://github.com/AztecProtocol/barretenberg/issues/1509): expand these test to accomodate a more realistic +// CIVC flow in order to re-enable the ProveAndVerify* tests in this file void create_test_private_execution_steps(const std::filesystem::path& output_path) { using namespace acir_format; @@ -49,8 +51,10 @@ void create_test_private_execution_steps(const std::filesystem::path& output_pat auto app_vk_response = bbapi::ClientIvcComputeStandaloneVk{ .circuit = { .name = "app_circuit", .bytecode = app_bytecode } }.execute(); - auto 
app_vk = app_vk_response.bytes; - auto app_vk_fields = from_buffer(app_vk).to_field_elements(); + + // Decode VK to get field elements + auto app_vk = from_buffer(app_vk_response.bytes); + auto app_vk_fields = app_vk.to_field_elements(); // Now create a kernel circuit that verifies the app circuit auto kernel_bytecode = acir_bincode_mocks::create_simple_kernel(app_vk_fields.size(), /*is_init_kernel=*/true); @@ -63,8 +67,10 @@ void create_test_private_execution_steps(const std::filesystem::path& output_pat // Create PrivateExecutionStepRaw for the kernel std::vector raw_steps; - raw_steps.push_back( - { .bytecode = app_bytecode, .witness = app_witness_data, .vk = app_vk, .function_name = "app_function" }); + raw_steps.push_back({ .bytecode = app_bytecode, + .witness = app_witness_data, + .vk = app_vk_response.bytes, + .function_name = "app_function" }); raw_steps.push_back({ .bytecode = kernel_bytecode, .witness = kernel_witness_data, .vk = kernel_vk, @@ -96,35 +102,38 @@ class ClientIVCAPITests : public ::testing::Test { namespace bb { std::vector compress(const std::vector& input); -} +} // namespace bb -// Used to get a mock IVC vk. +// Helper to get an IVC verification key for testing ClientIVC::MegaVerificationKey get_ivc_vk(const std::filesystem::path& test_dir) { auto [app_bytecode, app_witness_data] = acir_bincode_mocks::create_simple_circuit_bytecode(); bbapi::BBApiRequest request; auto app_vk_response = bbapi::ClientIvcComputeStandaloneVk{ .circuit = { .name = "app_circuit", .bytecode = app_bytecode } }.execute(); - auto app_vk = app_vk_response.bytes; - auto app_vk_fields = from_buffer(app_vk).to_field_elements(); - // Use this to get the size of the vk. 
- auto bytecode = acir_bincode_mocks::create_simple_kernel(app_vk_fields.size(), /*is_init_kernel=*/false); + + // Decode to get the field count + auto app_vk = from_buffer(app_vk_response.bytes); + size_t vk_field_count = app_vk.to_field_elements().size(); + + // Create a kernel circuit with the correct VK size + auto bytecode = acir_bincode_mocks::create_simple_kernel(vk_field_count, /*is_init_kernel=*/false); std::filesystem::path bytecode_path = test_dir / "circuit.acir"; write_file(bytecode_path, bb::compress(bytecode)); ClientIVCAPI::Flags write_vk_flags; write_vk_flags.verifier_type = "ivc"; - write_vk_flags.output_format = "bytes"; ClientIVCAPI api; api.write_vk(write_vk_flags, bytecode_path, test_dir); - return from_buffer(read_file(test_dir / "vk")); + auto buffer = read_file(test_dir / "vk"); + return from_buffer(buffer); }; // Test the ClientIVCAPI::prove flow, making sure --write_vk // returns the same output as our ivc VK generation. -TEST_F(ClientIVCAPITests, ProveAndVerifyFileBasedFlow) +TEST_F(ClientIVCAPITests, DISABLED_ProveAndVerifyFileBasedFlow) { auto ivc_vk = get_ivc_vk(test_dir); @@ -147,7 +156,7 @@ TEST_F(ClientIVCAPITests, ProveAndVerifyFileBasedFlow) auto verify_vk_equivalence = [&](const std::filesystem::path& vk1_path, const ClientIVC::MegaVerificationKey& vk2) { auto vk1_data = read_file(vk1_path); auto vk1 = from_buffer(vk1_data); - ASSERT_TRUE(msgpack::msgpack_check_eq(vk1, vk2, "VK from prove should match VK from write_vk")); + ASSERT_EQ(vk1, vk2); }; // Helper lambda to verify proof @@ -178,20 +187,39 @@ TEST_F(ClientIVCAPITests, WriteVkFieldsSmokeTest) std::filesystem::path bytecode_path = test_dir / "circuit.acir"; write_file(bytecode_path, bb::compress(bytecode)); - // Test write_vk with fields output format + // Test write_vk ClientIVCAPI::Flags flags; flags.verifier_type = "standalone"; - flags.output_format = "fields"; ClientIVCAPI api; api.write_vk(flags, bytecode_path, test_dir); - // Read and verify the fields format - 
auto vk_data = read_file(test_dir / "vk_fields.json"); - std::string vk_str(vk_data.begin(), vk_data.end()); - // Just check that this looks a bit like JSON. - EXPECT_NE(vk_str.find('['), std::string::npos); - EXPECT_NE(vk_str.find(']'), std::string::npos); + // Verify the binary VK file was created + EXPECT_TRUE(std::filesystem::exists(test_dir / "vk")); +} + +TEST_F(ClientIVCAPITests, WriteIVCVkSmokeTest) +{ + // Create a simple circuit bytecode + auto [bytecode, witness_data] = acir_bincode_mocks::create_simple_circuit_bytecode(); + + // Compress and write bytecode to file + std::filesystem::path bytecode_path = test_dir / "circuit.acir"; + write_file(bytecode_path, bb::compress(bytecode)); + + // Set flags for VK generation + ClientIVCAPI::Flags flags; + flags.verifier_type = "ivc"; + + // Call write_vk + ClientIVCAPI api; + api.write_vk(flags, bytecode_path, test_dir); + + // Check that VK file exists and is non-empty + std::filesystem::path vk_path = test_dir / "vk"; + ASSERT_TRUE(std::filesystem::exists(vk_path)); + auto vk_data = read_file(vk_path); + ASSERT_FALSE(vk_data.empty()); } // TODO(https://github.com/AztecProtocol/barretenberg/issues/1461): Make this test actually test # gates @@ -233,7 +261,7 @@ TEST_F(ClientIVCAPITests, GatesCommandSmokeTest) } // Test prove_and_verify for our example IVC flow. 
-TEST_F(ClientIVCAPITests, ProveAndVerifyCommand) +TEST_F(ClientIVCAPITests, DISABLED_ProveAndVerifyCommand) { // Create test input file std::filesystem::path input_path = test_dir / "input.msgpack"; diff --git a/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.cpp b/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.cpp index d1a1e82ca045..47cbeeeb2428 100644 --- a/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.cpp +++ b/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.cpp @@ -1,170 +1,48 @@ #include "api_ultra_honk.hpp" -#include "barretenberg/api/acir_format_getters.hpp" #include "barretenberg/api/file_io.hpp" -#include "barretenberg/api/gate_count.hpp" -#include "barretenberg/api/write_prover_output.hpp" +#include "barretenberg/bbapi/bbapi_ultra_honk.hpp" +#include "barretenberg/common/bb_bench.hpp" +#include "barretenberg/common/get_bytecode.hpp" #include "barretenberg/common/map.hpp" #include "barretenberg/common/throw_or_abort.hpp" +#include "barretenberg/dsl/acir_format/acir_to_constraint_buf.hpp" #include "barretenberg/dsl/acir_format/proof_surgeon.hpp" #include "barretenberg/dsl/acir_proofs/honk_contract.hpp" +#include "barretenberg/dsl/acir_proofs/honk_optimized_contract.hpp" #include "barretenberg/dsl/acir_proofs/honk_zk_contract.hpp" #include "barretenberg/honk/proof_system/types/proof.hpp" +#include "barretenberg/numeric/uint256/uint256.hpp" #include "barretenberg/special_public_inputs/special_public_inputs.hpp" #include "barretenberg/srs/global_crs.hpp" +#include +#include +#include namespace bb { -template -Circuit _compute_circuit(const std::string& bytecode_path, const std::string& witness_path) -{ - uint32_t honk_recursion = 0; - - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1326): Get rid of honk_recursion and just use - // ipa_accumulation. 
- // bool ipa_accumulation = false; - if constexpr (IsAnyOf) { - honk_recursion = 1; - } else if constexpr (IsAnyOf) { - honk_recursion = 2; - // ipa_accumulation = true; - } -#ifdef STARKNET_GARAGA_FLAVORS - if constexpr (IsAnyOf) { - honk_recursion = 1; - } -#endif - - const acir_format::ProgramMetadata metadata{ - .honk_recursion = honk_recursion, - }; - acir_format::AcirProgram program{ get_constraint_system(bytecode_path) }; - - if (!witness_path.empty()) { - program.witness = get_witness(witness_path); - } - return acir_format::create_circuit(program, metadata); -} +namespace { -template -std::shared_ptr> _compute_proving_key(const std::string& bytecode_path, - const std::string& witness_path) +void write_vk_outputs(const bbapi::CircuitComputeVk::Response& vk_response, const std::filesystem::path& output_dir) { - typename Flavor::CircuitBuilder builder = _compute_circuit(bytecode_path, witness_path); - auto decider_proving_key = std::make_shared>(builder); - return decider_proving_key; + write_file(output_dir / "vk", vk_response.bytes); + info("VK saved to ", output_dir / "vk"); + write_file(output_dir / "vk_hash", vk_response.hash); + info("VK Hash saved to ", output_dir / "vk_hash"); } -template -PubInputsProofAndKey _compute_vk(const std::filesystem::path& bytecode_path, - const std::filesystem::path& witness_path) +void write_proof_outputs(const bbapi::CircuitProve::Response& prove_response, const std::filesystem::path& output_dir) { - using Proof = typename Flavor::Transcript::Proof; + auto public_inputs_buf = to_buffer(prove_response.public_inputs); + auto proof_buf = to_buffer(prove_response.proof); - auto proving_key = _compute_proving_key(bytecode_path.string(), witness_path.string()); - auto vk = std::make_shared(proving_key->get_precomputed()); - return { PublicInputsVector{}, Proof{}, vk, vk->hash() }; + write_file(output_dir / "public_inputs", public_inputs_buf); + write_file(output_dir / "proof", proof_buf); + info("Public inputs saved to ", 
output_dir / "public_inputs"); + info("Proof saved to ", output_dir / "proof"); } -template -PubInputsProofAndKey _prove(const bool compute_vk, - const std::filesystem::path& bytecode_path, - const std::filesystem::path& witness_path, - const std::filesystem::path& vk_path) -{ - using Proof = typename Flavor::Transcript::Proof; - - auto proving_key = _compute_proving_key(bytecode_path.string(), witness_path.string()); - std::shared_ptr vk; - if (compute_vk) { - info("WARNING: computing verification key while proving. Pass in a precomputed vk for better performance."); - vk = std::make_shared(proving_key->get_precomputed()); - } else { - vk = std::make_shared( - from_buffer(read_file(vk_path))); - } - - UltraProver_ prover{ proving_key, vk }; - - Proof concat_pi_and_proof = prover.construct_proof(); - // Compute number of inner public inputs. Perform loose checks that the public inputs contain enough data. - auto num_inner_public_inputs = [&]() { - size_t num_public_inputs = prover.proving_key->num_public_inputs(); - if constexpr (HasIPAAccumulator) { - BB_ASSERT_GTE(num_public_inputs, - RollupIO::PUBLIC_INPUTS_SIZE, - "Public inputs should contain a pairing point accumulator and an IPA claim."); - return num_public_inputs - RollupIO::PUBLIC_INPUTS_SIZE; - } else { - BB_ASSERT_GTE(num_public_inputs, - DefaultIO::PUBLIC_INPUTS_SIZE, - "Public inputs should contain a pairing point accumulator."); - return num_public_inputs - DefaultIO::PUBLIC_INPUTS_SIZE; - } - }(); - - // We split the inner public inputs, which are stored at the front of the proof, from the rest of the proof. Now, - // the "proof" refers to everything except the inner public inputs. 
- PublicInputsAndProof public_inputs_and_proof{ - PublicInputsVector(concat_pi_and_proof.begin(), - concat_pi_and_proof.begin() + static_cast(num_inner_public_inputs)), - Proof(concat_pi_and_proof.begin() + static_cast(num_inner_public_inputs), - concat_pi_and_proof.end()) - }; - return { public_inputs_and_proof.public_inputs, public_inputs_and_proof.proof, vk, vk->hash() }; -} - -template -bool _verify(const std::filesystem::path& public_inputs_path, - const std::filesystem::path& proof_path, - const std::filesystem::path& vk_path) -{ - using VerificationKey = typename Flavor::VerificationKey; - using Verifier = UltraVerifier_; - using Transcript = typename Flavor::Transcript; - using DataType = typename Transcript::DataType; - using Proof = typename Transcript::Proof; - - auto vk = std::make_shared(from_buffer(read_file(vk_path))); - auto public_inputs = many_from_buffer(read_file(public_inputs_path)); - auto proof = many_from_buffer(read_file(proof_path)); - // concatenate public inputs and proof - std::vector complete_proof = public_inputs; - complete_proof.insert(complete_proof.end(), proof.begin(), proof.end()); - - VerifierCommitmentKey ipa_verification_key; - if constexpr (HasIPAAccumulator) { - ipa_verification_key = VerifierCommitmentKey(1 << CONST_ECCVM_LOG_N); - } - - Verifier verifier{ vk, ipa_verification_key }; - - bool verified = false; - if constexpr (HasIPAAccumulator) { - const size_t HONK_PROOF_LENGTH = Flavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS() - IPA_PROOF_LENGTH; - const size_t num_public_inputs = static_cast(vk->num_public_inputs); - // The extra calculation is for the IPA proof length. 
- BB_ASSERT_EQ(complete_proof.size(), - HONK_PROOF_LENGTH + IPA_PROOF_LENGTH + num_public_inputs, - "Honk proof has incorrect length while verifying."); - const std::ptrdiff_t honk_proof_with_pub_inputs_length = - static_cast(HONK_PROOF_LENGTH + num_public_inputs); - auto ipa_proof = Proof(complete_proof.begin() + honk_proof_with_pub_inputs_length, complete_proof.end()); - auto tube_honk_proof = - Proof(complete_proof.begin(), complete_proof.begin() + honk_proof_with_pub_inputs_length); - verified = verifier.template verify_proof(complete_proof, ipa_proof).result; - } else { - verified = verifier.template verify_proof(complete_proof).result; - } - - if (verified) { - info("Proof verified successfully"); - } else { - info("Proof verification failed"); - } - - return verified; -} +} // anonymous namespace bool UltraHonkAPI::check([[maybe_unused]] const Flags& flags, [[maybe_unused]] const std::filesystem::path& bytecode_path, @@ -180,31 +58,38 @@ void UltraHonkAPI::prove(const Flags& flags, const std::filesystem::path& vk_path, const std::filesystem::path& output_dir) { - const auto _write = [&](auto&& _prove_output) { - write(_prove_output, flags.output_format, flags.write_vk ? 
"proof_and_vk" : "proof", output_dir); - }; - // if the ipa accumulation flag is set we are using the UltraRollupFlavor - if (flags.ipa_accumulation) { - _write(_prove(flags.write_vk, bytecode_path, witness_path, vk_path)); - } else if (flags.oracle_hash_type == "poseidon2" && !flags.disable_zk) { - // if we are not disabling ZK and the oracle hash type is poseidon2, we are using the UltraZKFlavor - _write(_prove(flags.write_vk, bytecode_path, witness_path, vk_path)); - } else if (flags.oracle_hash_type == "poseidon2" && flags.disable_zk) { - // if we are disabling ZK and the oracle hash type is poseidon2, we are using the UltraFlavor - _write(_prove(flags.write_vk, bytecode_path, witness_path, vk_path)); - } else if (flags.oracle_hash_type == "keccak" && !flags.disable_zk) { - // if we are not disabling ZK and the oracle hash type is keccak, we are using the UltraKeccakZKFlavor - _write(_prove(flags.write_vk, bytecode_path, witness_path, vk_path)); - } else if (flags.oracle_hash_type == "keccak" && flags.disable_zk) { - _write(_prove(flags.write_vk, bytecode_path, witness_path, vk_path)); -#ifdef STARKNET_GARAGA_FLAVORS - } else if (flags.oracle_hash_type == "starknet" && flags.disable_zk) { - _write(_prove(flags.write_vk, bytecode_path, witness_path, vk_path)); - } else if (flags.oracle_hash_type == "starknet" && !flags.disable_zk) { - _write(_prove(flags.write_vk, bytecode_path, witness_path, vk_path)); -#endif - } else { - throw_or_abort("Invalid proving options specified in _prove"); + BB_BENCH_NAME("UltraHonkAPI::prove"); + // Validate output directory + if (output_dir == "-") { + throw_or_abort("Stdout output is not supported. 
Please specify an output directory."); + } + + // Convert flags to ProofSystemSettings + bbapi::ProofSystemSettings settings{ .ipa_accumulation = flags.ipa_accumulation, + .oracle_hash_type = flags.oracle_hash_type, + .disable_zk = flags.disable_zk }; + + // Read input files + auto bytecode = get_bytecode(bytecode_path); + auto witness = get_bytecode(witness_path); + + // Handle VK + std::vector vk_bytes; + + if (!vk_path.empty() && !flags.write_vk) { + vk_bytes = read_file(vk_path); + } + + // Prove + auto response = bbapi::CircuitProve{ .circuit = { .name = "circuit", + .bytecode = std::move(bytecode), + .verification_key = std::move(vk_bytes) }, + .witness = std::move(witness), + .settings = std::move(settings) } + .execute(); + write_proof_outputs(response, output_dir); + if (flags.write_vk) { + write_vk_outputs(response.vk, output_dir); } } @@ -213,27 +98,25 @@ bool UltraHonkAPI::verify(const Flags& flags, const std::filesystem::path& proof_path, const std::filesystem::path& vk_path) { - const bool ipa_accumulation = flags.ipa_accumulation; - // if the ipa accumulation flag is set we are using the UltraRollupFlavor - if (ipa_accumulation) { - return _verify(public_inputs_path, proof_path, vk_path); - } else if (flags.oracle_hash_type == "poseidon2" && !flags.disable_zk) { - return _verify(public_inputs_path, proof_path, vk_path); - } else if (flags.oracle_hash_type == "poseidon2" && flags.disable_zk) { - return _verify(public_inputs_path, proof_path, vk_path); - } else if (flags.oracle_hash_type == "keccak" && !flags.disable_zk) { - return _verify(public_inputs_path, proof_path, vk_path); - } else if (flags.oracle_hash_type == "keccak" && flags.disable_zk) { - return _verify(public_inputs_path, proof_path, vk_path); -#ifdef STARKNET_GARAGA_FLAVORS - } else if (flags.oracle_hash_type == "starknet" && !flags.disable_zk) { - return _verify(ipa_accumulation, public_inputs_path, proof_path, vk_path); - } else if (flags.oracle_hash_type == "starknet" && 
flags.disable_zk) { - return _verify(ipa_accumulation, public_inputs_path, proof_path, vk_path); -#endif - } else { - throw_or_abort("invalid proof type in _verify"); - } + BB_BENCH_NAME("UltraHonkAPI::verify"); + // Read input files + auto public_inputs = many_from_buffer(read_file(public_inputs_path)); + auto proof = many_from_buffer(read_file(proof_path)); + auto vk_bytes = read_file(vk_path); + + // Convert flags to ProofSystemSettings + bbapi::ProofSystemSettings settings{ .ipa_accumulation = flags.ipa_accumulation, + .oracle_hash_type = flags.oracle_hash_type, + .disable_zk = flags.disable_zk }; + + // Execute verify command + auto response = bbapi::CircuitVerify{ .verification_key = std::move(vk_bytes), + .public_inputs = std::move(public_inputs), + .proof = std::move(proof), + .settings = settings } + .execute(); + + return response.verified; } bool UltraHonkAPI::prove_and_verify([[maybe_unused]] const Flags& flags, @@ -246,49 +129,103 @@ bool UltraHonkAPI::prove_and_verify([[maybe_unused]] const Flags& flags, void UltraHonkAPI::write_vk(const Flags& flags, const std::filesystem::path& bytecode_path, - const std::filesystem::path& output_path) + const std::filesystem::path& output_dir) { - const auto _write = [&](auto&& _prove_output) { write(_prove_output, flags.output_format, "vk", output_path); }; - - if (flags.ipa_accumulation) { - _write(_compute_vk(bytecode_path, "")); - } else if (flags.oracle_hash_type == "poseidon2" && !flags.disable_zk) { - _write(_compute_vk(bytecode_path, "")); - } else if (flags.oracle_hash_type == "poseidon2" && flags.disable_zk) { - _write(_compute_vk(bytecode_path, "")); - } else if (flags.oracle_hash_type == "keccak" && !flags.disable_zk) { - _write(_compute_vk(bytecode_path, "")); - } else if (flags.oracle_hash_type == "keccak" && flags.disable_zk) { - _write(_compute_vk(bytecode_path, "")); -#ifdef STARKNET_GARAGA_FLAVORS - } else if (flags.oracle_hash_type == "starknet" && !flags.disable_zk) { - 
_write(_compute_vk(bytecode_path, "")); - } else if (flags.oracle_hash_type == "starknet" && flags.disable_zk) { - _write(_compute_vk(bytecode_path, "")); -#endif - } else { - throw_or_abort("invalid proof type in _write_vk"); + BB_BENCH_NAME("UltraHonkAPI::write_vk"); + // Validate output directory + if (output_dir == "-") { + throw_or_abort("Stdout output is not supported. Please specify an output directory."); } + + // Read bytecode + auto bytecode = get_bytecode(bytecode_path); + + // Convert flags to ProofSystemSettings + bbapi::ProofSystemSettings settings{ .ipa_accumulation = flags.ipa_accumulation, + .oracle_hash_type = flags.oracle_hash_type, + .disable_zk = flags.disable_zk }; + + auto response = bbapi::CircuitComputeVk{ .circuit = { .name = "circuit", .bytecode = std::move(bytecode) }, + .settings = settings } + .execute(); + + write_vk_outputs(response, output_dir); } void UltraHonkAPI::gates([[maybe_unused]] const Flags& flags, [[maybe_unused]] const std::filesystem::path& bytecode_path) { - gate_count(bytecode_path, /*useless=*/false, flags.honk_recursion, flags.include_gates_per_opcode); + BB_BENCH_NAME("UltraHonkAPI::gates"); + // Get the bytecode directly + auto bytecode = get_bytecode(bytecode_path); + + // All circuit reports will be built into the string below + std::string functions_string = "{\"functions\": [\n "; + + // For now, treat the entire bytecode as a single circuit + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1074): Handle multi-circuit programs properly + // Convert flags to ProofSystemSettings + bbapi::ProofSystemSettings settings{ .ipa_accumulation = flags.ipa_accumulation, + .oracle_hash_type = flags.oracle_hash_type, + .disable_zk = flags.disable_zk }; + + // Execute CircuitStats command + auto response = bbapi::CircuitStats{ .circuit = { .name = "circuit", .bytecode = bytecode, .verification_key = {} }, + .include_gates_per_opcode = flags.include_gates_per_opcode, + .settings = settings } + .execute(); + + 
vinfo("Calculated circuit size in gate_count: ", response.num_gates); + + // Build individual circuit report to match original gate_count output + std::string gates_per_opcode_str; + if (flags.include_gates_per_opcode) { + size_t i = 0; + for (size_t count : response.gates_per_opcode) { + if (i != 0) { + gates_per_opcode_str += ","; + } + gates_per_opcode_str += std::to_string(count); + i++; + } + } + + // For now, we'll use the CircuitStats response which includes circuit statistics + // The num_acir_opcodes is not directly available from bytecode alone + auto result_string = format( + "{\n \"acir_opcodes\": ", + response.num_acir_opcodes, + ",\n \"circuit_size\": ", + response.num_gates, + (flags.include_gates_per_opcode ? format(",\n \"gates_per_opcode\": [", gates_per_opcode_str, "]") : ""), + "\n }"); + + functions_string = format(functions_string, result_string); + std::cout << format(functions_string, "\n]}"); } void UltraHonkAPI::write_solidity_verifier(const Flags& flags, const std::filesystem::path& output_path, const std::filesystem::path& vk_path) { - using VK = UltraKeccakFlavor::VerificationKey; - auto vk = std::make_shared(from_buffer(read_file(vk_path))); - std::string contract = flags.disable_zk ? 
get_honk_solidity_verifier(vk) : get_honk_zk_solidity_verifier(vk); + BB_BENCH_NAME("UltraHonkAPI::write_solidity_verifier"); + // Read VK file + auto vk_bytes = read_file(vk_path); + // Convert flags to ProofSystemSettings + bbapi::ProofSystemSettings settings{ .ipa_accumulation = flags.ipa_accumulation, + .oracle_hash_type = flags.oracle_hash_type, + .disable_zk = flags.disable_zk, + .optimized_solidity_verifier = flags.optimized_solidity_verifier }; + + // Execute solidity verifier command + auto response = bbapi::CircuitWriteSolidityVerifier{ .verification_key = vk_bytes, .settings = settings }.execute(); + + // Write output if (output_path == "-") { - std::cout << contract; + std::cout << response.solidity_code; } else { - write_file(output_path, { contract.begin(), contract.end() }); + write_file(output_path, { response.solidity_code.begin(), response.solidity_code.end() }); if (flags.disable_zk) { info("Honk solidity verifier saved to ", output_path); } else { @@ -296,37 +233,4 @@ void UltraHonkAPI::write_solidity_verifier(const Flags& flags, } } } - -template -void write_recursion_inputs_ultra_honk(const std::string& bytecode_path, - const std::string& witness_path, - const std::string& output_path) -{ - using Prover = UltraProver_; - using VerificationKey = typename Flavor::VerificationKey; - using FF = typename Flavor::FF; - - std::shared_ptr> proving_key = _compute_proving_key(bytecode_path, witness_path); - auto verification_key = std::make_shared(proving_key->get_precomputed()); - Prover prover{ proving_key, verification_key }; - std::vector proof = prover.construct_proof(); - - bool ipa_accumulation = false; - if constexpr (IsAnyOf) { - ipa_accumulation = true; - } - const std::string toml_content = - acir_format::ProofSurgeon::construct_recursion_inputs_toml_data(proof, verification_key, ipa_accumulation); - - const std::string toml_path = output_path + "/Prover.toml"; - write_file(toml_path, { toml_content.begin(), toml_content.end() }); -} - 
-template void write_recursion_inputs_ultra_honk(const std::string& bytecode_path, - const std::string& witness_path, - const std::string& output_path); - -template void write_recursion_inputs_ultra_honk(const std::string& bytecode_path, - const std::string& witness_path, - const std::string& output_path); } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.hpp b/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.hpp index ada7c462dfcf..59ef11011879 100644 --- a/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.hpp +++ b/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.hpp @@ -41,17 +41,4 @@ class UltraHonkAPI : public API { const std::filesystem::path& vk_path) override; }; -template -void write_recursion_inputs_ultra_honk(const std::string& bytecode_path, - const std::string& witness_path, - const std::string& output_path); - -extern template void write_recursion_inputs_ultra_honk(const std::string& bytecode_path, - const std::string& witness_path, - const std::string& output_path); - -extern template void write_recursion_inputs_ultra_honk(const std::string& bytecode_path, - const std::string& witness_path, - const std::string& output_path); - } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.test.cpp b/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.test.cpp new file mode 100644 index 000000000000..4befc42d899f --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.test.cpp @@ -0,0 +1,236 @@ +#include "api_ultra_honk.hpp" +#include "barretenberg/api/file_io.hpp" +#include "barretenberg/bbapi/bbapi_ultra_honk.hpp" +#include "barretenberg/client_ivc/acir_bincode_mocks.hpp" +#include "barretenberg/common/serialize.hpp" +#include "barretenberg/dsl/acir_format/acir_format.hpp" +#include "barretenberg/dsl/acir_format/acir_to_constraint_buf.hpp" +#include "barretenberg/dsl/acir_format/proof_surgeon.hpp" +#include "barretenberg/flavor/ultra_flavor.hpp" +#include 
"barretenberg/flavor/ultra_rollup_flavor.hpp" +#include "barretenberg/ultra_honk/prover_instance.hpp" +#include +#include +#include +#include +#include +#include +#include +#include + +namespace bb { +std::vector compress(const std::vector& input); +std::vector decompress(const void* bytes, size_t size); +} // namespace bb + +using namespace bb; + +namespace { +// Create a unique temporary directory for each test run +// Uniqueness needed because tests are run in parallel and write to same file names. +std::filesystem::path get_test_dir(const std::string_view& test_name) +{ + std::filesystem::path temp_dir = "tmp_api_ultra_honk_test"; + std::filesystem::create_directories(temp_dir); + std::filesystem::create_directories(temp_dir / test_name); + return temp_dir / test_name; +} + +// Create test data +std::pair create_test_circuit_files(const std::filesystem::path& test_dir) +{ + auto [bytecode, witness] = acir_bincode_mocks::create_simple_circuit_bytecode(); + + auto bytecode_path = test_dir / "circuit.gz"; + auto witness_path = test_dir / "witness.gz"; + + write_file(bytecode_path, bb::compress(bytecode)); + write_file(witness_path, bb::compress(witness)); + + return { bytecode_path, witness_path }; +} + +} // namespace + +class ApiUltraHonkTest : public ::testing::Test { + protected: + static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } + + void SetUp() override + { + const auto* info = ::testing::UnitTest::GetInstance()->current_test_info(); + test_dir = get_test_dir(info->name()); + } + + void TearDown() override + { + if (std::filesystem::exists(test_dir)) { + std::filesystem::remove_all(test_dir); + } + } + + std::filesystem::path test_dir; +}; + +TEST_F(ApiUltraHonkTest, ProveAndVerify) +{ + auto [bytecode_path, witness_path] = create_test_circuit_files(test_dir); + + API::Flags flags; + flags.oracle_hash_type = "poseidon2"; // Set default oracle hash type + + UltraHonkAPI api; + + // Generate VK first + auto 
vk_output_path = test_dir / "vk"; + std::filesystem::create_directories(vk_output_path); + api.write_vk(flags, bytecode_path, vk_output_path); + EXPECT_TRUE(std::filesystem::exists(vk_output_path / "vk")); + + // Generate proof + auto proof_output_dir = test_dir / "proof"; + std::filesystem::create_directories(proof_output_dir); + api.prove(flags, bytecode_path, witness_path, vk_output_path / "vk", proof_output_dir); + + // Check that proof files were created + EXPECT_TRUE(std::filesystem::exists(proof_output_dir / "proof")); + EXPECT_TRUE(std::filesystem::exists(proof_output_dir / "public_inputs")); + + // Verify the proof + bool verified = + api.verify(flags, proof_output_dir / "public_inputs", proof_output_dir / "proof", vk_output_path / "vk"); + EXPECT_TRUE(verified); +} + +TEST_F(ApiUltraHonkTest, ProveWithWriteVk) +{ + auto [bytecode_path, witness_path] = create_test_circuit_files(test_dir); + + API::Flags flags; + flags.oracle_hash_type = "poseidon2"; + flags.write_vk = true; + + UltraHonkAPI api; + + // Generate proof with write_vk flag (will compute and write VK) + auto proof_output_dir = test_dir / "proof"; + std::filesystem::create_directories(proof_output_dir); + api.prove(flags, bytecode_path, witness_path, "", proof_output_dir); + + // Check that proof and VK files were created + EXPECT_TRUE(std::filesystem::exists(proof_output_dir / "proof")); + EXPECT_TRUE(std::filesystem::exists(proof_output_dir / "public_inputs")); + EXPECT_TRUE(std::filesystem::exists(proof_output_dir / "vk")); + EXPECT_TRUE(std::filesystem::exists(proof_output_dir / "vk_hash")); + + // Verify the proof + bool verified = + api.verify(flags, proof_output_dir / "public_inputs", proof_output_dir / "proof", proof_output_dir / "vk"); + EXPECT_TRUE(verified); +} + +TEST_F(ApiUltraHonkTest, ProveAndVerifyWithFields) +{ + auto [bytecode_path, witness_path] = create_test_circuit_files(test_dir); + + // First generate VK for the prove step + API::Flags vk_flags; + vk_flags.oracle_hash_type 
= "poseidon2"; + + UltraHonkAPI api; + + auto vk_output_path = test_dir / "vk"; + std::filesystem::create_directories(vk_output_path); + api.write_vk(vk_flags, bytecode_path, vk_output_path); + EXPECT_TRUE(std::filesystem::exists(vk_output_path / "vk")); + + // Now test proof generation + API::Flags flags; + flags.oracle_hash_type = "poseidon2"; + + // Generate proof + auto proof_output_dir = test_dir / "proof"; + std::filesystem::create_directories(proof_output_dir); + api.prove(flags, bytecode_path, witness_path, vk_output_path / "vk", proof_output_dir); + + // Check that proof files were created + EXPECT_TRUE(std::filesystem::exists(proof_output_dir / "proof")); + EXPECT_TRUE(std::filesystem::exists(proof_output_dir / "public_inputs")); +} + +TEST_F(ApiUltraHonkTest, ProveWithDifferentSettings) +{ + auto [bytecode_path, witness_path] = create_test_circuit_files(test_dir); + + // Test different oracle hash types + const std::vector> test_cases = { { "poseidon2", + false }, // oracle_hash_type, disable_zk + { "poseidon2", true }, + { "keccak", false }, + { "keccak", true } }; + + for (const auto& [oracle_hash_type, disable_zk] : test_cases) { + API::Flags flags; + flags.oracle_hash_type = oracle_hash_type; + flags.disable_zk = disable_zk; + flags.write_vk = true; + + auto case_dir = test_dir / (oracle_hash_type + "_" + (disable_zk ? 
"no_zk" : "zk")); + std::filesystem::create_directories(case_dir); + + UltraHonkAPI api; + + // Generate proof + api.prove(flags, bytecode_path, witness_path, "", case_dir); + + // Verify the proof + bool verified = api.verify(flags, case_dir / "public_inputs", case_dir / "proof", case_dir / "vk"); + EXPECT_TRUE(verified) << "Failed with oracle_hash_type=" << oracle_hash_type << ", disable_zk=" << disable_zk; + } +} + +TEST_F(ApiUltraHonkTest, WriteVk) +{ + auto [bytecode_path, witness_path] = create_test_circuit_files(test_dir); + API::Flags flags; + flags.oracle_hash_type = "poseidon2"; + + UltraHonkAPI api; + api.write_vk(flags, bytecode_path, test_dir); + + // Test against bbapi::CircuitComputeVk + auto bytecode = read_file(bytecode_path); + auto expected_vk = + bbapi::CircuitComputeVk({ .circuit = { .bytecode = bb::decompress(bytecode.data(), bytecode.size()) }, + .settings = { .oracle_hash_type = flags.oracle_hash_type } }) + .execute(); + + info("after write_vk, expected_vk size: {}", expected_vk.bytes.size()); + EXPECT_EQ(expected_vk.bytes, read_file(test_dir / "vk")); + EXPECT_EQ(expected_vk.hash, read_file(test_dir / "vk_hash")); + + // Verify round-trip: decode the VK and check that to_field_elements() matches + auto vk_from_bytes = from_buffer(expected_vk.bytes); + auto vk_from_file = from_buffer(read_file(test_dir / "vk")); + EXPECT_EQ(vk_from_bytes.to_field_elements(), vk_from_file.to_field_elements()); +} + +// NOTE: very light test +TEST_F(ApiUltraHonkTest, GatesWithOpcodesSmokeTest) +{ + auto [bytecode_path, witness_path] = create_test_circuit_files(test_dir); + + // Capture stdout + testing::internal::CaptureStdout(); + + API::Flags flags; + flags.oracle_hash_type = "poseidon2"; + flags.include_gates_per_opcode = true; + UltraHonkAPI api; + api.gates(flags, bytecode_path); + + std::string output = testing::internal::GetCapturedStdout(); + + // Check that output contains per-opcode information + EXPECT_TRUE(output.find("gates_per_opcode") != 
std::string::npos); +} diff --git a/barretenberg/cpp/src/barretenberg/api/aztec_process.cpp b/barretenberg/cpp/src/barretenberg/api/aztec_process.cpp new file mode 100644 index 000000000000..62c4e8706a5f --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/api/aztec_process.cpp @@ -0,0 +1,316 @@ +#ifndef __wasm__ +#include "aztec_process.hpp" +#include "barretenberg/api/file_io.hpp" +#include "barretenberg/bbapi/bbapi_client_ivc.hpp" +#include "barretenberg/common/base64.hpp" +#include "barretenberg/common/get_bytecode.hpp" +#include "barretenberg/common/thread.hpp" +#include "barretenberg/common/throw_or_abort.hpp" +#include "barretenberg/common/version.hpp" +#include "barretenberg/crypto/sha256/sha256.hpp" +#include +#include +#include +#include +#include + +#ifdef ENABLE_AVM_TRANSPILER +// Include avm_transpiler header +#include +#endif + +namespace bb { + +namespace { + +/** + * @brief Extract and decode bytecode from a function JSON object + */ +std::vector extract_bytecode(const nlohmann::json& function) +{ + if (!function.contains("bytecode")) { + throw_or_abort("Function missing bytecode field"); + } + + const auto& base64_bytecode = function["bytecode"].get(); + return decode_bytecode(base64_bytecode); +} + +/** + * @brief Compute SHA256 hash of bytecode and return as hex string + */ +std::string compute_bytecode_hash(const std::vector& bytecode) +{ + auto hash = crypto::sha256(bytecode); + std::ostringstream oss; + for (auto byte : hash) { + oss << std::hex << std::setw(2) << std::setfill('0') << static_cast(byte); + } + return oss.str(); +} + +/** + * @brief Get cache directory path (~/.bb/vk_cache/) + */ +std::filesystem::path get_cache_dir() +{ + const char* home = std::getenv("HOME"); + if (!home) { + home = "."; + } + std::filesystem::path cache_dir = std::filesystem::path(home) / ".bb" / BB_VERSION_PLACEHOLDER / "vk_cache"; + std::filesystem::create_directories(cache_dir); + return cache_dir; +} + +/** + * @brief Check if a function is a private 
constrained function + */ +bool is_private_constrained_function(const nlohmann::json& function) +{ + bool is_public = false; + bool is_unconstrained = false; + + // Check custom_attributes for "public" + if (function.contains("custom_attributes") && function["custom_attributes"].is_array()) { + for (const auto& attr : function["custom_attributes"]) { + if (attr.is_string() && attr.get() == "public") { + is_public = true; + break; + } + } + } + + // Check is_unconstrained + if (function.contains("is_unconstrained") && function["is_unconstrained"].is_boolean()) { + is_unconstrained = function["is_unconstrained"].get(); + } + + return !is_public && !is_unconstrained; +} + +/** + * @brief Get cached VK or generate if missing + */ +std::vector get_or_generate_cached_vk(const std::filesystem::path& cache_dir, + const std::string& circuit_name, + const std::vector& bytecode, + bool force) +{ + std::string hash_str = compute_bytecode_hash(bytecode); + std::filesystem::path vk_cache_path = cache_dir / (hash_str + ".vk"); + + // Check cache unless force is true + if (!force && std::filesystem::exists(vk_cache_path)) { + info("Verification key already in cache: ", hash_str); + return read_file(vk_cache_path); + } + + // Generate new VK + info("Generating verification key: ", hash_str); + auto response = + bbapi::ClientIvcComputeStandaloneVk{ .circuit = { .name = circuit_name, .bytecode = bytecode } }.execute(); + + // Cache the VK + write_file(vk_cache_path, response.bytes); + + return response.bytes; +} + +/** + * @brief Generate VKs for all functions in parallel + */ +void generate_vks_for_functions(const std::filesystem::path& cache_dir, + std::vector& functions, + bool force) +{ + // Generate VKs in parallel (logging removed to avoid data races) + parallel_for(functions.size(), [&](size_t i) { + auto* function = functions[i]; + std::string fn_name = (*function)["name"].get(); + + // Get bytecode from function + auto bytecode = extract_bytecode(*function); + + // Generate 
and cache VK (this will log internally if needed) + get_or_generate_cached_vk(cache_dir, fn_name, bytecode, force); + }); + + // Update JSON with VKs from cache (sequential is fine here, it's fast) + for (auto* function : functions) { + std::string fn_name = (*function)["name"].get(); + + // Get bytecode to compute hash + auto bytecode = extract_bytecode(*function); + + // Read VK from cache + std::string hash_str = compute_bytecode_hash(bytecode); + std::filesystem::path vk_cache_path = cache_dir / (hash_str + ".vk"); + auto vk_data = read_file(vk_cache_path); + + // Encode to base64 and store in JSON + std::string encoded_vk = base64_encode(vk_data.data(), vk_data.size(), false); + (*function)["verification_key"] = encoded_vk; + } +} + +} // anonymous namespace + +/** + * @brief Transpile the artifact file (or copy if transpiler not enabled) + */ +bool transpile_artifact(const std::string& input_path, const std::string& output_path) +{ +#ifdef ENABLE_AVM_TRANSPILER + info("Transpiling: ", input_path, " -> ", output_path); + + auto result = avm_transpile_file(input_path.c_str(), output_path.c_str()); + + if (result.success == 0) { + if (result.error_message) { + std::string error_msg(result.error_message); + if (error_msg == "Contract already transpiled") { + // Already transpiled, copy if different paths + if (input_path != output_path) { + std::filesystem::copy_file( + input_path, output_path, std::filesystem::copy_options::overwrite_existing); + } + } else { + info("Transpilation failed: ", error_msg); + avm_free_result(&result); + return false; + } + } else { + info("Transpilation failed"); + avm_free_result(&result); + return false; + } + } + + avm_free_result(&result); + + info("Transpiled: ", input_path, " -> ", output_path); +#else + // If transpiler is not enabled, just copy the file + info("Warning: AVM Transpiler is not enabled. 
Skipping transpilation."); + if (input_path != output_path) { + std::filesystem::copy_file(input_path, output_path, std::filesystem::copy_options::overwrite_existing); + } +#endif + return true; +} + +bool process_aztec_artifact(const std::string& input_path, const std::string& output_path, bool force) +{ + if (!transpile_artifact(input_path, output_path)) { + return false; + } + + // Verify output exists + if (!std::filesystem::exists(output_path)) { + throw_or_abort("Output file does not exist after transpilation"); + } + + // Step 2: Generate verification keys + auto cache_dir = get_cache_dir(); + info("Generating verification keys for functions in ", std::filesystem::path(output_path).filename().string()); + info("Cache directory: ", cache_dir.string()); + + // Read and parse artifact JSON + auto artifact_content = read_file(output_path); + std::string artifact_str(artifact_content.begin(), artifact_content.end()); + auto artifact_json = nlohmann::json::parse(artifact_str); + + if (!artifact_json.contains("functions")) { + info("Warning: No functions found in artifact"); + return true; + } + + // Filter to private constrained functions + std::vector private_functions; + for (auto& function : artifact_json["functions"]) { + if (is_private_constrained_function(function)) { + private_functions.push_back(&function); + } + } + + if (private_functions.empty()) { + info("No private constrained functions found"); + return true; + } + + // Generate VKs + generate_vks_for_functions(cache_dir, private_functions, force); + + // Write updated JSON back to file + std::ofstream out_file(output_path); + out_file << artifact_json.dump(2) << std::endl; + out_file.close(); + + info("Successfully processed: ", input_path, " -> ", output_path); + return true; +} + +std::vector find_contract_artifacts(const std::string& search_path) +{ + std::vector artifacts; + + // Recursively search for .json files in target/ directories, excluding cache/ + for (const auto& entry : 
std::filesystem::recursive_directory_iterator(search_path)) { + if (!entry.is_regular_file()) { + continue; + } + + const auto& path = entry.path(); + + // Must be a .json file + if (path.extension() != ".json") { + continue; + } + + // Must be in a target/ directory + std::string path_str = path.string(); + if (path_str.find("/target/") == std::string::npos && path_str.find("\\target\\") == std::string::npos) { + continue; + } + + // Exclude cache directories and function artifact temporaries + if (path_str.find("/cache/") != std::string::npos || path_str.find("\\cache\\") != std::string::npos || + path_str.find(".function_artifact_") != std::string::npos) { + continue; + } + + artifacts.push_back(path.string()); + } + + return artifacts; +} + +bool process_all_artifacts(const std::string& search_path, bool force) +{ + auto artifacts = find_contract_artifacts(search_path); + + if (artifacts.empty()) { + info("No contract artifacts found. Please compile your contracts first with 'nargo compile'."); + return false; + } + + info("Found ", artifacts.size(), " contract artifact(s) to process"); + + bool all_success = true; + for (const auto& artifact : artifacts) { + // Process in-place (input == output) + if (!process_aztec_artifact(artifact, artifact, force)) { + all_success = false; + } + } + + if (all_success) { + info("Contract postprocessing complete!"); + } + + return all_success; +} + +} // namespace bb +#endif diff --git a/barretenberg/cpp/src/barretenberg/api/aztec_process.hpp b/barretenberg/cpp/src/barretenberg/api/aztec_process.hpp new file mode 100644 index 000000000000..8e3b7bba9084 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/api/aztec_process.hpp @@ -0,0 +1,38 @@ +#ifndef __wasm__ +#pragma once +#include +#include +#include +#include + +namespace bb { + +/** + * @brief Process Aztec contract artifacts: transpile and generate verification keys + * + * @param input_path Path to input artifact JSON + * @param output_path Path to output artifact 
JSON (can be same as input) + * @param force Force regeneration even if cached + * @return true on success, false on failure + */ +bool process_aztec_artifact(const std::string& input_path, const std::string& output_path, bool force = false); + +/** + * @brief Find all contract artifacts in target/ directories + * + * @param search_path Root path to search from (defaults to current directory) + * @return Vector of paths to contract artifacts + */ +std::vector find_contract_artifacts(const std::string& search_path = "."); + +/** + * @brief Process all discovered contract artifacts in a directory tree + * + * @param search_path Root path to search from (defaults to current directory) + * @param force Force regeneration even if cached + * @return true if all artifacts processed successfully + */ +bool process_all_artifacts(const std::string& search_path = ".", bool force = false); + +} // namespace bb +#endif diff --git a/barretenberg/cpp/src/barretenberg/api/exec_pipe.hpp b/barretenberg/cpp/src/barretenberg/api/exec_pipe.hpp deleted file mode 100644 index 793001136d61..000000000000 --- a/barretenberg/cpp/src/barretenberg/api/exec_pipe.hpp +++ /dev/null @@ -1,30 +0,0 @@ -#pragma once -#include "barretenberg/common/throw_or_abort.hpp" -#include -#include -#include -#include -#include - -namespace bb { -inline std::vector exec_pipe([[maybe_unused]] const std::string& command) -{ -#ifdef __wasm__ - throw_or_abort("Can't use popen() in wasm! Implement this functionality natively."); -#else - // popen() with "r" captures only stdout; stderr is inherited unchanged. 
- std::unique_ptr pipe(popen(command.c_str(), "r"), pclose); // NOLINT - if (!pipe) { - throw_or_abort("popen() failed: '" + command + "' due to " + strerror(errno)); - } - - std::vector output; - uint8_t buf[4096]; // NOLINT - - while (size_t n = fread(buf, 1, sizeof(buf), pipe.get())) { - output.insert(output.end(), buf, buf + n); - } - return output; -#endif -} -} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/api/file_io.hpp b/barretenberg/cpp/src/barretenberg/api/file_io.hpp index d1029b6a388d..c5436170d0ca 100644 --- a/barretenberg/cpp/src/barretenberg/api/file_io.hpp +++ b/barretenberg/cpp/src/barretenberg/api/file_io.hpp @@ -1,12 +1,14 @@ #pragma once #include "barretenberg/common/log.hpp" #include "barretenberg/common/try_catch_shim.hpp" +#include "barretenberg/ecc/curves/bn254/fr.hpp" #include #include #include #include #include #include +#include #include #include #include @@ -84,4 +86,18 @@ inline void write_file(const std::string& filename, std::vector const& file.close(); } } + +template inline std::string field_elements_to_json(const std::vector& fields) +{ + std::stringstream ss; + ss << "["; + for (size_t i = 0; i < fields.size(); ++i) { + ss << '"' << fields[i] << '"'; + if (i != fields.size() - 1) { + ss << ","; + } + } + ss << "]"; + return ss.str(); +} } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/api/gate_count.hpp b/barretenberg/cpp/src/barretenberg/api/gate_count.hpp deleted file mode 100644 index b2e7fee03d9a..000000000000 --- a/barretenberg/cpp/src/barretenberg/api/gate_count.hpp +++ /dev/null @@ -1,66 +0,0 @@ -#pragma once -#include "barretenberg/api/acir_format_getters.hpp" -#include "barretenberg/constants.hpp" - -namespace bb { -/** - * @brief Computes the number of Barretenberg specific gates needed to create a proof for the specific ACIR circuit. - * - * Communication: - * - stdout: A JSON string of the number of ACIR opcodes and final backend circuit size. 
- * TODO(https://github.com/AztecProtocol/barretenberg/issues/1126): split this into separate Plonk and Honk functions as - * their gate count differs - * - * @param bytecode_path Path to the file containing the serialized circuit - */ -template -void gate_count(const std::string& bytecode_path, - bool recursive, - uint32_t honk_recursion, - bool include_gates_per_opcode) -{ - // All circuit reports will be built into the string below - std::string functions_string = "{\"functions\": [\n "; - auto constraint_systems = get_constraint_systems(bytecode_path); - - const acir_format::ProgramMetadata metadata{ .recursive = recursive, - .honk_recursion = honk_recursion, - .collect_gates_per_opcode = include_gates_per_opcode }; - size_t i = 0; - for (const auto& constraint_system : constraint_systems) { - acir_format::AcirProgram program{ constraint_system }; - auto builder = acir_format::create_circuit(program, metadata); - builder.finalize_circuit(/*ensure_nonzero=*/true); - size_t circuit_size = builder.get_finalized_total_circuit_size(); - vinfo("Calculated circuit size in gate_count: ", circuit_size); - - // Build individual circuit report - std::string gates_per_opcode_str; - for (size_t j = 0; j < program.constraints.gates_per_opcode.size(); j++) { - gates_per_opcode_str += std::to_string(program.constraints.gates_per_opcode[j]); - if (j != program.constraints.gates_per_opcode.size() - 1) { - gates_per_opcode_str += ","; - } - } - - auto result_string = format( - "{\n \"acir_opcodes\": ", - program.constraints.num_acir_opcodes, - ",\n \"circuit_size\": ", - circuit_size, - (include_gates_per_opcode ? 
format(",\n \"gates_per_opcode\": [", gates_per_opcode_str, "]") : ""), - "\n }"); - - // Attach a comma if there are more circuit reports to generate - if (i != (constraint_systems.size() - 1)) { - result_string = format(result_string, ","); - } - - functions_string = format(functions_string, result_string); - - i++; - } - std::cout << format(functions_string, "\n]}"); -} - -} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/api/get_bytecode.hpp b/barretenberg/cpp/src/barretenberg/api/get_bytecode.hpp deleted file mode 100644 index e7aafe124f58..000000000000 --- a/barretenberg/cpp/src/barretenberg/api/get_bytecode.hpp +++ /dev/null @@ -1,30 +0,0 @@ -#pragma once -#include "exec_pipe.hpp" -#include -#include -#include - -/** - * We can assume for now we're running on a unix like system and use the following to extract the bytecode. - */ -inline std::vector gunzip(const std::string& path) -{ - std::string command = "gunzip -c \"" + path + "\""; - return bb::exec_pipe(command); -} - -inline std::vector get_bytecode(const std::string& bytecodePath) -{ - if (bytecodePath == "-") { - return { (std::istreambuf_iterator(std::cin)), std::istreambuf_iterator() }; - } - std::filesystem::path filePath = bytecodePath; - if (filePath.extension() == ".json") { - // Try reading json files as if they are a Nargo build artifact - std::string command = "jq -r '.bytecode' \"" + bytecodePath + "\" | base64 -d | gunzip -c"; - return bb::exec_pipe(command); - } - - // For other extensions, assume file is a raw ACIR program - return gunzip(bytecodePath); -} diff --git a/barretenberg/cpp/src/barretenberg/api/prove_tube.cpp b/barretenberg/cpp/src/barretenberg/api/prove_tube.cpp deleted file mode 100644 index 8ca5ea58c610..000000000000 --- a/barretenberg/cpp/src/barretenberg/api/prove_tube.cpp +++ /dev/null @@ -1,116 +0,0 @@ -#include "prove_tube.hpp" -#include "barretenberg/api/file_io.hpp" -#include "barretenberg/common/map.hpp" -#include 
"barretenberg/honk/proof_system/types/proof.hpp" -#include "barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.hpp" -#include "barretenberg/stdlib/special_public_inputs/special_public_inputs.hpp" - -namespace bb { -/** - * @brief Creates a Honk Proof for the Tube circuit responsible for recursively verifying a ClientIVC proof. - * - * @param output_path the working directory from which the proof and verification data are read - * @param num_unused_public_inputs - */ -void prove_tube(const std::string& output_path, const std::string& vk_path) -{ - using namespace stdlib::recursion::honk; - - using Builder = UltraCircuitBuilder; - using StdlibProof = ClientIVCRecursiveVerifier::StdlibProof; - using HidingKernelIO = stdlib::recursion::honk::HidingKernelIO; - using RollupIO = stdlib::recursion::honk::RollupIO; - - std::string proof_path = output_path + "/proof"; - - // Read the proof and verification data from given files - auto proof = ClientIVC::Proof::from_file_msgpack(proof_path); - auto vk = from_buffer(read_file(vk_path)); - - auto builder = std::make_shared(); - - ClientIVCRecursiveVerifier verifier{ builder, vk }; - - StdlibProof stdlib_proof(*builder, proof); - ClientIVCRecursiveVerifier::Output client_ivc_rec_verifier_output = verifier.verify(stdlib_proof); - - // The public inputs in the proof are propagated to the base rollup by making them public inputs of this circuit. 
- // Exclude the public inputs of the Hiding Kernel: the pairing points are handled separately, the ecc op tables are - // not needed after this point - auto num_inner_public_inputs = vk.mega->num_public_inputs - HidingKernelIO::PUBLIC_INPUTS_SIZE; - for (size_t i = 0; i < num_inner_public_inputs; i++) { - stdlib_proof.mega_proof[i].set_public(); - } - - // IO - RollupIO inputs; - inputs.pairing_inputs = client_ivc_rec_verifier_output.points_accumulator; - inputs.ipa_claim = client_ivc_rec_verifier_output.opening_claim; - inputs.set_public(); - - // The tube only calls an IPA recursive verifier once, so we can just add this IPA proof - builder->ipa_proof = client_ivc_rec_verifier_output.ipa_proof.get_value(); - BB_ASSERT_EQ(builder->ipa_proof.size(), IPA_PROOF_LENGTH, "IPA proof should be set."); - - using Prover = UltraProver_; - using Verifier = UltraVerifier_; - auto proving_key = std::make_shared>(*builder); - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1201): Precompute tube vk and pass it in. 
- info("WARNING: computing tube vk in prove_tube, but a precomputed vk should be passed in."); - auto tube_verification_key = std::make_shared(proving_key->get_precomputed()); - - Prover tube_prover{ proving_key, tube_verification_key }; - auto tube_proof = tube_prover.construct_proof(); - std::string tubePublicInputsPath = output_path + "/public_inputs"; - std::string tubeProofPath = output_path + "/proof"; - PublicInputsAndProof public_inputs_and_proof{ - PublicInputsVector(tube_proof.begin(), - tube_proof.begin() + static_cast(num_inner_public_inputs)), - HonkProof(tube_proof.begin() + static_cast(num_inner_public_inputs), tube_proof.end()) - }; - write_file(tubePublicInputsPath, to_buffer(public_inputs_and_proof.public_inputs)); - write_file(tubeProofPath, to_buffer(public_inputs_and_proof.proof)); - - std::string tubePublicInputsAsFieldsPath = output_path + "/public_inputs_fields.json"; - std::string tubeProofAsFieldsPath = output_path + "/proof_fields.json"; - const auto to_json = [](const std::vector& data) { - if (data.empty()) { - return std::string("[]"); - } - return format("[", join(transform::map(data, [](auto fr) { return format("\"", fr, "\""); })), "]"); - }; - auto public_inputs_data = to_json(public_inputs_and_proof.public_inputs); - auto proof_data = to_json(public_inputs_and_proof.proof); - write_file(tubePublicInputsAsFieldsPath, { public_inputs_data.begin(), public_inputs_data.end() }); - write_file(tubeProofAsFieldsPath, { proof_data.begin(), proof_data.end() }); - - std::string tubeVkPath = output_path + "/vk"; - write_file(tubeVkPath, to_buffer(tube_verification_key)); - - std::string tubeAsFieldsVkPath = output_path + "/vk_fields.json"; - auto field_els = tube_verification_key->to_field_elements(); - info("verificaton key length in fields:", field_els.size()); - auto data = to_json(field_els); - write_file(tubeAsFieldsVkPath, { data.begin(), data.end() }); - - info("Native verification of the tube_proof"); - VerifierCommitmentKey 
ipa_verification_key(1 << CONST_ECCVM_LOG_N); - Verifier tube_verifier(tube_verification_key, ipa_verification_key); - - // Break up the tube proof into the honk portion and the ipa portion - const size_t HONK_PROOF_LENGTH_WITHOUT_INNER_PUB_INPUTS = - UltraRollupFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS() + RollupIO::PUBLIC_INPUTS_SIZE; - // The extra calculation is for the IPA proof length. - BB_ASSERT_EQ(tube_proof.size(), - HONK_PROOF_LENGTH_WITHOUT_INNER_PUB_INPUTS + num_inner_public_inputs, - "In prove_tube, tube proof length is incorrect."); - // split out the ipa proof - const std::ptrdiff_t honk_proof_with_pub_inputs_length = static_cast( - HONK_PROOF_LENGTH_WITHOUT_INNER_PUB_INPUTS - IPA_PROOF_LENGTH + num_inner_public_inputs); - auto ipa_proof = HonkProof(tube_proof.begin() + honk_proof_with_pub_inputs_length, tube_proof.end()); - auto tube_honk_proof = HonkProof(tube_proof.begin(), tube_proof.end() + honk_proof_with_pub_inputs_length); - bool verified = tube_verifier.template verify_proof(tube_honk_proof, ipa_proof).result; - info("Tube proof verification: ", verified); -} - -} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/api/prove_tube.hpp b/barretenberg/cpp/src/barretenberg/api/prove_tube.hpp deleted file mode 100644 index 0e01546b6436..000000000000 --- a/barretenberg/cpp/src/barretenberg/api/prove_tube.hpp +++ /dev/null @@ -1,14 +0,0 @@ -#pragma once -#include - -namespace bb { -/** - * @brief Creates a Honk Proof for the Tube circuit responsible for recursively verifying a ClientIVC proof. 
- * - * @param output_path the working directory from which the proof is read and output is written - * @param vk_path the path to the verification key data to use when proving (this is the one of two ClientIVC VKs, - * public or private tail) - */ -void prove_tube(const std::string& output_path, const std::string& vk_path); - -} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/api/write_prover_output.hpp b/barretenberg/cpp/src/barretenberg/api/write_prover_output.hpp deleted file mode 100644 index 538263f5cdf7..000000000000 --- a/barretenberg/cpp/src/barretenberg/api/write_prover_output.hpp +++ /dev/null @@ -1,205 +0,0 @@ -#pragma once -#include "barretenberg/api/file_io.hpp" -#include "barretenberg/api/log.hpp" -#include "barretenberg/common/container.hpp" -#include "barretenberg/common/log.hpp" -#include "barretenberg/common/map.hpp" -#include "barretenberg/ecc/curves/bn254/fr.hpp" -#include "barretenberg/honk/proof_system/types/proof.hpp" -#include - -namespace bb { - -inline std::string field_elements_to_json(const std::vector& fields) -{ - std::stringstream ss; - ss << "["; - for (size_t i = 0; i < fields.size(); ++i) { - ss << '"' << fields[i] << '"'; - if (i < fields.size() - 1) { - ss << ","; - } - } - ss << "]"; - return ss.str(); -} - -template struct PubInputsProofAndKey { - PublicInputsVector public_inputs; - typename Flavor::Transcript::Proof proof; - std::shared_ptr key; - fr vk_hash; -}; - -template std::string to_json(const std::vector& data) -{ - if (data.empty()) { - return std::string("[]"); - } - return format("[", join(transform::map(data, [](const T& el) { return format("\"", el, "\""); })), "]"); -} - -template -void write(const ProverOutput& prover_output, - const std::string& output_format, - const std::string& output_content, - const std::filesystem::path& output_dir) -{ - enum class ObjectToWrite : size_t { PUBLIC_INPUTS, PROOF, VK, VK_HASH }; - const bool output_to_stdout = output_dir == "-"; - - const auto to_json_fr = 
[](const bb::fr& fr) { return format("\"", fr, "\""); }; - - const auto write_bytes = [&](const ObjectToWrite& obj) { - switch (obj) { - case ObjectToWrite::PUBLIC_INPUTS: { - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1312): Try to avoid include_size=true, which is - // used for deserialization. - const auto buf = to_buffer(prover_output.public_inputs); - if (output_to_stdout) { - write_bytes_to_stdout(buf); - } else { - write_file(output_dir / "public_inputs", buf); - info("Public inputs saved to ", output_dir / "public_inputs"); - } - break; - } - case ObjectToWrite::PROOF: { - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1312): Try to avoid include_size=true, which is - // used for deserialization. - const auto buf = to_buffer(prover_output.proof); - if (output_to_stdout) { - write_bytes_to_stdout(buf); - } else { - write_file(output_dir / "proof", buf); - info("Proof saved to ", output_dir / "proof"); - } - break; - } - case ObjectToWrite::VK: { - const auto buf = to_buffer(prover_output.key); - if (output_to_stdout) { - write_bytes_to_stdout(buf); - } else { - write_file(output_dir / "vk", buf); - info("VK saved to ", output_dir / "vk"); - } - break; - } - case ObjectToWrite::VK_HASH: { - const auto buf = to_buffer(prover_output.vk_hash); - if (output_to_stdout) { - write_bytes_to_stdout(buf); - } else { - write_file(output_dir / "vk_hash", buf); - info("VK Hash saved to ", output_dir / "vk_hash"); - } - break; - } - } - }; - - const auto write_fields = [&](const ObjectToWrite& obj) { - switch (obj) { - case ObjectToWrite::PUBLIC_INPUTS: { - const std::string public_inputs_json = to_json(prover_output.public_inputs); - if (output_to_stdout) { - std::cout << public_inputs_json; - } else { - write_file(output_dir / "public_inputs_fields.json", - { public_inputs_json.begin(), public_inputs_json.end() }); - info("Public inputs fields saved to ", output_dir / "public_inputs_fields.json"); - } - break; - } - case 
ObjectToWrite::PROOF: { - const std::string proof_json = to_json(prover_output.proof); - if (output_to_stdout) { - std::cout << proof_json; - } else { - write_file(output_dir / "proof_fields.json", { proof_json.begin(), proof_json.end() }); - info("Proof fields saved to ", output_dir / "proof_fields.json"); - } - break; - } - case ObjectToWrite::VK: { - const std::string vk_json = to_json(prover_output.key->to_field_elements()); - if (output_to_stdout) { - std::cout << vk_json; - } else { - write_file(output_dir / "vk_fields.json", { vk_json.begin(), vk_json.end() }); - info("VK fields saved to ", output_dir / "vk_fields.json"); - } - break; - } - case ObjectToWrite::VK_HASH: { - const std::string vk_hash_json = to_json_fr(prover_output.vk_hash); - if (output_to_stdout) { - std::cout << vk_hash_json; - } else { - write_file(output_dir / "vk_hash_fields.json", { vk_hash_json.begin(), vk_hash_json.end() }); - info("VK Hash fields saved to ", output_dir / "vk_hash_fields.json"); - } - break; - } - } - }; - - if (output_content == "proof") { - if (output_format == "bytes") { - write_bytes(ObjectToWrite::PUBLIC_INPUTS); - write_bytes(ObjectToWrite::PROOF); - } else if (output_format == "fields") { - write_fields(ObjectToWrite::PUBLIC_INPUTS); - write_fields(ObjectToWrite::PROOF); - } else if (output_format == "bytes_and_fields") { - write_bytes(ObjectToWrite::PUBLIC_INPUTS); - write_fields(ObjectToWrite::PUBLIC_INPUTS); - write_bytes(ObjectToWrite::PROOF); - write_fields(ObjectToWrite::PROOF); - } else { - throw_or_abort("Invalid output_format for output_content proof"); - } - } else if (output_content == "vk") { - if (output_format == "bytes") { - write_bytes(ObjectToWrite::VK); - write_bytes(ObjectToWrite::VK_HASH); - } else if (output_format == "fields") { - write_fields(ObjectToWrite::VK); - write_fields(ObjectToWrite::VK_HASH); - } else if (output_format == "bytes_and_fields") { - write_bytes(ObjectToWrite::VK); - write_bytes(ObjectToWrite::VK_HASH); - 
write_fields(ObjectToWrite::VK); - write_fields(ObjectToWrite::VK_HASH); - } else { - throw_or_abort("Invalid output_format for output_content vk"); - } - } else if (output_content == "proof_and_vk") { - if (output_format == "bytes") { - write_bytes(ObjectToWrite::PUBLIC_INPUTS); - write_bytes(ObjectToWrite::PROOF); - write_bytes(ObjectToWrite::VK); - write_bytes(ObjectToWrite::VK_HASH); - } else if (output_format == "fields") { - write_fields(ObjectToWrite::PUBLIC_INPUTS); - write_fields(ObjectToWrite::PROOF); - write_fields(ObjectToWrite::VK); - write_fields(ObjectToWrite::VK_HASH); - } else if (output_format == "bytes_and_fields") { - write_bytes(ObjectToWrite::PUBLIC_INPUTS); - write_fields(ObjectToWrite::PUBLIC_INPUTS); - write_bytes(ObjectToWrite::PROOF); - write_fields(ObjectToWrite::PROOF); - write_bytes(ObjectToWrite::VK); - write_bytes(ObjectToWrite::VK_HASH); - write_fields(ObjectToWrite::VK); - write_fields(ObjectToWrite::VK_HASH); - } else { - throw_or_abort("Invalid output_format for output_content proof_and_vk"); - } - } else { - throw_or_abort("Invalid std::string"); - } -} -} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/bb/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/bb/CMakeLists.txt index b35f2ad5c103..5e9720a30de1 100644 --- a/barretenberg/cpp/src/barretenberg/bb/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/bb/CMakeLists.txt @@ -1,29 +1,28 @@ -# Used also by bb_cli_bench -barretenberg_module( - bb-cli-lib - barretenberg - env - api - circuit_checker - ${TRACY_LIBS} - libdeflate::libdeflate_static) - -if(NOT DISABLE_AZTEC_VM) - add_dependencies(bb-cli-lib vm2) -endif() - if (NOT(FUZZING)) add_executable( bb main.cpp + cli.cpp ) target_link_libraries( bb PRIVATE - bb-cli-lib + barretenberg + env + api + circuit_checker + ${TRACY_LIBS} + libdeflate::libdeflate_static tracy_mem ) - if(CHECK_CIRCUIT_STACKTRACES) + # Link avm_transpiler when library is provided + if(AVM_TRANSPILER_LIB) + target_link_libraries(bb 
PRIVATE ${AVM_TRANSPILER_LIB}) + endif() + if(NOT DISABLE_AZTEC_VM) + add_dependencies(bb vm2) + endif() + if(ENABLE_STACKTRACES) target_link_libraries( bb PUBLIC diff --git a/barretenberg/cpp/src/barretenberg/bb/cli.cpp b/barretenberg/cpp/src/barretenberg/bb/cli.cpp index 91a4704f59a8..4a5c15a0764b 100644 --- a/barretenberg/cpp/src/barretenberg/bb/cli.cpp +++ b/barretenberg/cpp/src/barretenberg/bb/cli.cpp @@ -18,12 +18,15 @@ #include "barretenberg/api/api_client_ivc.hpp" #include "barretenberg/api/api_msgpack.hpp" #include "barretenberg/api/api_ultra_honk.hpp" -#include "barretenberg/api/gate_count.hpp" -#include "barretenberg/api/prove_tube.hpp" +#include "barretenberg/api/aztec_process.hpp" +#include "barretenberg/api/file_io.hpp" #include "barretenberg/bb/cli11_formatter.hpp" #include "barretenberg/bbapi/bbapi.hpp" +#include "barretenberg/bbapi/bbapi_ultra_honk.hpp" #include "barretenberg/bbapi/c_bind.hpp" +#include "barretenberg/common/op_count.hpp" #include "barretenberg/common/thread.hpp" +#include "barretenberg/common/version.hpp" #include "barretenberg/flavor/ultra_rollup_flavor.hpp" #include "barretenberg/srs/factories/native_crs_factory.hpp" #include "barretenberg/srs/global_crs.hpp" @@ -31,9 +34,6 @@ #include namespace bb { -// This is updated in-place by bootstrap.sh during the release process. This prevents -// the version string from needing to be present at build-time, simplifying e.g. caching. -const char* const BB_VERSION_PLACEHOLDER = "00000000.00000000.00000000"; // TODO(https://github.com/AztecProtocol/barretenberg/issues/1257): Remove unused/seemingly unnecessary flags. // TODO(https://github.com/AztecProtocol/barretenberg/issues/1258): Improve defaults. 
@@ -109,6 +109,11 @@ int parse_and_run_cli_command(int argc, char* argv[]) #else name += "\nAztec Virtual Machine (AVM): enabled"; #endif +#ifdef ENABLE_AVM_TRANSPILER + name += "\nAVM Transpiler: enabled"; +#else + name += "\nAVM Transpiler: disabled"; +#endif #ifdef STARKNET_GARAGA_FLAVORS name += "\nStarknet Garaga Extensions: enabled"; #else @@ -140,7 +145,6 @@ int parse_and_run_cli_command(int argc, char* argv[]) std::filesystem::path vk_path{ "./target/vk" }; flags.scheme = ""; flags.oracle_hash_type = "poseidon2"; - flags.output_format = "bytes"; flags.crs_path = srs::bb_crs_path(); flags.include_gates_per_opcode = false; const auto add_output_path_option = [&](CLI::App* subcommand, auto& _output_path) { @@ -153,18 +157,9 @@ int parse_and_run_cli_command(int argc, char* argv[]) * Subcommand: Adders for options that we will create for more than one subcommand ***************************************************************************************************************/ - const auto add_recursive_flag = [&](CLI::App* subcommand) { + const auto add_ipa_accumulation_flag = [&](CLI::App* subcommand) { return subcommand->add_flag( - "--recursive", flags.recursive, "Do some things relating to recursive verification and KZG..."); - }; - - const auto add_honk_recursion_option = [&](CLI::App* subcommand) { - return subcommand->add_option( - "--honk_recursion", - flags.honk_recursion, - "Instruct the prover that this circuit will be recursively verified with " - "UltraHonk (1) or with UltraRollupHonk (2). 
Ensures a pairing point accumulator " - "(and additionally an IPA claim when UltraRollupHonk) is added to the public inputs of the proof."); + "--ipa_accumulation", flags.ipa_accumulation, "Accumulate/Aggregate IPA (Inner Product Argument) claims"); }; const auto add_scheme_option = [&](CLI::App* subcommand) { @@ -200,38 +195,16 @@ int parse_and_run_cli_command(int argc, char* argv[]) ->check(CLI::IsMember({ "poseidon2", "keccak", "starknet" }).name("is_member")); }; - const auto add_output_format_option = [&](CLI::App* subcommand) { - return subcommand - ->add_option( - "--output_format", - flags.output_format, - "The type of the data to be written by the command. If bytes, output the raw bytes prefixed with " - "header information for deserialization. If fields, output a string representation of an array of " - "field elements. If bytes_and_fields do both. If fields_msgpack, outputs a msgpack buffer of Fr " - "elements.") - ->check(CLI::IsMember({ "bytes", "fields", "bytes_and_fields", "fields_msgpack" }).name("is_member")); - }; - const auto add_write_vk_flag = [&](CLI::App* subcommand) { return subcommand->add_flag("--write_vk", flags.write_vk, "Write the provided circuit's verification key"); }; - const auto add_ipa_accumulation_flag = [&](CLI::App* subcommand) { - return subcommand->add_flag( - "--ipa_accumulation", flags.ipa_accumulation, "Accumulate/Aggregate IPA (Inner Product Argument) claims"); - }; - const auto remove_zk_option = [&](CLI::App* subcommand) { return subcommand->add_flag("--disable_zk", flags.disable_zk, "Use a non-zk version of --scheme. 
This flag is set to false by default."); }; - const auto add_init_kzg_accumulator_option = [&](CLI::App* subcommand) { - return subcommand->add_flag( - "--init_kzg_accumulator", flags.init_kzg_accumulator, "Initialize pairing point accumulator."); - }; - const auto add_bytecode_path_option = [&](CLI::App* subcommand) { subcommand->add_option("--bytecode_path, -b", bytecode_path, "Path to ACIR bytecode generated by Noir.") /* ->check(CLI::ExistingFile) OR stdin indicator - */; @@ -271,9 +244,11 @@ int parse_and_run_cli_command(int argc, char* argv[]) "recursive verifier) or is it for an ivc verifier? `standalone` produces a verification key " "is sufficient for verifying proofs about a single circuit (including the non-encsapsulated " "use case where an IVC scheme is manually constructed via recursive UltraHonk proof " - "verification). `ivc` produces a verification key for verifying the stack of run though a " - "dedicated ivc verifier class (currently the only option is the ClientIVC class) ") - ->check(CLI::IsMember({ "standalone", "ivc" }).name("is_member")); + "verification). `standalone_hiding` is similar to `standalone` but is used for the last step " + "where the structured trace is not utilized. 
`ivc` produces a verification key for verifying " + "the stack of run though a dedicated ivc verifier class (currently the only option is the " + "ClientIVC class)") + ->check(CLI::IsMember({ "standalone", "standalone_hiding", "ivc" }).name("is_member")); }; const auto add_verbose_flag = [&](CLI::App* subcommand) { @@ -299,6 +274,21 @@ int parse_and_run_cli_command(int argc, char* argv[]) return subcommand->add_flag("--update_inputs", flags.update_inputs, "Update inputs if vk check fails."); }; + const auto add_optimized_solidity_verifier_flag = [&](CLI::App* subcommand) { + return subcommand->add_flag( + "--optimized", flags.optimized_solidity_verifier, "Use the optimized Solidity verifier."); + }; + + bool print_op_counts = false; + const auto add_print_op_counts_flag = [&](CLI::App* subcommand) { + return subcommand->add_flag("--print_op_counts", print_op_counts, "Print op counts to json on one line."); + }; + + std::string op_counts_out; + const auto add_op_counts_out_option = [&](CLI::App* subcommand) { + return subcommand->add_option("--op_counts_out", op_counts_out, "Path to write the op counts in a json."); + }; + /*************************************************************************************************************** * Top-level flags ***************************************************************************************************************/ @@ -336,8 +326,9 @@ int parse_and_run_cli_command(int argc, char* argv[]) add_scheme_option(gates); add_verbose_flag(gates); add_bytecode_path_option(gates); - add_honk_recursion_option(gates); add_include_gates_per_opcode_flag(gates); + add_oracle_hash_option(gates); + add_ipa_accumulation_flag(gates); /*************************************************************************************************************** * Subcommand: prove @@ -350,19 +341,16 @@ int parse_and_run_cli_command(int argc, char* argv[]) add_output_path_option(prove, output_path); add_ivc_inputs_path_options(prove); 
add_vk_path_option(prove); - add_verbose_flag(prove); add_debug_flag(prove); add_crs_path_option(prove); add_oracle_hash_option(prove); - add_output_format_option(prove); add_write_vk_flag(prove); - remove_zk_option(prove); - add_init_kzg_accumulator_option(prove); add_ipa_accumulation_flag(prove); - add_recursive_flag(prove); - add_honk_recursion_option(prove); + remove_zk_option(prove); add_slow_low_memory_flag(prove); + add_print_op_counts_flag(prove); + add_op_counts_out_option(prove); prove->add_flag("--verify", "Verify the proof natively, resulting in a boolean output. Useful for testing."); @@ -382,13 +370,9 @@ int parse_and_run_cli_command(int argc, char* argv[]) add_verbose_flag(write_vk); add_debug_flag(write_vk); - add_output_format_option(write_vk); add_crs_path_option(write_vk); - add_init_kzg_accumulator_option(write_vk); add_oracle_hash_option(write_vk); add_ipa_accumulation_flag(write_vk); - add_honk_recursion_option(write_vk); - add_recursive_flag(write_vk); add_verifier_type_option(write_vk)->default_val("standalone"); remove_zk_option(write_vk); @@ -408,9 +392,6 @@ int parse_and_run_cli_command(int argc, char* argv[]) add_oracle_hash_option(verify); remove_zk_option(verify); add_ipa_accumulation_flag(verify); - add_init_kzg_accumulator_option(verify); - add_honk_recursion_option(verify); - add_recursive_flag(verify); /*************************************************************************************************************** * Subcommand: write_solidity_verifier @@ -428,89 +409,7 @@ int parse_and_run_cli_command(int argc, char* argv[]) add_verbose_flag(write_solidity_verifier); remove_zk_option(write_solidity_verifier); add_crs_path_option(write_solidity_verifier); - - /*************************************************************************************************************** - * Subcommand: OLD_API - ***************************************************************************************************************/ - CLI::App* OLD_API = 
app.add_subcommand("OLD_API", "Access some old API commands"); - - /*************************************************************************************************************** - * Subcommand: OLD_API gates_for_ivc - ***************************************************************************************************************/ - CLI::App* OLD_API_gates_for_ivc = OLD_API->add_subcommand("gates_for_ivc", ""); - add_verbose_flag(OLD_API_gates_for_ivc); - add_debug_flag(OLD_API_gates_for_ivc); - add_crs_path_option(OLD_API_gates_for_ivc); - add_bytecode_path_option(OLD_API_gates_for_ivc); - - /*************************************************************************************************************** - * Subcommand: OLD_API gates_mega_honk - ***************************************************************************************************************/ - CLI::App* OLD_API_gates_mega_honk = OLD_API->add_subcommand("gates_mega_honk", ""); - add_verbose_flag(OLD_API_gates_mega_honk); - add_debug_flag(OLD_API_gates_mega_honk); - add_crs_path_option(OLD_API_gates_mega_honk); - add_recursive_flag(OLD_API_gates_mega_honk); - add_honk_recursion_option(OLD_API_gates_mega_honk); - add_bytecode_path_option(OLD_API_gates_mega_honk); - - /*************************************************************************************************************** - * Subcommand: OLD_API write_arbitrary_valid_client_ivc_proof_and_vk_to_file - ***************************************************************************************************************/ - CLI::App* OLD_API_write_arbitrary_valid_client_ivc_proof_and_vk_to_file = - OLD_API->add_subcommand("write_arbitrary_valid_client_ivc_proof_and_vk_to_file", ""); - add_verbose_flag(OLD_API_write_arbitrary_valid_client_ivc_proof_and_vk_to_file); - add_debug_flag(OLD_API_write_arbitrary_valid_client_ivc_proof_and_vk_to_file); - add_crs_path_option(OLD_API_write_arbitrary_valid_client_ivc_proof_and_vk_to_file); - std::string 
arbitrary_valid_proof_path{ "./proofs/proof" }; - add_output_path_option(OLD_API_write_arbitrary_valid_client_ivc_proof_and_vk_to_file, arbitrary_valid_proof_path); - - /*************************************************************************************************************** - * Subcommand: OLD_API write_recursion_inputs_ultra_honk - ***************************************************************************************************************/ - CLI::App* OLD_API_write_recursion_inputs_ultra_honk = - OLD_API->add_subcommand("write_recursion_inputs_ultra_honk", ""); - add_verbose_flag(OLD_API_write_recursion_inputs_ultra_honk); - add_debug_flag(OLD_API_write_recursion_inputs_ultra_honk); - add_crs_path_option(OLD_API_write_recursion_inputs_ultra_honk); - std::string recursion_inputs_output_path{ "./target" }; - add_output_path_option(OLD_API_write_recursion_inputs_ultra_honk, recursion_inputs_output_path); - add_ipa_accumulation_flag(OLD_API_write_recursion_inputs_ultra_honk); - add_recursive_flag(OLD_API_write_recursion_inputs_ultra_honk); - add_bytecode_path_option(OLD_API_write_recursion_inputs_ultra_honk); - - /*************************************************************************************************************** - * Subcommand: OLD_API gates - ***************************************************************************************************************/ - CLI::App* OLD_API_gates = OLD_API->add_subcommand("gates", ""); - add_verbose_flag(OLD_API_gates); - add_debug_flag(OLD_API_gates); - add_crs_path_option(OLD_API_gates); - add_recursive_flag(OLD_API_gates); - add_honk_recursion_option(OLD_API_gates); - add_bytecode_path_option(OLD_API_gates); - - /*************************************************************************************************************** - * Subcommand: OLD_API verify - ***************************************************************************************************************/ - CLI::App* OLD_API_verify = 
OLD_API->add_subcommand("verify", ""); - add_verbose_flag(OLD_API_verify); - add_debug_flag(OLD_API_verify); - add_crs_path_option(OLD_API_verify); - add_bytecode_path_option(OLD_API_verify); - add_proof_path_option(OLD_API_verify); - add_vk_path_option(OLD_API_verify); - add_recursive_flag(OLD_API_verify); - - /*************************************************************************************************************** - * Subcommand: OLD_API prove_and_verify - ***************************************************************************************************************/ - CLI::App* OLD_API_prove_and_verify = OLD_API->add_subcommand("prove_and_verify", ""); - add_verbose_flag(OLD_API_prove_and_verify); - add_debug_flag(OLD_API_prove_and_verify); - add_crs_path_option(OLD_API_prove_and_verify); - add_recursive_flag(OLD_API_prove_and_verify); - add_bytecode_path_option(OLD_API_prove_and_verify); + add_optimized_solidity_verifier_flag(write_solidity_verifier); std::filesystem::path avm_inputs_path{ "./target/avm_inputs.bin" }; const auto add_avm_inputs_option = [&](CLI::App* subcommand) { @@ -521,6 +420,15 @@ int parse_and_run_cli_command(int argc, char* argv[]) return subcommand->add_option("--avm-public-inputs", avm_public_inputs_path, ""); }; + /*************************************************************************************************************** + * Subcommand: avm_simulate + ***************************************************************************************************************/ + CLI::App* avm_simulate_command = app.add_subcommand("avm_simulate", ""); + avm_simulate_command->group(""); // hide from list of subcommands + add_verbose_flag(avm_simulate_command); + add_debug_flag(avm_simulate_command); + add_avm_inputs_option(avm_simulate_command); + /*************************************************************************************************************** * Subcommand: avm_prove 
***************************************************************************************************************/ @@ -555,6 +463,29 @@ int parse_and_run_cli_command(int argc, char* argv[]) add_proof_path_option(avm_verify_command); add_vk_path_option(avm_verify_command); + /*************************************************************************************************************** + * Subcommand: aztec_process_artifact + ***************************************************************************************************************/ + CLI::App* aztec_process = app.add_subcommand( + "aztec_process", + "Process Aztec contract artifacts: transpile and generate verification keys for all private functions.\n" + "If input is a directory (and no output specified), recursively processes all artifacts found in the " + "directory."); + + std::string artifact_input_path; + std::string artifact_output_path; + bool force_regenerate = false; + + aztec_process->add_option( + "-i,--input", + artifact_input_path, + "Input artifact JSON path or directory to search (optional, defaults to current directory)"); + aztec_process->add_option( + "-o,--output", artifact_output_path, "Output artifact JSON path (optional, same as input if not specified)"); + aztec_process->add_flag("-f,--force", force_regenerate, "Force regeneration of verification keys"); + add_verbose_flag(aztec_process); + add_debug_flag(aztec_process); + /*************************************************************************************************************** * Subcommand: msgpack ***************************************************************************************************************/ @@ -573,30 +504,6 @@ int parse_and_run_cli_command(int argc, char* argv[]) msgpack_run_command->add_option( "-i,--input", msgpack_input_file, "Input file containing msgpack buffers (defaults to stdin)"); - /*************************************************************************************************************** - * 
Subcommand: prove_tube - ***************************************************************************************************************/ - CLI ::App* prove_tube_command = app.add_subcommand("prove_tube", ""); - prove_tube_command->group(""); // hide from list of subcommands - add_verbose_flag(prove_tube_command); - add_debug_flag(prove_tube_command); - add_crs_path_option(prove_tube_command); - add_vk_path_option(prove_tube_command); - std::string prove_tube_output_path{ "./target" }; - add_output_path_option(prove_tube_command, prove_tube_output_path); - - /*************************************************************************************************************** - * Subcommand: verify_tube - ***************************************************************************************************************/ - CLI::App* verify_tube_command = app.add_subcommand("verify_tube", ""); - verify_tube_command->group(""); // hide from list of subcommands - add_verbose_flag(verify_tube_command); - add_debug_flag(verify_tube_command); - add_crs_path_option(verify_tube_command); - // doesn't make sense that this is set by -o but that's how it was - std::string tube_proof_and_vk_path{ "./target" }; - add_output_path_option(verify_tube_command, tube_proof_and_vk_path); - /*************************************************************************************************************** * Build the CLI11 App ***************************************************************************************************************/ @@ -612,6 +519,14 @@ int parse_and_run_cli_command(int argc, char* argv[]) debug_logging = flags.debug; verbose_logging = debug_logging || flags.verbose; slow_low_memory = flags.slow_low_memory; +#ifndef __wasm__ + if (print_op_counts || !op_counts_out.empty()) { + bb::detail::use_op_count_time = true; + } + if (bb::detail::use_op_count_time) { + bb::detail::GLOBAL_OP_COUNTS.clear(); + } +#endif print_active_subcommands(app); info("Scheme is: ", flags.scheme, ", num 
threads: ", get_num_cpus()); @@ -658,18 +573,28 @@ int parse_and_run_cli_command(int argc, char* argv[]) if (msgpack_run_command->parsed()) { return execute_msgpack_run(msgpack_input_file); } - // TUBE - if (prove_tube_command->parsed()) { - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1201): Potentially remove this extra logic. - prove_tube(prove_tube_output_path, vk_path); - } else if (verify_tube_command->parsed()) { - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1322): Remove verify_tube logic. - auto tube_public_inputs_path = tube_proof_and_vk_path + "/public_inputs"; - auto tube_proof_path = tube_proof_and_vk_path + "/proof"; - auto tube_vk_path = tube_proof_and_vk_path + "/vk"; - UltraHonkAPI api; - return api.verify({ .ipa_accumulation = true }, tube_public_inputs_path, tube_proof_path, tube_vk_path) ? 0 - : 1; + if (aztec_process->parsed()) { +#ifdef __wasm__ + throw_or_abort("Aztec artifact processing is not supported in WASM builds."); +#else + // Default input to current directory if not specified + std::string input = artifact_input_path.empty() ? "." : artifact_input_path; + + // Check if input is a directory + if (std::filesystem::is_directory(input)) { + // If output specified for directory input, that's an error + if (!artifact_output_path.empty()) { + throw_or_abort( + "Cannot specify --output when input is a directory. Artifacts are updated in-place."); + } + // Recursively process all artifacts in directory + return process_all_artifacts(input, force_regenerate) ? 0 : 1; + } + + // Input is a file, process single artifact + std::string output = artifact_output_path.empty() ? input : artifact_output_path; + return process_aztec_artifact(input, output, force_regenerate) ? 
0 : 1; +#endif } // AVM #ifndef DISABLE_AZTEC_VM @@ -680,38 +605,15 @@ int parse_and_run_cli_command(int argc, char* argv[]) avm_check_circuit(avm_inputs_path); } else if (avm_verify_command->parsed()) { return avm_verify(proof_path, avm_public_inputs_path, vk_path) ? 0 : 1; + } else if (avm_simulate_command->parsed()) { + avm_simulate(avm_inputs_path); } #else - else if (avm_prove_command->parsed()) { - throw_or_abort("The Aztec Virtual Machine (AVM) is disabled in this environment!"); - } else if (avm_check_circuit_command->parsed()) { - throw_or_abort("The Aztec Virtual Machine (AVM) is disabled in this environment!"); - } else if (avm_verify_command->parsed()) { + else if (avm_prove_command->parsed() || avm_check_circuit_command->parsed() || avm_verify_command->parsed() || + avm_simulate_command->parsed()) { throw_or_abort("The Aztec Virtual Machine (AVM) is disabled in this environment!"); } #endif - // CLIENT IVC EXTRA COMMAND - else if (OLD_API_gates_for_ivc->parsed()) { - gate_count_for_ivc(bytecode_path, true); - } else if (OLD_API_gates_mega_honk->parsed()) { - gate_count(bytecode_path, flags.recursive, flags.honk_recursion, true); - } else if (OLD_API_write_arbitrary_valid_client_ivc_proof_and_vk_to_file->parsed()) { - write_arbitrary_valid_client_ivc_proof_and_vk_to_file(arbitrary_valid_proof_path); - return 0; - } - // ULTRA HONK EXTRA COMMANDS - else if (OLD_API_write_recursion_inputs_ultra_honk->parsed()) { - if (flags.ipa_accumulation) { - write_recursion_inputs_ultra_honk( - bytecode_path, witness_path, recursion_inputs_output_path); - } else { - write_recursion_inputs_ultra_honk( - bytecode_path, witness_path, recursion_inputs_output_path); - } - } - // NEW STANDARD API - // NOTE(AD): We likely won't really have a standard API if our main flavours are UH or CIVC, with CIVC so - // different else if (flags.scheme == "client_ivc") { ClientIVCAPI api; if (prove->parsed()) { @@ -720,6 +622,15 @@ int parse_and_run_cli_command(int argc, char* argv[]) " 
(default ./ivc-inputs.msgpack)"); } api.prove(flags, ivc_inputs_path, output_path); +#ifndef __wasm__ + if (print_op_counts) { + bb::detail::GLOBAL_OP_COUNTS.print_aggregate_counts(std::cout, 0); + } + if (!op_counts_out.empty()) { + std::ofstream file(op_counts_out); + bb::detail::GLOBAL_OP_COUNTS.print_aggregate_counts(file, 2); + } +#endif return 0; } if (check->parsed()) { @@ -734,6 +645,15 @@ int parse_and_run_cli_command(int argc, char* argv[]) UltraHonkAPI api; if (prove->parsed()) { api.prove(flags, bytecode_path, witness_path, vk_path, output_path); +#ifndef __wasm__ + if (print_op_counts) { + bb::detail::GLOBAL_OP_COUNTS.print_aggregate_counts(std::cout, 0); + } + if (!op_counts_out.empty()) { + std::ofstream file(op_counts_out); + bb::detail::GLOBAL_OP_COUNTS.print_aggregate_counts(file, 2); + } +#endif return 0; } return execute_non_prove_command(api); diff --git a/barretenberg/cpp/src/barretenberg/bb/readme.md b/barretenberg/cpp/src/barretenberg/bb/readme.md index 7ccac83c03ee..8f25e5ff2893 100644 --- a/barretenberg/cpp/src/barretenberg/bb/readme.md +++ b/barretenberg/cpp/src/barretenberg/bb/readme.md @@ -79,7 +79,7 @@ Barretenberg UltraHonk comes with the capability to verify proofs in Solidity, i bb prove --scheme ultra_honk --oracle-hash keccak -b ./target/hello_world.json -w ./target/witness-name.gz -o ./target/proof ``` - > **Note:** `--oracle-hash keccak` flag is used to generate UltraHonk proofs with Keccak hashes, as it is what the Solidity verifier is designed to be compatible with given the better gas efficiency when verifying on-chain; The default `--oracle-hash poseidon` in comparison generates proofs with Poseidon hashes, which is more efficient in recursions but not for on-chain verifications. 
+ > **Note:** `--oracle-hash keccak` flag is used to generate UltraHonk proofs with Keccak hashes, as it is what the Solidity verifier is designed to be compatible with given the better gas efficiency when verifying onchain; The default `--oracle-hash poseidon` in comparison generates proofs with Poseidon hashes, which is more efficient in recursions but not for onchain verifications. 2. Compute the verification key for your Noir program running: diff --git a/barretenberg/cpp/src/barretenberg/bbapi/bbapi.test.cpp b/barretenberg/cpp/src/barretenberg/bbapi/bbapi.test.cpp index ea88f1af0169..043709e39c26 100644 --- a/barretenberg/cpp/src/barretenberg/bbapi/bbapi.test.cpp +++ b/barretenberg/cpp/src/barretenberg/bbapi/bbapi.test.cpp @@ -13,14 +13,10 @@ template class BBApiSerializationTest : public ::testing::Test {}; // Enumerate each command type using Commands = ::testing::Types(num_circuits, request.trace_settings); request.ivc_stack_depth = 0; return Response{}; @@ -22,6 +23,7 @@ ClientIvcStart::Response ClientIvcStart::execute(BBApiRequest& request) && ClientIvcLoad::Response ClientIvcLoad::execute(BBApiRequest& request) && { + BB_BENCH_NAME(MSGPACK_SCHEMA_NAME); if (!request.ivc_in_progress) { throw_or_abort("ClientIVC not started. Call ClientIvcStart first."); } @@ -37,6 +39,7 @@ ClientIvcLoad::Response ClientIvcLoad::execute(BBApiRequest& request) && ClientIvcAccumulate::Response ClientIvcAccumulate::execute(BBApiRequest& request) && { + BB_BENCH_NAME(MSGPACK_SCHEMA_NAME); if (!request.ivc_in_progress) { throw_or_abort("ClientIVC not started. 
Call ClientIvcStart first."); } @@ -53,6 +56,7 @@ ClientIvcAccumulate::Response ClientIvcAccumulate::execute(BBApiRequest& request std::shared_ptr precomputed_vk; if (!request.loaded_circuit_vk.empty()) { + // Deserialize directly from buffer precomputed_vk = from_buffer>(request.loaded_circuit_vk); } @@ -68,6 +72,7 @@ ClientIvcAccumulate::Response ClientIvcAccumulate::execute(BBApiRequest& request ClientIvcProve::Response ClientIvcProve::execute(BBApiRequest& request) && { + BB_BENCH_NAME(MSGPACK_SCHEMA_NAME); if (!request.ivc_in_progress) { throw_or_abort("ClientIVC not started. Call ClientIvcStart first."); } @@ -77,15 +82,12 @@ ClientIvcProve::Response ClientIvcProve::execute(BBApiRequest& request) && } info("ClientIvcProve - generating proof for ", request.ivc_stack_depth, " accumulated circuits"); - // Construct the hiding kernel to finalise the IVC steps - ClientIVC::ClientCircuit circuit{ request.ivc_in_progress->goblin.op_queue }; - request.ivc_in_progress->complete_kernel_circuit_logic(circuit); ClientIVC::Proof proof = request.ivc_in_progress->prove(); - // We verify this proof. Another bb call to verify has some overhead of loading VK/proof/SRS, // and it is mysterious if this transaction fails later in the lifecycle. 
info("ClientIvcProve - verifying the generated proof as a sanity check"); - if (!request.ivc_in_progress->verify(proof)) { + ClientIVC::VerificationKey vk = request.ivc_in_progress->get_vk(); + if (!ClientIVC::verify(proof, vk)) { throw_or_abort("Failed to verify the generated proof!"); } @@ -99,8 +101,9 @@ ClientIvcProve::Response ClientIvcProve::execute(BBApiRequest& request) && ClientIvcVerify::Response ClientIvcVerify::execute(const BBApiRequest& /*request*/) && { - // Deserialize the verification key from the byte buffer - const auto verification_key = from_buffer(vk); + BB_BENCH_NAME(MSGPACK_SCHEMA_NAME); + // Deserialize the verification key directly from buffer + ClientIVC::VerificationKey verification_key = from_buffer(vk); // Verify the proof using ClientIVC's static verify method const bool verified = ClientIVC::verify(proof, verification_key); @@ -108,74 +111,46 @@ ClientIvcVerify::Response ClientIvcVerify::execute(const BBApiRequest& /*request return { .valid = verified }; } -static std::shared_ptr get_acir_program_decider_proving_key( - const BBApiRequest& request, acir_format::AcirProgram& program) +static std::shared_ptr get_acir_program_prover_instance(const BBApiRequest& request, + acir_format::AcirProgram& program) { ClientIVC::ClientCircuit builder = acir_format::create_circuit(program); // Construct the verification key via the prover-constructed proving key with the proper trace settings - return std::make_shared(builder, request.trace_settings); + return std::make_shared(builder, request.trace_settings); } -ClientIVC::VerificationKey compute_civc_vk(const BBApiRequest& request, size_t num_public_inputs_in_final_circuit) -{ - ClientIVC ivc{ /* num_circuits */ 2, request.trace_settings }; - PrivateFunctionExecutionMockCircuitProducer circuit_producer; - - // Initialize the IVC with an arbitrary circuit - // We segfault if we only call accumulate once - static constexpr size_t SMALL_ARBITRARY_LOG_CIRCUIT_SIZE{ 5 }; - auto [circuit_0, vk_0] = - 
circuit_producer.create_next_circuit_and_vk(ivc, { .log2_num_gates = SMALL_ARBITRARY_LOG_CIRCUIT_SIZE }); - ivc.accumulate(circuit_0, vk_0); - - // Create another circuit and accumulate - auto [circuit_1, vk_1] = - circuit_producer.create_next_circuit_and_vk(ivc, - { - .num_public_inputs = num_public_inputs_in_final_circuit, - .log2_num_gates = SMALL_ARBITRARY_LOG_CIRCUIT_SIZE, - }); - ivc.accumulate(circuit_1, vk_1); - - circuit_producer.construct_hiding_kernel(ivc); - // Construct the hiding circuit proving and verification key - auto hiding_decider_pk = ivc.compute_hiding_circuit_proving_key(); - auto hiding_honk_vk = std::make_shared(hiding_decider_pk->get_precomputed()); - return { hiding_honk_vk, - std::make_shared(), - std::make_shared() }; -} - -ClientIvcComputeStandaloneVk::Response ClientIvcComputeStandaloneVk::execute(BB_UNUSED const BBApiRequest& request) && +ClientIvcComputeStandaloneVk::Response ClientIvcComputeStandaloneVk::execute(const BBApiRequest& request) && { + BB_BENCH_NAME(MSGPACK_SCHEMA_NAME); info("ClientIvcComputeStandaloneVk - deriving VK for circuit '", circuit.name, "'"); auto constraint_system = acir_format::circuit_buf_to_acir_format(std::move(circuit.bytecode)); acir_format::AcirProgram program{ constraint_system, /*witness=*/{} }; - std::shared_ptr proving_key = get_acir_program_decider_proving_key(request, program); - auto verification_key = std::make_shared(proving_key->get_precomputed()); - - Response response; - response.bytes = to_buffer(*verification_key); - response.fields = verification_key->to_field_elements(); - - info("ClientIvcComputeStandaloneVk - VK derived, size: ", response.bytes.size(), " bytes"); + std::shared_ptr prover_instance = get_acir_program_prover_instance(request, program); + auto verification_key = std::make_shared(prover_instance->get_precomputed()); - return response; + return { .bytes = to_buffer(*verification_key), .fields = verification_key->to_field_elements() }; } -ClientIvcComputeIvcVk::Response 
ClientIvcComputeIvcVk::execute(const BBApiRequest& request) && +ClientIvcComputeIvcVk::Response ClientIvcComputeIvcVk::execute(BB_UNUSED const BBApiRequest& request) && { + BB_BENCH_NAME(MSGPACK_SCHEMA_NAME); info("ClientIvcComputeIvcVk - deriving IVC VK for circuit '", circuit.name, "'"); - auto constraint_system = acir_format::circuit_buf_to_acir_format(std::move(circuit.bytecode)); - - auto vk = compute_civc_vk(request, constraint_system.public_inputs.size()); + auto standalone_vk_response = bbapi::ClientIvcComputeStandaloneVk{ + .circuit{ .name = "standalone_circuit", .bytecode = std::move(circuit.bytecode) } + }.execute({ .trace_settings = {} }); + auto mega_vk = from_buffer(standalone_vk_response.bytes); + auto eccvm_vk = std::make_shared(); + auto translator_vk = std::make_shared(); + ClientIVC::VerificationKey civc_vk{ .mega = std::make_shared(mega_vk), + .eccvm = std::make_shared(), + .translator = std::make_shared() }; Response response; - response.bytes = to_buffer(vk); + response.bytes = to_buffer(civc_vk); info("ClientIvcComputeIvcVk - IVC VK derived, size: ", response.bytes.size(), " bytes"); @@ -184,38 +159,40 @@ ClientIvcComputeIvcVk::Response ClientIvcComputeIvcVk::execute(const BBApiReques ClientIvcCheckPrecomputedVk::Response ClientIvcCheckPrecomputedVk::execute(const BBApiRequest& request) && { + BB_BENCH_NAME(MSGPACK_SCHEMA_NAME); acir_format::AcirProgram program{ acir_format::circuit_buf_to_acir_format(std::move(circuit.bytecode)), /*witness=*/{} }; - std::shared_ptr proving_key = get_acir_program_decider_proving_key(request, program); - auto computed_vk = std::make_shared(proving_key->get_precomputed()); + std::shared_ptr prover_instance = get_acir_program_prover_instance(request, program); + auto computed_vk = std::make_shared(prover_instance->get_precomputed()); if (circuit.verification_key.empty()) { info("FAIL: Expected precomputed vk for function ", circuit.name); throw_or_abort("Missing precomputed VK"); } + // Deserialize directly from 
buffer auto precomputed_vk = from_buffer>(circuit.verification_key); Response response; response.valid = true; - std::string error_message = "Precomputed vk does not match computed vk for function " + circuit.name; - if (!msgpack::msgpack_check_eq(*computed_vk, *precomputed_vk, error_message)) { + if (*computed_vk != *precomputed_vk) { response.valid = false; response.actual_vk = to_buffer(computed_vk); } return response; } -ClientIvcGates::Response ClientIvcGates::execute(BBApiRequest& request) && +ClientIvcStats::Response ClientIvcStats::execute(BBApiRequest& request) && { + BB_BENCH_NAME(MSGPACK_SCHEMA_NAME); Response response; const auto constraint_system = acir_format::circuit_buf_to_acir_format(std::move(circuit.bytecode)); acir_format::AcirProgram program{ constraint_system }; // Get IVC constraints if any - const auto& ivc_constraints = constraint_system.ivc_recursion_constraints; + const auto& ivc_constraints = constraint_system.pg_recursion_constraints; // Create metadata with appropriate IVC context acir_format::ProgramMetadata metadata{ @@ -229,7 +206,7 @@ ClientIvcGates::Response ClientIvcGates::execute(BBApiRequest& request) && builder.finalize_circuit(/*ensure_nonzero=*/true); // Set response values - response.acir_opcodes = static_cast(program.constraints.num_acir_opcodes); + response.acir_opcodes = program.constraints.num_acir_opcodes; response.circuit_size = static_cast(builder.num_gates); // Optionally include gates per opcode @@ -239,7 +216,7 @@ ClientIvcGates::Response ClientIvcGates::execute(BBApiRequest& request) && } // Log circuit details - info("ClientIvcGates - circuit: ", + info("ClientIvcStats - circuit: ", circuit.name, ", acir_opcodes: ", response.acir_opcodes, diff --git a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_client_ivc.hpp b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_client_ivc.hpp index 226d6075f339..fed4ffe28ae9 100644 --- a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_client_ivc.hpp +++ 
b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_client_ivc.hpp @@ -15,14 +15,6 @@ namespace bb::bbapi { -/** - * @brief Helper function to compute verification key for IVC - * @param request The API request context - * @param num_public_inputs_in_final_circuit Number of public inputs in the final circuit - * @return The computed IVC verification key - */ -ClientIVC::VerificationKey compute_civc_vk(const BBApiRequest& request, size_t num_public_inputs_in_final_circuit); - /** * @struct ClientIvcStart * @brief Initialize a new ClientIVC instance for incremental proof accumulation @@ -30,14 +22,14 @@ ClientIVC::VerificationKey compute_civc_vk(const BBApiRequest& request, size_t n * @note Only one IVC request can be made at a time for each batch_request. */ struct ClientIvcStart { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIvcStart"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIvcStart"; /** * @struct Response * @brief Empty response indicating successful initialization */ struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIvcStartResponse"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIvcStartResponse"; // Empty response - success indicated by no exception void msgpack(auto&& pack_fn) { pack_fn(); } bool operator==(const Response&) const = default; @@ -54,14 +46,14 @@ struct ClientIvcStart { * @brief Load a circuit into the ClientIVC instance for accumulation */ struct ClientIvcLoad { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIvcLoad"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIvcLoad"; /** * @struct Response * @brief Empty response indicating successful circuit loading */ struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIvcLoadResponse"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIvcLoadResponse"; // Empty response - success indicated by no exception void msgpack(auto&& pack_fn) { pack_fn(); } bool 
operator==(const Response&) const = default; @@ -79,14 +71,14 @@ struct ClientIvcLoad { * @brief Accumulate the previously loaded circuit into the IVC proof */ struct ClientIvcAccumulate { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIvcAccumulate"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIvcAccumulate"; /** * @struct Response * @brief Empty response indicating successful circuit accumulation */ struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIvcAccumulateResponse"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIvcAccumulateResponse"; // Empty response - success indicated by no exception void msgpack(auto&& pack_fn) { pack_fn(); } bool operator==(const Response&) const = default; @@ -104,14 +96,14 @@ struct ClientIvcAccumulate { * @brief Generate a proof for all accumulated circuits */ struct ClientIvcProve { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIvcProve"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIvcProve"; /** * @struct Response * @brief Contains the generated IVC proof */ struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIvcProveResponse"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIvcProveResponse"; /** @brief Complete IVC proof for all accumulated circuits */ ClientIVC::Proof proof; @@ -128,14 +120,14 @@ struct ClientIvcProve { * @brief Verify a ClientIVC proof with its verification key */ struct ClientIvcVerify { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIvcVerify"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIvcVerify"; /** * @struct Response * @brief Contains the verification result */ struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIvcVerifyResponse"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIvcVerifyResponse"; /** @brief True if the proof is valid */ bool valid; @@ -157,14 +149,14 @@ struct ClientIvcVerify 
{ * @brief Compute standalone verification key for a circuit */ struct ClientIvcComputeStandaloneVk { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIvcComputeStandaloneVk"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIvcComputeStandaloneVk"; /** * @struct Response * @brief Contains the computed verification key in multiple formats */ struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIvcComputeStandaloneVkResponse"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIvcComputeStandaloneVkResponse"; /** @brief Serialized verification key in binary format */ std::vector bytes; @@ -185,14 +177,14 @@ struct ClientIvcComputeStandaloneVk { * @brief Compute IVC verification key for the complete proof */ struct ClientIvcComputeIvcVk { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIvcComputeIvcVk"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIvcComputeIvcVk"; /** * @struct Response * @brief Contains the computed IVC verification key */ struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIvcComputeIvcVkResponse"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIvcComputeIvcVkResponse"; /** @brief Serialized IVC verification key in binary format */ std::vector bytes; @@ -211,14 +203,14 @@ struct ClientIvcComputeIvcVk { * @brief Verify that a precomputed verification key matches the circuit */ struct ClientIvcCheckPrecomputedVk { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIvcCheckPrecomputedVk"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIvcCheckPrecomputedVk"; /** * @struct Response * @brief Contains the validation result */ struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIvcCheckPrecomputedVkResponse"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIvcCheckPrecomputedVkResponse"; /** @brief True if the precomputed VK matches the circuit */ bool valid; @@ -237,18 
+229,18 @@ struct ClientIvcCheckPrecomputedVk { }; /** - * @struct ClientIvcGates + * @struct ClientIvcStats * @brief Get gate counts for a circuit */ -struct ClientIvcGates { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIvcGates"; +struct ClientIvcStats { + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIvcStats"; /** * @struct Response * @brief Contains gate count information */ struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIvcGatesResponse"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIvcStatsResponse"; /** @brief Number of ACIR opcodes */ uint32_t acir_opcodes; @@ -266,7 +258,7 @@ struct ClientIvcGates { bool include_gates_per_opcode; Response execute(BBApiRequest& request) &&; MSGPACK_FIELDS(circuit, include_gates_per_opcode); - bool operator==(const ClientIvcGates&) const = default; + bool operator==(const ClientIvcStats&) const = default; }; } // namespace bb::bbapi diff --git a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_client_ivc.test.cpp b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_client_ivc.test.cpp new file mode 100644 index 000000000000..b144e7adf1f4 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_client_ivc.test.cpp @@ -0,0 +1,47 @@ +#include "barretenberg/bbapi/bbapi_client_ivc.hpp" +#include "barretenberg/client_ivc/acir_bincode_mocks.hpp" +#include "barretenberg/client_ivc/client_ivc.hpp" +#include "barretenberg/common/serialize.hpp" +#include "barretenberg/dsl/acir_format/acir_format.hpp" +#include "barretenberg/dsl/acir_format/acir_to_constraint_buf.hpp" +#include "barretenberg/flavor/mega_flavor.hpp" +#include + +namespace bb::bbapi { + +class BBApiClientIvcTest : public ::testing::Test { + protected: + static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } +}; + +TEST_F(BBApiClientIvcTest, StandaloneVerificationKeySerialization) +{ + auto [bytecode, witness] = 
acir_bincode_mocks::create_simple_circuit_bytecode(); + + bbapi::ProofSystemSettings settings{ .ipa_accumulation = false, + .oracle_hash_type = "poseidon2", + .disable_zk = true }; + + // Compute standalone VK using ClientIvcComputeStandaloneVk + auto vk_response = + ClientIvcComputeStandaloneVk{ .circuit = { .name = "test_circuit", .bytecode = bytecode } }.execute(); + + // Create a VK from the field elements + auto vk = + std::make_shared(from_buffer(vk_response.bytes)); + EXPECT_EQ(vk->to_field_elements(), vk_response.fields) + << "Serialized field elements should match original field elements"; +} + +TEST_F(BBApiClientIvcTest, ClientIvcVkSerialization) +{ + auto [bytecode, _witness] = acir_bincode_mocks::create_simple_circuit_bytecode(); + auto vk_response = ClientIvcComputeIvcVk{ .circuit = { .name = "test_circuit", .bytecode = bytecode } }.execute(); + + // Create a VK from the field elements + ClientIVC::VerificationKey vk = from_buffer(vk_response.bytes); + EXPECT_EQ(to_buffer(vk.to_field_elements()), vk_response.bytes) + << "Serialized field elements should match original field elements"; +} + +} // namespace bb::bbapi diff --git a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_execute.hpp b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_execute.hpp index ac3e412e4cc2..b127f020a9e2 100644 --- a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_execute.hpp +++ b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_execute.hpp @@ -10,8 +10,7 @@ namespace bb::bbapi { using Command = NamedUnion; + ClientIvcStats>; using CommandResponse = NamedUnion; + ClientIvcStats::Response>; /** * @brief Executes a command by visiting a variant of all possible commands. 
diff --git a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_shared.hpp b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_shared.hpp index fd31d533f92b..515e783f5d89 100644 --- a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_shared.hpp +++ b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_shared.hpp @@ -92,19 +92,10 @@ struct ProofSystemSettings { */ bool disable_zk = false; - /** - * @brief Honk recursion setting. - * 0 = no recursion, 1 = UltraHonk recursion, 2 = UltraRollupHonk recursion. - * Controls whether pairing point accumulators and IPA claims are added to public inputs. - */ - uint32_t honk_recursion = 0; - - /** - * @brief Flag to indicate if this circuit will be recursively verified. - */ - bool recursive = false; + // TODO(md): remove this once considered stable + bool optimized_solidity_verifier = false; - MSGPACK_FIELDS(ipa_accumulation, oracle_hash_type, disable_zk, honk_recursion, recursive); + MSGPACK_FIELDS(ipa_accumulation, oracle_hash_type, disable_zk, optimized_solidity_verifier); bool operator==(const ProofSystemSettings& other) const = default; }; diff --git a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ultra_honk.cpp b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ultra_honk.cpp index 9c8f2b9fc097..a76bf68eabac 100644 --- a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ultra_honk.cpp +++ b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ultra_honk.cpp @@ -1,68 +1,398 @@ #include "barretenberg/bbapi/bbapi_ultra_honk.hpp" #include "barretenberg/bbapi/bbapi_shared.hpp" #include "barretenberg/circuit_checker/circuit_checker.hpp" +#include "barretenberg/commitment_schemes/ipa/ipa.hpp" #include "barretenberg/common/serialize.hpp" #include "barretenberg/common/throw_or_abort.hpp" +#include "barretenberg/constants.hpp" #include "barretenberg/dsl/acir_format/acir_format.hpp" #include "barretenberg/dsl/acir_format/acir_to_constraint_buf.hpp" #include "barretenberg/dsl/acir_format/serde/witness_stack.hpp" +#include 
"barretenberg/dsl/acir_proofs/honk_contract.hpp" +#include "barretenberg/dsl/acir_proofs/honk_optimized_contract.hpp" +#include "barretenberg/dsl/acir_proofs/honk_zk_contract.hpp" #include "barretenberg/flavor/mega_flavor.hpp" -#include "barretenberg/ultra_honk/decider_proving_key.hpp" +#include "barretenberg/flavor/ultra_flavor.hpp" +#include "barretenberg/flavor/ultra_keccak_flavor.hpp" +#include "barretenberg/flavor/ultra_keccak_zk_flavor.hpp" +#include "barretenberg/flavor/ultra_rollup_flavor.hpp" +#include "barretenberg/flavor/ultra_zk_flavor.hpp" +#include "barretenberg/numeric/uint256/uint256.hpp" +#include "barretenberg/special_public_inputs/special_public_inputs.hpp" +#include "barretenberg/ultra_honk/prover_instance.hpp" #include "barretenberg/ultra_honk/ultra_prover.hpp" #include "barretenberg/ultra_honk/ultra_verifier.hpp" +#include +#ifdef STARKNET_GARAGA_FLAVORS +#include "barretenberg/flavor/ultra_starknet_flavor.hpp" +#include "barretenberg/flavor/ultra_starknet_zk_flavor.hpp" +#endif #include #include namespace bb::bbapi { -CircuitProve::Response CircuitProve::execute(BB_UNUSED const BBApiRequest& request) && +template acir_format::ProgramMetadata _create_program_metadata() { - throw_or_abort("not implemented yet!"); + uint32_t honk_recursion = 0; + + if constexpr (IsAnyOf) { + honk_recursion = 1; + } else if constexpr (IsAnyOf) { + honk_recursion = 2; + } +#ifdef STARKNET_GARAGA_FLAVORS + if constexpr (IsAnyOf) { + honk_recursion = 1; + } +#endif + + return acir_format::ProgramMetadata{ .honk_recursion = honk_recursion }; } -CircuitComputeVk::Response CircuitComputeVk::execute(BB_UNUSED const BBApiRequest& request) && +template +Circuit _compute_circuit(std::vector&& bytecode, std::vector&& witness) { - throw_or_abort("not implemented yet!"); + const acir_format::ProgramMetadata metadata = _create_program_metadata(); + acir_format::AcirProgram program{ acir_format::circuit_buf_to_acir_format(std::move(bytecode)) }; + + if (!witness.empty()) { + 
program.witness = acir_format::witness_buf_to_witness_data(std::move(witness)); + } + return acir_format::create_circuit(program, metadata); } -CircuitInfo::Response CircuitInfo::execute(BB_UNUSED const BBApiRequest& request) && +template +std::shared_ptr> _compute_prover_instance(std::vector&& bytecode, + std::vector&& witness) { - throw_or_abort("not implemented yet!"); + // Measure function time and debug print + auto initial_time = std::chrono::high_resolution_clock::now(); + typename Flavor::CircuitBuilder builder = _compute_circuit(std::move(bytecode), std::move(witness)); + auto prover_instance = std::make_shared>(builder); + auto final_time = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(final_time - initial_time); + info("CircuitProve: Proving key computed in ", duration.count(), " ms"); + return prover_instance; +} +template +CircuitProve::Response _prove(std::vector&& bytecode, + std::vector&& witness, + std::vector&& vk_bytes) +{ + using Proof = typename Flavor::Transcript::Proof; + + auto prover_instance = _compute_prover_instance(std::move(bytecode), std::move(witness)); + std::shared_ptr vk; + if (vk_bytes.empty()) { + info("WARNING: computing verification key while proving. Pass in a precomputed vk for better performance."); + vk = std::make_shared(prover_instance->get_precomputed()); + } else { + vk = + std::make_shared(from_buffer(vk_bytes)); + } + + UltraProver_ prover{ prover_instance, vk }; + + Proof concat_pi_and_proof = prover.construct_proof(); + // Compute number of inner public inputs. Perform loose checks that the public inputs contain enough data. 
+ auto num_inner_public_inputs = [&]() { + size_t num_public_inputs = prover.prover_instance->num_public_inputs(); + if constexpr (HasIPAAccumulator) { + BB_ASSERT_GTE(num_public_inputs, + RollupIO::PUBLIC_INPUTS_SIZE, + "Public inputs should contain a pairing point accumulator and an IPA claim."); + return num_public_inputs - RollupIO::PUBLIC_INPUTS_SIZE; + } else { + BB_ASSERT_GTE(num_public_inputs, + DefaultIO::PUBLIC_INPUTS_SIZE, + "Public inputs should contain a pairing point accumulator."); + return num_public_inputs - DefaultIO::PUBLIC_INPUTS_SIZE; + } + }(); + CircuitComputeVk::Response vk_response; + // Optimization over calling CircuitComputeVk separately - if vk not provided, we write it. + if (vk_bytes.empty()) { + auto vk_fields_direct = vk->to_field_elements(); + std::vector vk_fields; + // Handle discrepancy in type of 'to_field_elements' + if constexpr (std::is_same_v>) { + vk_fields = std::move(vk_fields_direct); + } else { + vk_fields = std::vector(vk_fields_direct.begin(), vk_fields_direct.end()); + } + vk_response = { .bytes = vk_bytes.empty() ? to_buffer(vk) : vk_bytes, + .fields = std::move(vk_fields), + .hash = to_buffer(vk->hash()) }; + } + + // We split the inner public inputs, which are stored at the front of the proof, from the rest of the proof. Now, + // the "proof" refers to everything except the inner public inputs. 
+ return { .public_inputs = std::vector{ concat_pi_and_proof.begin(), + concat_pi_and_proof.begin() + + static_cast(num_inner_public_inputs) }, + .proof = std::vector{ concat_pi_and_proof.begin() + + static_cast(num_inner_public_inputs), + concat_pi_and_proof.end() }, + .vk = std::move(vk_response) }; } -CircuitCheck::Response CircuitCheck::execute(BB_UNUSED const BBApiRequest& request) && +template +bool _verify(const bool ipa_accumulation, + const std::vector& vk_bytes, + const std::vector& public_inputs, + const std::vector& proof) { - throw_or_abort("not implemented yet!"); + using VerificationKey = typename Flavor::VerificationKey; + using Verifier = UltraVerifier_; + using Transcript = typename Flavor::Transcript; + using DataType = typename Transcript::DataType; + using Proof = typename Transcript::Proof; + + std::shared_ptr vk = std::make_shared(from_buffer(vk_bytes)); + + // concatenate public inputs and proof + std::vector complete_proof; + complete_proof.reserve(public_inputs.size() + proof.size()); + complete_proof.insert(complete_proof.end(), public_inputs.begin(), public_inputs.end()); + complete_proof.insert(complete_proof.end(), proof.begin(), proof.end()); + + VerifierCommitmentKey ipa_verification_key; + if constexpr (HasIPAAccumulator) { + if (ipa_accumulation) { + ipa_verification_key = VerifierCommitmentKey(1 << CONST_ECCVM_LOG_N); + } + } + + Verifier verifier{ vk, ipa_verification_key }; + + bool verified = false; + if constexpr (HasIPAAccumulator) { + const size_t HONK_PROOF_LENGTH = Flavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS() - IPA_PROOF_LENGTH; + const size_t num_public_inputs = static_cast(vk->num_public_inputs); + // The extra calculation is for the IPA proof length. 
+ BB_ASSERT_EQ(complete_proof.size(), + HONK_PROOF_LENGTH + IPA_PROOF_LENGTH + num_public_inputs, + "Honk proof has incorrect length while verifying."); + const std::ptrdiff_t honk_proof_with_pub_inputs_length = + static_cast(HONK_PROOF_LENGTH + num_public_inputs); + auto ipa_proof = Proof(complete_proof.begin() + honk_proof_with_pub_inputs_length, complete_proof.end()); + auto honk_proof = Proof(complete_proof.begin(), complete_proof.begin() + honk_proof_with_pub_inputs_length); + verified = verifier.template verify_proof(complete_proof, ipa_proof).result; + } else { + verified = verifier.template verify_proof(complete_proof).result; + } + + if (verified) { + info("Proof verified successfully"); + } else { + info("Proof verification failed"); + } + + return verified; } -CircuitVerify::Response CircuitVerify::execute(BB_UNUSED const BBApiRequest& request) && +CircuitProve::Response CircuitProve::execute(BB_UNUSED const BBApiRequest& request) && { - throw_or_abort("not implemented yet!"); + BB_BENCH_NAME(MSGPACK_SCHEMA_NAME); + // if the ipa accumulation flag is set we are using the UltraRollupFlavor + if (settings.ipa_accumulation) { + return _prove( + std::move(circuit.bytecode), std::move(witness), std::move(circuit.verification_key)); + } + if (settings.oracle_hash_type == "poseidon2" && !settings.disable_zk) { + // if we are not disabling ZK and the oracle hash type is poseidon2, we are using the UltraZKFlavor + return _prove( + std::move(circuit.bytecode), std::move(witness), std::move(circuit.verification_key)); + } + if (settings.oracle_hash_type == "poseidon2" && settings.disable_zk) { + // if we are disabling ZK and the oracle hash type is poseidon2, we are using the UltraFlavor + return _prove( + std::move(circuit.bytecode), std::move(witness), std::move(circuit.verification_key)); + } + if (settings.oracle_hash_type == "keccak" && !settings.disable_zk) { + // if we are not disabling ZK and the oracle hash type is keccak, we are using the 
UltraKeccakZKFlavor + return _prove( + std::move(circuit.bytecode), std::move(witness), std::move(circuit.verification_key)); + } + if (settings.oracle_hash_type == "keccak" && settings.disable_zk) { + return _prove( + std::move(circuit.bytecode), std::move(witness), std::move(circuit.verification_key)); +#ifdef STARKNET_GARAGA_FLAVORS + } + if (settings.oracle_hash_type == "starknet" && settings.disable_zk) { + return _prove( + std::move(circuit.bytecode), std::move(witness), std::move(circuit.verification_key())); + } + if (settings.oracle_hash_type == "starknet" && !settings.disable_zk) { + return _prove( + std::move(circuit.bytecode), std::move(witness), std::move(circuit.verification_key())); +#endif + } + throw_or_abort("Invalid proving options specified in CircuitProve!"); } -ProofAsFields::Response ProofAsFields::execute(BB_UNUSED const BBApiRequest& request) && +CircuitComputeVk::Response CircuitComputeVk::execute(BB_UNUSED const BBApiRequest& request) && { - throw_or_abort("not implemented yet!"); + BB_BENCH_NAME(MSGPACK_SCHEMA_NAME); + std::vector vk_bytes; + std::vector vk_fields; + std::vector vk_hash_bytes; + + // Helper lambda to compute VK, fields, and hash for a given flavor + auto compute_vk_and_fields = [&]() { + auto prover_instance = _compute_prover_instance(std::move(circuit.bytecode), {}); + auto vk = std::make_shared(prover_instance->get_precomputed()); + vk_bytes = to_buffer(*vk); + if constexpr (IsAnyOf) { + vk_fields = vk->to_field_elements(); + } else { + // For other flavors, we use field elements + auto uint256_elements = vk->to_field_elements(); + vk_fields.reserve(uint256_elements.size()); + vk_fields.insert(vk_fields.end(), uint256_elements.begin(), uint256_elements.end()); + } + vk_hash_bytes = to_buffer(vk->hash()); + }; + + if (settings.ipa_accumulation) { + compute_vk_and_fields.template operator()(); + } else if (settings.oracle_hash_type == "poseidon2" && !settings.disable_zk) { + compute_vk_and_fields.template operator()(); + 
} else if (settings.oracle_hash_type == "poseidon2" && settings.disable_zk) { + compute_vk_and_fields.template operator()(); + } else if (settings.oracle_hash_type == "keccak" && !settings.disable_zk) { + compute_vk_and_fields.template operator()(); + } else if (settings.oracle_hash_type == "keccak" && settings.disable_zk) { + compute_vk_and_fields.template operator()(); +#ifdef STARKNET_GARAGA_FLAVORS + } else if (settings.oracle_hash_type == "starknet" && !settings.disable_zk) { + compute_vk_and_fields.template operator()(); + } else if (settings.oracle_hash_type == "starknet" && settings.disable_zk) { + compute_vk_and_fields.template operator()(); +#endif + } else { + throw_or_abort("invalid proof type in _write_vk"); + } + + return { .bytes = std::move(vk_bytes), .fields = std::move(vk_fields), .hash = std::move(vk_hash_bytes) }; } -VkAsFields::Response VkAsFields::execute(BB_UNUSED const BBApiRequest& request) && +template +CircuitStats::Response _stats(std::vector&& bytecode, bool include_gates_per_opcode) { - throw_or_abort("not implemented yet!"); + // Parse the circuit to get gate count information + auto constraint_system = acir_format::circuit_buf_to_acir_format(std::move(bytecode)); + + acir_format::ProgramMetadata metadata = _create_program_metadata(); + metadata.collect_gates_per_opcode = include_gates_per_opcode; + CircuitStats::Response response; + response.num_acir_opcodes = static_cast(constraint_system.num_acir_opcodes); + + acir_format::AcirProgram program{ std::move(constraint_system) }; + auto builder = acir_format::create_circuit(program, metadata); + builder.finalize_circuit(/*ensure_nonzero=*/true); + + response.num_gates = static_cast(builder.get_finalized_total_circuit_size()); + response.num_gates_dyadic = static_cast(builder.get_circuit_subgroup_size(response.num_gates)); + // note: will be empty if collect_gates_per_opcode is false + response.gates_per_opcode = std::move(program.constraints.gates_per_opcode); + + return response; } 
-CircuitWriteSolidityVerifier::Response CircuitWriteSolidityVerifier::execute(BB_UNUSED const BBApiRequest& request) && +CircuitStats::Response CircuitStats::execute(BB_UNUSED const BBApiRequest& request) && { - throw_or_abort("not implemented yet"); + BB_BENCH_NAME(MSGPACK_SCHEMA_NAME); + // if the ipa accumulation flag is set we are using the UltraRollupFlavor + if (settings.ipa_accumulation) { + return _stats(std::move(circuit.bytecode), include_gates_per_opcode); + } + if (settings.oracle_hash_type == "poseidon2" && !settings.disable_zk) { + // if we are not disabling ZK and the oracle hash type is poseidon2, we are using the UltraZKFlavor + return _stats(std::move(circuit.bytecode), include_gates_per_opcode); + } + if (settings.oracle_hash_type == "poseidon2" && settings.disable_zk) { + // if we are disabling ZK and the oracle hash type is poseidon2, we are using the UltraFlavor + return _stats(std::move(circuit.bytecode), include_gates_per_opcode); + } + if (settings.oracle_hash_type == "keccak" && !settings.disable_zk) { + // if we are not disabling ZK and the oracle hash type is keccak, we are using the UltraKeccakZKFlavor + return _stats(std::move(circuit.bytecode), include_gates_per_opcode); + } + if (settings.oracle_hash_type == "keccak" && settings.disable_zk) { + return _stats(std::move(circuit.bytecode), include_gates_per_opcode); +#ifdef STARKNET_GARAGA_FLAVORS + } + if (settings.oracle_hash_type == "starknet" && settings.disable_zk) { + return _stats(std::move(circuit.bytecode), include_gates_per_opcode); + } + if (settings.oracle_hash_type == "starknet" && !settings.disable_zk) { + return _stats(std::move(circuit.bytecode), include_gates_per_opcode); +#endif + } + throw_or_abort("Invalid proving options specified in CircuitStats!"); } -CircuitProveAndVerify::Response CircuitProveAndVerify::execute(BB_UNUSED const BBApiRequest& request) && +CircuitVerify::Response CircuitVerify::execute(BB_UNUSED const BBApiRequest& request) && { - 
throw_or_abort("not implemented yet!"); + BB_BENCH_NAME(MSGPACK_SCHEMA_NAME); + const bool ipa_accumulation = settings.ipa_accumulation; + bool verified = false; + + // if the ipa accumulation flag is set we are using the UltraRollupFlavor + if (ipa_accumulation) { + verified = _verify(ipa_accumulation, verification_key, public_inputs, proof); + } else if (settings.oracle_hash_type == "poseidon2" && !settings.disable_zk) { + verified = _verify(ipa_accumulation, verification_key, public_inputs, proof); + } else if (settings.oracle_hash_type == "poseidon2" && settings.disable_zk) { + verified = _verify(ipa_accumulation, verification_key, public_inputs, proof); + } else if (settings.oracle_hash_type == "keccak" && !settings.disable_zk) { + verified = _verify(ipa_accumulation, verification_key, public_inputs, proof); + } else if (settings.oracle_hash_type == "keccak" && settings.disable_zk) { + verified = _verify(ipa_accumulation, verification_key, public_inputs, proof); +#ifdef STARKNET_GARAGA_FLAVORS + } else if (settings.oracle_hash_type == "starknet" && !settings.disable_zk) { + verified = _verify(ipa_accumulation, verification_key, public_inputs, proof); + } else if (settings.oracle_hash_type == "starknet" && settings.disable_zk) { + verified = _verify(ipa_accumulation, verification_key, public_inputs, proof); +#endif + } else { + throw_or_abort("invalid proof type in _verify"); + } + + return { verified }; } -CircuitBenchmark::Response CircuitBenchmark::execute(BB_UNUSED const BBApiRequest& request) && +VkAsFields::Response VkAsFields::execute(BB_UNUSED const BBApiRequest& request) && +{ + BB_BENCH_NAME(MSGPACK_SCHEMA_NAME); + std::vector fields; + + // Standard UltraHonk flavors + auto vk = from_buffer(verification_key); + fields = vk.to_field_elements(); + + return { std::move(fields) }; +} + +CircuitWriteSolidityVerifier::Response CircuitWriteSolidityVerifier::execute(BB_UNUSED const BBApiRequest& request) && { - throw_or_abort("not implemented yet!"); + 
BB_BENCH_NAME(MSGPACK_SCHEMA_NAME); + using VK = UltraKeccakFlavor::VerificationKey; + auto vk = std::make_shared(from_buffer(verification_key)); + + std::string contract = settings.disable_zk ? get_honk_solidity_verifier(vk) : get_honk_zk_solidity_verifier(vk); + +// If in wasm, we dont include the optimized solidity verifier - due to its large bundle size +// This will run generate twice, but this should only be run before deployment and not frequently +#ifndef __wasm__ + if (settings.disable_zk && settings.optimized_solidity_verifier) { + contract = get_optimized_honk_solidity_verifier(vk); + } +#endif + + return { std::move(contract) }; } } // namespace bb::bbapi diff --git a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ultra_honk.hpp b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ultra_honk.hpp index 13163493444c..cf8274858154 100644 --- a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ultra_honk.hpp +++ b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ultra_honk.hpp @@ -9,26 +9,42 @@ #include "barretenberg/bbapi/bbapi_shared.hpp" #include "barretenberg/common/named_union.hpp" #include "barretenberg/honk/proof_system/types/proof.hpp" +#include "barretenberg/numeric/uint256/uint256.hpp" #include "barretenberg/serialize/msgpack.hpp" +#include #include #include namespace bb::bbapi { -// CircuitInput, CircuitInputNoVK, and ProofSystemSettings are defined in bbapi_shared.hpp +struct CircuitComputeVk { + static constexpr const char MSGPACK_SCHEMA_NAME[] = "CircuitComputeVk"; + + struct Response { + static constexpr const char MSGPACK_SCHEMA_NAME[] = "CircuitComputeVkResponse"; + + std::vector bytes; // Serialized verification key + std::vector fields; // VK as field elements (unless keccak, then just uint256_t's) + std::vector hash; // The VK hash + MSGPACK_FIELDS(bytes, fields, hash); + bool operator==(const Response&) const = default; + }; + + CircuitInputNoVK circuit; + ProofSystemSettings settings; + MSGPACK_FIELDS(circuit, settings); + Response execute(const 
BBApiRequest& request = {}) &&; + bool operator==(const CircuitComputeVk&) const = default; +}; /** * @struct CircuitProve * @brief Represents a request to generate a proof. * Currently, UltraHonk is the only proving system supported by BB (after plonk was deprecated and removed). * This is used for one-shot proving, not our "IVC" scheme, ClientIVC-honk. For that, use the ClientIVC* commands. - * - * This structure is used to encapsulate all necessary parameters for generating a proof - * for a specific circuit, including the circuit bytecode, verification key, witness data, and options for the proving - * process. */ struct CircuitProve { - static constexpr const char* MSGPACK_SCHEMA_NAME = "CircuitProve"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "CircuitProve"; /** * @brief Contains proof and public inputs. @@ -36,11 +52,12 @@ struct CircuitProve { * Example uses of this Response would be verification in native BB, WASM BB, solidity or recursively through Noir. */ struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "CircuitProveResponse"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "CircuitProveResponse"; - PublicInputsVector public_inputs; - HonkProof proof; - MSGPACK_FIELDS(public_inputs, proof); + std::vector public_inputs; + std::vector proof; + CircuitComputeVk::Response vk; + MSGPACK_FIELDS(public_inputs, proof, vk); bool operator==(const Response&) const = default; }; @@ -52,39 +69,22 @@ struct CircuitProve { bool operator==(const CircuitProve&) const = default; }; -struct CircuitComputeVk { - static constexpr const char* MSGPACK_SCHEMA_NAME = "CircuitComputeVk"; - - struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "CircuitComputeVkResponse"; - - std::vector bytes; // Serialized verification key - MSGPACK_FIELDS(bytes); - bool operator==(const Response&) const = default; - }; - - CircuitInputNoVK circuit; - ProofSystemSettings settings; - MSGPACK_FIELDS(circuit, settings); - Response 
execute(const BBApiRequest& request = {}) &&; - bool operator==(const CircuitComputeVk&) const = default; -}; - /** - * @struct CircuitInfo + * @struct CircuitStats * @brief Consolidated command for retrieving circuit information. * Combines gate count, circuit size, and other metadata into a single command. */ -struct CircuitInfo { - static constexpr const char* MSGPACK_SCHEMA_NAME = "CircuitInfo"; +struct CircuitStats { + static constexpr const char MSGPACK_SCHEMA_NAME[] = "CircuitStats"; struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "CircuitInfoResponse"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "CircuitInfoResponse"; - uint32_t total_gates; - uint32_t subgroup_size; - std::map gates_per_opcode; // Optional: gate counts per opcode - MSGPACK_FIELDS(total_gates, subgroup_size, gates_per_opcode); + uint32_t num_gates{}; + uint32_t num_gates_dyadic{}; + uint32_t num_acir_opcodes{}; + std::vector gates_per_opcode; + MSGPACK_FIELDS(num_gates, num_gates_dyadic, num_acir_opcodes, gates_per_opcode); bool operator==(const Response&) const = default; }; @@ -93,31 +93,7 @@ struct CircuitInfo { ProofSystemSettings settings; MSGPACK_FIELDS(circuit, include_gates_per_opcode, settings); Response execute(const BBApiRequest& request = {}) &&; - bool operator==(const CircuitInfo&) const = default; -}; - -/** - * @struct CircuitCheck - * @brief Verify that a witness satisfies a circuit's constraints. - * For debugging and validation purposes. 
- */ -struct CircuitCheck { - static constexpr const char* MSGPACK_SCHEMA_NAME = "CircuitCheck"; - - struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "CircuitCheckResponse"; - - bool satisfied; - MSGPACK_FIELDS(satisfied); - bool operator==(const Response&) const = default; - }; - - CircuitInput circuit; - std::vector witness; - ProofSystemSettings settings; - MSGPACK_FIELDS(circuit, witness, settings); - Response execute(const BBApiRequest& request = {}) &&; - bool operator==(const CircuitCheck&) const = default; + bool operator==(const CircuitStats&) const = default; }; /** @@ -125,10 +101,10 @@ struct CircuitCheck { * @brief Verify a proof against a verification key and public inputs. */ struct CircuitVerify { - static constexpr const char* MSGPACK_SCHEMA_NAME = "CircuitVerify"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "CircuitVerify"; struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "CircuitVerifyResponse"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "CircuitVerifyResponse"; bool verified; MSGPACK_FIELDS(verified); @@ -136,44 +112,26 @@ struct CircuitVerify { }; std::vector verification_key; - PublicInputsVector public_inputs; - HonkProof proof; + std::vector public_inputs; + std::vector proof; ProofSystemSettings settings; MSGPACK_FIELDS(verification_key, public_inputs, proof, settings); Response execute(const BBApiRequest& request = {}) &&; bool operator==(const CircuitVerify&) const = default; }; -/** - * @struct ProofAsFields - * @brief Convert a proof to field elements representation. 
- */ -struct ProofAsFields { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ProofAsFields"; - - struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "ProofAsFieldsResponse"; - - std::vector fields; - MSGPACK_FIELDS(fields); - bool operator==(const Response&) const = default; - }; - - HonkProof proof; - MSGPACK_FIELDS(proof); - Response execute(const BBApiRequest& request = {}) &&; - bool operator==(const ProofAsFields&) const = default; -}; - /** * @struct VkAsFields * @brief Convert a verification key to field elements representation. + * WORKTODO(bbapi): this should become mostly obsolete with having the verification keys always reported as field +elements as well, + * and having a simpler serialization method. */ struct VkAsFields { - static constexpr const char* MSGPACK_SCHEMA_NAME = "VkAsFields"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "VkAsFields"; struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "VkAsFieldsResponse"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "VkAsFieldsResponse"; std::vector fields; MSGPACK_FIELDS(fields); @@ -181,8 +139,7 @@ struct VkAsFields { }; std::vector verification_key; - bool is_mega_honk = false; - MSGPACK_FIELDS(verification_key, is_mega_honk); + MSGPACK_FIELDS(verification_key); Response execute(const BBApiRequest& request = {}) &&; bool operator==(const VkAsFields&) const = default; }; @@ -191,10 +148,10 @@ struct VkAsFields { * @brief Command to generate Solidity verifier contract */ struct CircuitWriteSolidityVerifier { - static constexpr const char* MSGPACK_SCHEMA_NAME = "CircuitWriteSolidityVerifier"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "CircuitWriteSolidityVerifier"; struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "CircuitWriteSolidityVerifierResponse"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "CircuitWriteSolidityVerifierResponse"; std::string solidity_code; MSGPACK_FIELDS(solidity_code); @@ 
-208,58 +165,4 @@ struct CircuitWriteSolidityVerifier { bool operator==(const CircuitWriteSolidityVerifier&) const = default; }; -/** - * @brief Command to prove and verify in one step - */ -struct CircuitProveAndVerify { - static constexpr const char* MSGPACK_SCHEMA_NAME = "CircuitProveAndVerify"; - - struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "CircuitProveAndVerifyResponse"; - - bool verified; - HonkProof proof; - PublicInputsVector public_inputs; - MSGPACK_FIELDS(verified, proof, public_inputs); - bool operator==(const Response&) const = default; - }; - - CircuitInput circuit; - std::vector witness; - ProofSystemSettings settings; - MSGPACK_FIELDS(circuit, witness, settings); - Response execute(const BBApiRequest& request = {}) &&; - bool operator==(const CircuitProveAndVerify&) const = default; -}; - -/** - * @brief Command to benchmark circuit operations - */ -struct CircuitBenchmark { - static constexpr const char* MSGPACK_SCHEMA_NAME = "CircuitBenchmark"; - - struct Response { - static constexpr const char* MSGPACK_SCHEMA_NAME = "CircuitBenchmarkResponse"; - - double witness_generation_time_ms; - double proving_time_ms; - double verification_time_ms; - uint64_t peak_memory_bytes; - MSGPACK_FIELDS(witness_generation_time_ms, proving_time_ms, verification_time_ms, peak_memory_bytes); - bool operator==(const Response&) const = default; - }; - - CircuitInput circuit; - std::vector witness; - ProofSystemSettings settings; - uint32_t num_iterations = 1; - bool benchmark_witness_generation = true; - bool benchmark_proving = true; - MSGPACK_FIELDS(circuit, witness, settings, num_iterations, benchmark_witness_generation, benchmark_proving); - Response execute(const BBApiRequest& request = {}) &&; - bool operator==(const CircuitBenchmark&) const = default; -}; - -// OracleHashType enum and parse_oracle_hash_type are defined in bbapi_shared.hpp - } // namespace bb::bbapi diff --git 
a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ultra_honk.test.cpp b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ultra_honk.test.cpp new file mode 100644 index 000000000000..1c51b33e050b --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ultra_honk.test.cpp @@ -0,0 +1,131 @@ +#include "barretenberg/bbapi/bbapi_ultra_honk.hpp" +#include "barretenberg/client_ivc/acir_bincode_mocks.hpp" +#include "barretenberg/common/thread.hpp" +#include "barretenberg/dsl/acir_format/acir_format.hpp" +#include "barretenberg/dsl/acir_format/acir_to_constraint_buf.hpp" +#include "barretenberg/flavor/mega_flavor.hpp" +#include "barretenberg/flavor/ultra_flavor.hpp" +#include +#include + +namespace bb::bbapi { + +class BBApiUltraHonkTest : public ::testing::Test { + protected: + static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } + + void SetUp() override + { + // Store original concurrency for restoration + original_concurrency = get_num_cpus(); + } + + void TearDown() override + { + // Restore original concurrency + set_parallel_for_concurrency(original_concurrency); + } + + size_t original_concurrency; +}; +TEST_F(BBApiUltraHonkTest, CircuitProve) +{ + auto [bytecode, witness] = acir_bincode_mocks::create_simple_circuit_bytecode(); + + // Test different combinations of settings + const std::vector test_settings = { + // ipa_accumulation = true (other values don't matter) + { .ipa_accumulation = true, .oracle_hash_type = "poseidon2", .disable_zk = false }, + // ipa_accumulation = false cases (test both disable_zk values) + { .ipa_accumulation = false, .oracle_hash_type = "poseidon2", .disable_zk = false }, + { .ipa_accumulation = false, .oracle_hash_type = "poseidon2", .disable_zk = true }, + { .ipa_accumulation = false, .oracle_hash_type = "keccak", .disable_zk = false }, + { .ipa_accumulation = false, .oracle_hash_type = "keccak", .disable_zk = true } + }; + + for (const bbapi::ProofSystemSettings& settings : test_settings) { + 
// Compute VK + auto vk_response = + CircuitComputeVk{ .circuit = { .name = "test_circuit", .bytecode = bytecode }, .settings = settings } + .execute(); + + // First prove + auto prove_response = CircuitProve{ .circuit = { .name = "test_circuit", + .bytecode = bytecode, + .verification_key = vk_response.bytes }, + .witness = witness, + .settings = settings } + .execute(); + + // Verify the proof + auto verify_response = CircuitVerify{ .verification_key = vk_response.bytes, + .public_inputs = prove_response.public_inputs, + .proof = prove_response.proof, + .settings = settings } + .execute(); + + EXPECT_TRUE(verify_response.verified) + << "Failed with ipa_accumulation=" << settings.ipa_accumulation + << ", oracle_hash_type=" << settings.oracle_hash_type << ", disable_zk=" << settings.disable_zk; + } +} + +TEST_F(BBApiUltraHonkTest, ParallelComputeVk) +{ + // Set hardware concurrency to 8 to ensure we can run 8 VK computations in parallel + set_parallel_for_concurrency(8); + + constexpr size_t num_vks = 8; + + // Create different circuits by varying the number of constraints + std::vector> bytecodes(num_vks); + std::vector> witnesses(num_vks); + for (size_t i = 0; i < num_vks; ++i) { + // Create circuit with i+1 constraints (so each circuit is different) + auto [bytecode, witness] = acir_bincode_mocks::create_simple_circuit_bytecode(i + 1); + bytecodes[i] = bytecode; + witnesses[i] = witness; + } + + std::vector parallel_vks(num_vks); + std::vector sequential_vks(num_vks); + + // Use default settings + bbapi::ProofSystemSettings settings{ .ipa_accumulation = false, + .oracle_hash_type = "poseidon2", + .disable_zk = false }; + + // Compute VKs in parallel + parallel_for(num_vks, [&](size_t i) { + parallel_vks[i] = + CircuitComputeVk{ .circuit = { .name = "test_circuit_" + std::to_string(i), .bytecode = bytecodes[i] }, + .settings = settings } + .execute(); + }); + + // Compute VKs sequentially + for (size_t i = 0; i < num_vks; ++i) { + sequential_vks[i] = + 
CircuitComputeVk{ .circuit = { .name = "test_circuit_" + std::to_string(i), .bytecode = bytecodes[i] }, + .settings = settings } + .execute(); + } + + // Verify all VKs were computed successfully and match between parallel and sequential + for (size_t i = 0; i < num_vks; ++i) { + EXPECT_FALSE(parallel_vks[i].bytes.empty()) << "Parallel VK " << i << " is empty"; + EXPECT_FALSE(sequential_vks[i].bytes.empty()) << "Sequential VK " << i << " is empty"; + + // Parallel and sequential should produce identical VKs for the same circuit + EXPECT_EQ(parallel_vks[i].bytes, sequential_vks[i].bytes) + << "Parallel VK " << i << " differs from sequential VK " << i; + + // Each circuit should have a different VK (different number of constraints) + if (i > 0) { + EXPECT_NE(parallel_vks[i].bytes, parallel_vks[0].bytes) + << "VK " << i << " should differ from VK 0 (different circuits)"; + } + } +} + +} // namespace bb::bbapi diff --git a/barretenberg/cpp/src/barretenberg/bbapi/c_bind.cpp b/barretenberg/cpp/src/barretenberg/bbapi/c_bind.cpp index e6cfe20f3fce..7e4c02338d8a 100644 --- a/barretenberg/cpp/src/barretenberg/bbapi/c_bind.cpp +++ b/barretenberg/cpp/src/barretenberg/bbapi/c_bind.cpp @@ -13,10 +13,6 @@ namespace bb::bbapi { namespace { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) BBApiRequest global_request; -#ifndef NO_MULTITHREADING -// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) -std::mutex request_mutex; -#endif } // namespace /** @@ -27,14 +23,6 @@ std::mutex request_mutex; */ CommandResponse bbapi(Command&& command) { -#ifndef NO_MULTITHREADING - // Try to lock, but error if it would block (indicating concurrent access) - std::unique_lock lock(request_mutex, std::try_to_lock); - if (!lock.owns_lock()) { - throw_or_abort("BB API is meant for single-threaded (queued) use only"); - } -#endif - // Execute the command using the global request and return the response return execute(global_request, std::move(command)); } diff 
--git a/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt index ea3622bb149b..d56ef7006dbc 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt @@ -2,7 +2,6 @@ add_subdirectory(basics_bench) add_subdirectory(decrypt_bench) add_subdirectory(goblin_bench) add_subdirectory(ipa_bench) -add_subdirectory(bb_cli_bench) add_subdirectory(client_ivc_bench) add_subdirectory(pippenger_bench) add_subdirectory(protogalaxy_bench) diff --git a/barretenberg/cpp/src/barretenberg/benchmark/basics_bench/basics.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/basics_bench/basics.bench.cpp index 38978614fff9..5636f14d178b 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/basics_bench/basics.bench.cpp +++ b/barretenberg/cpp/src/barretenberg/benchmark/basics_bench/basics.bench.cpp @@ -21,7 +21,7 @@ * */ #include "barretenberg/commitment_schemes/commitment_key.hpp" -#include "barretenberg/common/op_count.hpp" +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/common/thread.hpp" #include "barretenberg/ecc/curves/bn254/bn254.hpp" #include "barretenberg/numeric/random/engine.hpp" diff --git a/barretenberg/cpp/src/barretenberg/benchmark/bb_cli_bench/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/benchmark/bb_cli_bench/CMakeLists.txt deleted file mode 100644 index 0d70012d0e01..000000000000 --- a/barretenberg/cpp/src/barretenberg/benchmark/bb_cli_bench/CMakeLists.txt +++ /dev/null @@ -1,3 +0,0 @@ -if (NOT(FUZZING)) - barretenberg_module(bb_cli_bench bb-cli-lib) -endif() diff --git a/barretenberg/cpp/src/barretenberg/benchmark/bb_cli_bench/bb_cli.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/bb_cli_bench/bb_cli.bench.cpp deleted file mode 100644 index 56e6d55cf0a2..000000000000 --- a/barretenberg/cpp/src/barretenberg/benchmark/bb_cli_bench/bb_cli.bench.cpp +++ /dev/null @@ -1,82 +0,0 @@ -#include 
-#include -#include -#include -#include -#include - -#include "barretenberg/bb/cli.hpp" -#include "barretenberg/common/op_count_google_bench.hpp" -#include "barretenberg/common/std_string.hpp" -#include "barretenberg/common/throw_or_abort.hpp" - -namespace { -// This is used to suppress the default output of Google Benchmark. -// This is useful to run this benchmark without altering the stdout result of our simulated bb cli command. -// We instead use the `--benchmark_out` flag to output the results to a file. -class ConsoleNoOutputReporter : public benchmark::BenchmarkReporter { - public: - // We return `true` here to indicate "keep running" if there are multiple benchmark suites. - bool ReportContext(const Context&) override { return true; } - - // Called once for each run. We just do nothing here. - void ReportRuns(const std::vector&) override {} - - // Called at the end. Also do nothing. - void Finalize() override {} -}; - -// Benches the bb cli/main.cpp functionality by parsing MAIN_ARGS. 
-void benchmark_bb_cli(benchmark::State& state) -{ - // Get MAIN_ARGS from environment - const char* main_args_env = std::getenv("MAIN_ARGS"); - if (main_args_env == nullptr) { - throw_or_abort("Environment variable MAIN_ARGS must be set"); - } - - // Parse the space-delimited arguments - std::vector args = bb::detail::split(main_args_env, ' '); - - // Add the program name to the arguments - args.insert(args.begin(), "bb"); - - if (args.empty()) { - throw_or_abort("MAIN_ARGS must contain at least one argument"); - } - - // Convert to C-style argc/argv - std::vector argv(args.size()); - - // Convert each string to char* for the argv array - for (size_t i = 0; i < args.size(); ++i) { - // NOLINTNEXTLINE - argv[i] = const_cast(args[i].c_str()); - } - - for (auto _ : state) { - BB_REPORT_OP_COUNT_IN_BENCH(state); - - // Call the main function with the parsed arguments - int result = bb::parse_and_run_cli_command(static_cast(args.size()), argv.data()); - if (result != 0) { - exit(result); - } - } -} - -BENCHMARK(benchmark_bb_cli)->Iterations(1)->Unit(benchmark::kMillisecond); - -} // namespace - -int main(int argc, char** argv) -{ - ::benchmark ::Initialize(&argc, argv); - if (::benchmark ::ReportUnrecognizedArguments(argc, argv)) { - return 1; - } - auto report = std::make_unique(); - ::benchmark ::RunSpecifiedBenchmarks(report.get()); - ::benchmark ::Shutdown(); - return 0; -} diff --git a/barretenberg/cpp/src/barretenberg/benchmark/client_ivc_bench/client_ivc.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/client_ivc_bench/client_ivc.bench.cpp index 553104ad6c2a..1814a585ff3e 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/client_ivc_bench/client_ivc.bench.cpp +++ b/barretenberg/cpp/src/barretenberg/benchmark/client_ivc_bench/client_ivc.bench.cpp @@ -6,7 +6,7 @@ #include #include "barretenberg/client_ivc/test_bench_shared.hpp" -#include "barretenberg/common/op_count_google_bench.hpp" +#include "barretenberg/common/google_bb_bench.hpp" using namespace 
benchmark; using namespace bb; @@ -19,7 +19,7 @@ namespace { class ClientIVCBench : public benchmark::Fixture { public: // Number of function circuits to accumulate (based on Zac's target numbers) - static constexpr size_t NUM_ITERATIONS_MEDIUM_COMPLEXITY = 6; + static constexpr size_t NUM_ITERATIONS_MEDIUM_COMPLEXITY = 5; void SetUp([[maybe_unused]] const ::benchmark::State& state) override { @@ -32,20 +32,12 @@ class ClientIVCBench : public benchmark::Fixture { */ BENCHMARK_DEFINE_F(ClientIVCBench, VerificationOnly)(benchmark::State& state) { - ClientIVC ivc{ /*num_circuits=*/2, { AZTEC_TRACE_STRUCTURE } }; - - PrivateFunctionExecutionMockCircuitProducer circuit_producer; - - // Initialize the IVC with an arbitrary circuit - circuit_producer.construct_and_accumulate_next_circuit(ivc); - - // Create another circuit and accumulate - circuit_producer.construct_and_accumulate_next_circuit(ivc); - - auto proof = ivc.prove(); + size_t NUM_APP_CIRCUITS = 1; + auto precomputed_vks = precompute_vks(NUM_APP_CIRCUITS); + auto [proof, vk] = accumulate_and_prove_ivc_with_precomputed_vks(NUM_APP_CIRCUITS, precomputed_vks); for (auto _ : state) { - benchmark::DoNotOptimize(ivc.verify(proof)); + benchmark::DoNotOptimize(ClientIVC::verify(proof, vk)); } } @@ -54,40 +46,18 @@ BENCHMARK_DEFINE_F(ClientIVCBench, VerificationOnly)(benchmark::State& state) */ BENCHMARK_DEFINE_F(ClientIVCBench, Full)(benchmark::State& state) { - - auto total_num_circuits = 2 * static_cast(state.range(0)); // 2x accounts for kernel circuits - ClientIVC ivc{ total_num_circuits, { AZTEC_TRACE_STRUCTURE } }; - auto mocked_vks = mock_vks(total_num_circuits); - - for (auto _ : state) { - BB_REPORT_OP_COUNT_IN_BENCH(state); - perform_ivc_accumulation_rounds(total_num_circuits, ivc, mocked_vks); - ivc.prove(); - } -} -/** - * @brief Benchmark the prover work for the full PG-Goblin IVC protocol - * @details Processes "dense" circuits of size 2^17 in a size 2^20 structured trace - */ 
-BENCHMARK_DEFINE_F(ClientIVCBench, Ambient_17_in_20)(benchmark::State& state) -{ - - auto total_num_circuits = 2 * static_cast(state.range(0)); // 2x accounts for kernel circuits - ClientIVC ivc{ total_num_circuits, { AZTEC_TRACE_STRUCTURE } }; - const bool large_first_app = false; - auto mocked_vks = mock_vks(total_num_circuits, large_first_app); + size_t NUM_APP_CIRCUITS = static_cast(state.range(0)); + auto precomputed_vks = precompute_vks(NUM_APP_CIRCUITS); for (auto _ : state) { - BB_REPORT_OP_COUNT_IN_BENCH(state); - perform_ivc_accumulation_rounds(total_num_circuits, ivc, mocked_vks, large_first_app); - ivc.prove(); + GOOGLE_BB_BENCH_REPORTER(state); + accumulate_and_prove_ivc_with_precomputed_vks(NUM_APP_CIRCUITS, precomputed_vks); } } #define ARGS Arg(ClientIVCBench::NUM_ITERATIONS_MEDIUM_COMPLEXITY)->Arg(2) BENCHMARK_REGISTER_F(ClientIVCBench, Full)->Unit(benchmark::kMillisecond)->ARGS; -BENCHMARK_REGISTER_F(ClientIVCBench, Ambient_17_in_20)->Unit(benchmark::kMillisecond)->ARGS; BENCHMARK_REGISTER_F(ClientIVCBench, VerificationOnly)->Unit(benchmark::kMillisecond); } // namespace diff --git a/barretenberg/cpp/src/barretenberg/benchmark/decrypt_bench/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/benchmark/decrypt_bench/CMakeLists.txt index da70f1faed88..a925a00a1010 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/decrypt_bench/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/benchmark/decrypt_bench/CMakeLists.txt @@ -1,4 +1,4 @@ -if (NOT FUZZING) +if (NOT FUZZING) add_executable( decrypt_bench main.cpp @@ -10,4 +10,16 @@ if (NOT FUZZING) ecc common ) -endif() \ No newline at end of file + if(ENABLE_STACKTRACES) + target_link_libraries( + decrypt_bench + PUBLIC + Backward::Interface + ) + target_link_options( + decrypt_bench + PRIVATE + -ldw -lelf + ) + endif() +endif() diff --git a/barretenberg/cpp/src/barretenberg/benchmark/pippenger_bench/pippenger.bench.cpp 
b/barretenberg/cpp/src/barretenberg/benchmark/pippenger_bench/pippenger.bench.cpp index 10c505f27d73..1ab482cb0722 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/pippenger_bench/pippenger.bench.cpp +++ b/barretenberg/cpp/src/barretenberg/benchmark/pippenger_bench/pippenger.bench.cpp @@ -6,7 +6,7 @@ #include "barretenberg/srs/global_crs.hpp" #include -#include "barretenberg/common/op_count_google_bench.hpp" +#include "barretenberg/common/google_bb_bench.hpp" #include #include @@ -65,7 +65,7 @@ BENCHMARK_DEFINE_F(PippengerBench, Full)(benchmark::State& state) PolynomialSpan scalars = PolynomialSpan(0, span); for (auto _ : state) { - BB_REPORT_OP_COUNT_IN_BENCH(state); + GOOGLE_BB_BENCH_REPORTER(state); (scalar_multiplication::pippenger_unsafe(scalars, points)); } } diff --git a/barretenberg/cpp/src/barretenberg/benchmark/protogalaxy_bench/protogalaxy.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/protogalaxy_bench/protogalaxy.bench.cpp index 0819c21fa4a9..86b8b632f450 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/protogalaxy_bench/protogalaxy.bench.cpp +++ b/barretenberg/cpp/src/barretenberg/benchmark/protogalaxy_bench/protogalaxy.bench.cpp @@ -1,12 +1,11 @@ #include -#include "barretenberg/common/op_count_google_bench.hpp" +#include "barretenberg/common/google_bb_bench.hpp" #include "barretenberg/protogalaxy/protogalaxy_prover.hpp" #include "barretenberg/protogalaxy/protogalaxy_prover_internal.hpp" #include "barretenberg/stdlib_circuit_builders/mock_circuits.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp" -#include "barretenberg/ultra_honk/decider_keys.hpp" -#include "barretenberg/ultra_honk/decider_proving_key.hpp" +#include "barretenberg/ultra_honk/prover_instance.hpp" using namespace benchmark; @@ -27,7 +26,7 @@ void vector_of_evaluations(State& state) noexcept void compute_row_evaluations(State& state) noexcept { - using PGInternal = ProtogalaxyProverInternal>; + using PGInternal = 
ProtogalaxyProverInternal>; using Polys = Flavor::ProverPolynomials; using Alphas = Flavor::SubrelationSeparators; using Params = RelationParameters; @@ -44,42 +43,41 @@ void compute_row_evaluations(State& state) noexcept } } -// Fold one proving key into an accumulator. -void fold_k(State& state) noexcept +// Fold one instance into an accumulator. +void fold(State& state) noexcept { - static constexpr size_t k{ 1 }; - using DeciderProvingKey = DeciderProvingKey_; - using DeciderVerificationKey = DeciderVerificationKey_; - using ProtogalaxyProver = ProtogalaxyProver_; + using ProverInstance = ProverInstance_; + using VerifierInstance = VerifierInstance_; + using ProtogalaxyProver = ProtogalaxyProver_; using Builder = typename Flavor::CircuitBuilder; bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); auto log2_num_gates = static_cast(state.range(0)); - const auto construct_key = [&]() { + const auto construct_inst = [&]() { Builder builder; MockCircuits::construct_arithmetic_circuit(builder, log2_num_gates); - return std::make_shared(builder); + return std::make_shared(builder); }; - std::vector> decider_pks; - std::vector> decider_vks; + std::array, NUM_INSTANCES> prover_insts; + std::array, NUM_INSTANCES> verifier_insts; // TODO(https://github.com/AztecProtocol/barretenberg/issues/938): Parallelize this loop - for (size_t i = 0; i < k + 1; ++i) { - std::shared_ptr decider_pk = construct_key(); - auto honk_vk = std::make_shared(decider_pk->get_precomputed()); - std::shared_ptr decider_vk = std::make_shared(honk_vk); - decider_pks.emplace_back(decider_pk); - decider_vks.emplace_back(decider_vk); + for (size_t i = 0; i < NUM_INSTANCES; ++i) { + std::shared_ptr prover_inst = construct_inst(); + auto honk_vk = std::make_shared(prover_inst->get_precomputed()); + std::shared_ptr verifier_inst = std::make_shared(honk_vk); + prover_insts[i] = prover_inst; + verifier_insts[i] = verifier_inst; } std::shared_ptr transcript = std::make_shared(); - ProtogalaxyProver 
folding_prover(decider_pks, decider_vks, transcript); + ProtogalaxyProver folding_prover(prover_insts, verifier_insts, transcript); for (auto _ : state) { - BB_REPORT_OP_COUNT_IN_BENCH(state); + GOOGLE_BB_BENCH_REPORTER(state); auto proof = folding_prover.prove(); } } @@ -87,7 +85,7 @@ void fold_k(State& state) noexcept BENCHMARK(vector_of_evaluations)->DenseRange(15, 21)->Unit(kMillisecond)->Iterations(1); BENCHMARK(compute_row_evaluations)->DenseRange(15, 21)->Unit(kMillisecond); // We stick to just k=1 for compile-time reasons. -BENCHMARK(fold_k)->/* vary the circuit size */ DenseRange(14, 20)->Unit(kMillisecond); +BENCHMARK(fold)->/* vary the circuit size */ DenseRange(14, 20)->Unit(kMillisecond); } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/benchmark/protogalaxy_rounds_bench/protogalaxy_rounds.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/protogalaxy_rounds_bench/protogalaxy_rounds.bench.cpp index 2c408bd38856..cbdb4259171d 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/protogalaxy_rounds_bench/protogalaxy_rounds.bench.cpp +++ b/barretenberg/cpp/src/barretenberg/benchmark/protogalaxy_rounds_bench/protogalaxy_rounds.bench.cpp @@ -3,7 +3,6 @@ #include "barretenberg/protogalaxy/protogalaxy_prover.hpp" #include "barretenberg/stdlib_circuit_builders/mock_circuits.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp" -#include "barretenberg/ultra_honk/decider_keys.hpp" using namespace benchmark; @@ -14,39 +13,40 @@ using Flavor = MegaFlavor; void _bench_round(::benchmark::State& state, void (*F)(ProtogalaxyProver_&)) { using Builder = typename Flavor::CircuitBuilder; - using DeciderProvingKey = DeciderProvingKey_; - using DeciderVerificationKey = DeciderVerificationKey_; + using ProverInstance = ProverInstance_; + using VerifierInstance = VerifierInstance_; using ProtogalaxyProver = ProtogalaxyProver_; bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); auto log2_num_gates = 
static_cast(state.range(0)); - const auto construct_key = [&]() { + const auto construct_inst = [&]() { Builder builder; MockCircuits::construct_arithmetic_circuit(builder, log2_num_gates); - return std::make_shared(builder); + return std::make_shared(builder); }; // TODO(https://github.com/AztecProtocol/barretenberg/issues/938): Parallelize this loop, also extend to more than // k=1 - std::shared_ptr decider_pk_1 = construct_key(); - auto honk_vk_1 = std::make_shared(decider_pk_1->get_precomputed()); - auto decider_vk_1 = std::make_shared(honk_vk_1); - std::shared_ptr decider_pk_2 = construct_key(); - auto honk_vk_2 = std::make_shared(decider_pk_2->get_precomputed()); - auto decider_vk_2 = std::make_shared(honk_vk_2); + std::shared_ptr prover_inst_1 = construct_inst(); + auto honk_vk_1 = std::make_shared(prover_inst_1->get_precomputed()); + auto verifier_inst_1 = std::make_shared(honk_vk_1); + std::shared_ptr prover_inst_2 = construct_inst(); + auto honk_vk_2 = std::make_shared(prover_inst_2->get_precomputed()); + auto verifier_inst_2 = std::make_shared(honk_vk_2); std::shared_ptr transcript = std::make_shared(); - ProtogalaxyProver folding_prover({ decider_pk_1, decider_pk_2 }, { decider_vk_1, decider_vk_2 }, transcript); + ProtogalaxyProver folding_prover( + { prover_inst_1, prover_inst_2 }, { verifier_inst_1, verifier_inst_2 }, transcript); // prepare the prover state - folding_prover.accumulator = decider_pk_1; + folding_prover.accumulator = prover_inst_1; folding_prover.deltas.resize(log2_num_gates); std::fill_n(folding_prover.deltas.begin(), log2_num_gates, 0); folding_prover.perturbator = Flavor::Polynomial::random(1 << log2_num_gates); folding_prover.transcript = Flavor::Transcript::prover_init_empty(); - folding_prover.run_oink_prover_on_each_incomplete_key(); + folding_prover.run_oink_prover_on_each_incomplete_instance(); for (auto _ : state) { F(folding_prover); @@ -58,15 +58,15 @@ void bench_round_mega(::benchmark::State& state, void 
(*F)(ProtogalaxyProver_ DenseRange(14, 20) -> Unit(kMillisecond); BENCHMARK_CAPTURE(bench_round_mega, perturbator, [](auto& prover) { prover.perturbator_round(prover.accumulator); }) -> DenseRange(14, 20) -> Unit(kMillisecond); BENCHMARK_CAPTURE(bench_round_mega, combiner_quotient, [](auto& prover) { - prover.combiner_quotient_round(prover.accumulator->gate_challenges, prover.deltas, prover.keys_to_fold); + prover.combiner_quotient_round(prover.accumulator->gate_challenges, prover.deltas, prover.prover_insts_to_fold); }) -> DenseRange(14, 20) -> Unit(kMillisecond); BENCHMARK_CAPTURE(bench_round_mega, fold, [](auto& prover) { - prover.update_target_sum_and_fold(prover.keys_to_fold, + prover.update_target_sum_and_fold(prover.prover_insts_to_fold, prover.combiner_quotient, prover.alphas, prover.relation_parameters, diff --git a/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/relations.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/relations.bench.cpp index daba4a4b6e1b..c27fbf0a1119 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/relations.bench.cpp +++ b/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/relations.bench.cpp @@ -3,7 +3,6 @@ #include "barretenberg/flavor/ultra_flavor.hpp" #include "barretenberg/protogalaxy/protogalaxy_prover_internal.hpp" // just for an alias; should perhaps move to prover #include "barretenberg/translator_vm/translator_flavor.hpp" -#include "barretenberg/ultra_honk/decider_keys.hpp" #include namespace { @@ -53,10 +52,9 @@ template void execute_relation_for_univaria // Single execution of relation on PG univariates, i.e. 
PG combiner work template void execute_relation_for_pg_univariates(::benchmark::State& state) { - using DeciderProvingKeys = DeciderProvingKeys_; - using Input = ProtogalaxyProverInternal::ExtendedUnivariates; - using Accumulator = - typename Relation::template ProtogalaxyTupleOfUnivariatesOverSubrelations; + using ProverInstance = ProverInstance_; + using Input = ProtogalaxyProverInternal::ExtendedUnivariates; + using Accumulator = typename Relation::template ProtogalaxyTupleOfUnivariatesOverSubrelations; execute_relation(state); } diff --git a/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/mock_circuits.hpp b/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/mock_circuits.hpp index e1bb26b6928e..e87f6c087f5c 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/mock_circuits.hpp +++ b/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/mock_circuits.hpp @@ -54,11 +54,11 @@ Prover get_prover(void (*test_circuit_function)(typename Prover::Flavor::Circuit Builder builder; test_circuit_function(builder, num_iterations); - PROFILE_THIS_NAME("creating prover"); + BB_BENCH_NAME("creating prover"); - auto proving_key = std::make_shared>(builder); - auto verification_key = std::make_shared(proving_key->get_precomputed()); - return Prover(proving_key, verification_key); + auto prover_instance = std::make_shared>(builder); + auto verification_key = std::make_shared(prover_instance->get_precomputed()); + return Prover(prover_instance, verification_key); }; /** diff --git a/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/ultra_honk_rounds.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/ultra_honk_rounds.bench.cpp deleted file mode 100644 index 2160dbf6c492..000000000000 --- a/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/ultra_honk_rounds.bench.cpp +++ /dev/null @@ -1,95 +0,0 @@ -#include - -#include "barretenberg/benchmark/ultra_bench/mock_circuits.hpp" -#include 
"barretenberg/common/op_count_google_bench.hpp" -#include "barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp" -#include "barretenberg/ultra_honk/decider_prover.hpp" -#include "barretenberg/ultra_honk/oink_prover.hpp" -#include "barretenberg/ultra_honk/ultra_prover.hpp" - -using namespace benchmark; -using namespace bb; - -// The rounds to measure -enum { - PREAMBLE, - WIRE_COMMITMENTS, - SORTED_LIST_ACCUMULATOR, - LOG_DERIVATIVE_INVERSE, - GRAND_PRODUCT_COMPUTATION, - GENERATE_ALPHAS, - RELATION_CHECK -}; - -/** - * @details Benchmark Goblin ultrahonk by performing all the rounds, but only measuring one. - * Note: As a result the very short rounds take a long time for statistical significance, so recommended to set their - * iterations to 1. - * @param state - The google benchmark state. - * @param prover - The Goblin ultrahonk prover. - * @param index - The pass to measure. - **/ -BB_PROFILE void test_round_inner(State& state, MegaProver& prover, size_t index) noexcept -{ - auto time_if_index = [&](size_t target_index, auto&& func) -> void { - BB_REPORT_OP_COUNT_IN_BENCH(state); - if (index == target_index) { - state.ResumeTiming(); - } - - func(); - if (index == target_index) { - state.PauseTiming(); - } else { - // We don't actually want to write to user-defined counters - BB_REPORT_OP_COUNT_BENCH_CANCEL(); - } - }; - // why is this mega if the name of file is ultra - auto verification_key = std::make_shared(prover.proving_key->get_precomputed()); - OinkProver oink_prover(prover.proving_key, verification_key, prover.transcript); - time_if_index(PREAMBLE, [&] { oink_prover.execute_preamble_round(); }); - time_if_index(WIRE_COMMITMENTS, [&] { oink_prover.execute_wire_commitments_round(); }); - time_if_index(SORTED_LIST_ACCUMULATOR, [&] { oink_prover.execute_sorted_list_accumulator_round(); }); - time_if_index(LOG_DERIVATIVE_INVERSE, [&] { oink_prover.execute_log_derivative_inverse_round(); }); - time_if_index(GRAND_PRODUCT_COMPUTATION, [&] { 
oink_prover.execute_grand_product_computation_round(); }); - time_if_index(GENERATE_ALPHAS, [&] { prover.proving_key->alphas = oink_prover.generate_alphas_round(); }); - - prover.generate_gate_challenges(); - - DeciderProver_ decider_prover(prover.proving_key, prover.transcript); - time_if_index(RELATION_CHECK, [&] { decider_prover.execute_relation_check_rounds(); }); -} -BB_PROFILE static void test_round(State& state, size_t index) noexcept -{ - auto log2_num_gates = static_cast(state.range(0)); - bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); - - // TODO(https://github.com/AztecProtocol/barretenberg/issues/761) benchmark both sparse and dense circuits - auto prover = bb::mock_circuits::get_prover( - &bb::mock_circuits::generate_basic_arithmetic_circuit, log2_num_gates); - for (auto _ : state) { - state.PauseTiming(); - test_round_inner(state, prover, index); - state.ResumeTiming(); - // NOTE: google bench is very finnicky, must end in ResumeTiming() for correctness - } -} -#define ROUND_BENCHMARK(round) \ - static void ROUND_##round(State& state) noexcept \ - { \ - test_round(state, round); \ - } \ - BENCHMARK(ROUND_##round)->DenseRange(12, 19)->Unit(kMillisecond) - -// Fast rounds take a long time to benchmark because of how we compute statistical significance. -// Limit to one iteration so we don't spend a lot of time redoing full proofs just to measure this part. 
-ROUND_BENCHMARK(PREAMBLE)->Iterations(1); -ROUND_BENCHMARK(WIRE_COMMITMENTS)->Iterations(1); -ROUND_BENCHMARK(SORTED_LIST_ACCUMULATOR)->Iterations(1); -ROUND_BENCHMARK(LOG_DERIVATIVE_INVERSE)->Iterations(1); -ROUND_BENCHMARK(GRAND_PRODUCT_COMPUTATION)->Iterations(1); -ROUND_BENCHMARK(GENERATE_ALPHAS)->Iterations(1); -ROUND_BENCHMARK(RELATION_CHECK); - -BENCHMARK_MAIN(); diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/CMakeLists.txt index 00257d6b32f4..27b252ea1fcd 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/CMakeLists.txt @@ -1 +1,4 @@ -barretenberg_module(boomerang_value_detection stdlib_circuit_builders circuit_checker stdlib_primitives numeric stdlib_aes128 stdlib_sha256 stdlib_blake2s stdlib_blake3s stdlib_poseidon2 stdlib_goblin_verifier) +barretenberg_module(boomerang_value_detection stdlib_circuit_builders circuit_checker + stdlib_primitives numeric stdlib_aes128 stdlib_sha256 stdlib_blake2s + stdlib_blake3s stdlib_poseidon2 stdlib_honk_verifier stdlib_protogalaxy_verifier + stdlib_merge_verifier) diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph.cpp index f9e7ddf4f9a7..33d527bb5e6f 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph.cpp @@ -1,5 +1,6 @@ #include "./graph.hpp" #include "barretenberg/common/assert.hpp" +#include "barretenberg/stdlib/primitives/circuit_builders/circuit_builders.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp" #include #include @@ -17,10 +18,10 @@ namespace cdg { * @param block block to find * @return size_t index of the found block */ -template -size_t 
StaticAnalyzer_::find_block_index(UltraCircuitBuilder& ultra_builder, const UltraBlock& block) +template +size_t StaticAnalyzer_::find_block_index(const auto& block) { - auto blocks_data = ultra_builder.blocks.get(); + auto blocks_data = circuit_builder.blocks.get(); size_t index = 0; for (size_t i = 0; i < blocks_data.size(); i++) { if ((void*)(&blocks_data[i]) == (void*)(&block)) { @@ -45,10 +46,10 @@ size_t StaticAnalyzer_::find_block_index(UltraCircuitBuilder& ultra_builder, * 4) Updates variable_gates map with gate indices for each variable * 5) Increments the gate count for each processed variable */ -template -inline void StaticAnalyzer_::process_gate_variables(std::vector& gate_variables, - size_t gate_index, - size_t block_idx) +template +inline void StaticAnalyzer_::process_gate_variables(std::vector& gate_variables, + size_t gate_index, + size_t block_idx) { auto unique_variables = std::unique(gate_variables.begin(), gate_variables.end()); gate_variables.erase(unique_variables, gate_variables.end()); @@ -75,11 +76,12 @@ inline void StaticAnalyzer_::process_gate_variables(std::vector& g * @details Processes both regular arithmetic gates and minigates, handling fixed witness gates * and different arithmetic operations based on selector values */ -template -inline std::vector> StaticAnalyzer_::get_arithmetic_gate_connected_component( - bb::UltraCircuitBuilder& ultra_circuit_builder, size_t index, size_t block_idx, UltraBlock& blk) +template +inline std::vector StaticAnalyzer_::get_arithmetic_gate_connected_component( + size_t index, size_t block_idx, auto& blk) { auto q_arith = blk.q_arith()[index]; + std::vector all_variables; std::vector gate_variables; std::vector minigate_variables; std::vector> all_gates_variables; @@ -98,7 +100,7 @@ inline std::vector> StaticAnalyzer_::get_arithmetic_ga uint32_t fourth_idx = blk.w_4()[index]; if (q_m.is_zero() && q_1 == 1 && q_2.is_zero() && q_3.is_zero() && q_4.is_zero() && q_arith == FF::one()) { // this is 
fixed_witness gate. So, variable index contains in left wire. So, we have to take only it. - fixed_variables.insert(this->to_real(ultra_circuit_builder, left_idx)); + fixed_variables.insert(this->to_real(left_idx)); } else if (!q_m.is_zero() || q_1 != FF::one() || !q_2.is_zero() || !q_3.is_zero() || !q_4.is_zero()) { // this is not the gate for fix_witness, so we have to process this gate if (!q_m.is_zero()) { @@ -139,16 +141,13 @@ inline std::vector> StaticAnalyzer_::get_arithmetic_ga } } } - gate_variables = this->to_real(ultra_circuit_builder, gate_variables); - minigate_variables = this->to_real(ultra_circuit_builder, minigate_variables); - this->process_gate_variables(gate_variables, index, block_idx); - this->process_gate_variables(minigate_variables, index, block_idx); - all_gates_variables.emplace_back(gate_variables); - if (!minigate_variables.empty()) { - all_gates_variables.emplace_back(minigate_variables); - } - - return all_gates_variables; + gate_variables = to_real(gate_variables); + minigate_variables = to_real(minigate_variables); + all_variables.reserve(gate_variables.size() + minigate_variables.size()); + all_variables.insert(all_variables.end(), gate_variables.begin(), gate_variables.end()); + all_variables.insert(all_variables.end(), minigate_variables.begin(), minigate_variables.end()); + process_gate_variables(all_variables, index, block_idx); + return all_variables; } /** @@ -162,35 +161,43 @@ inline std::vector> StaticAnalyzer_::get_arithmetic_ga * @details Handles both elliptic curve addition and doubling operations, * collecting variables from current and next gates as needed */ -template -inline std::vector StaticAnalyzer_::get_elliptic_gate_connected_component( - bb::UltraCircuitBuilder& ultra_circuit_builder, size_t index, size_t block_idx, UltraBlock& blk) +template +inline std::vector StaticAnalyzer_::get_elliptic_gate_connected_component( + size_t index, size_t block_idx, auto& blk) { std::vector gate_variables; if 
(!blk.q_elliptic()[index].is_zero()) { + std::vector first_row_variables; + std::vector second_row_variables; gate_variables.reserve(6); bool is_elliptic_add_gate = !blk.q_1()[index].is_zero() && blk.q_m()[index].is_zero(); bool is_elliptic_dbl_gate = blk.q_1()[index].is_zero() && blk.q_m()[index] == FF::one(); - auto right_idx = blk.w_r()[index]; - auto out_idx = blk.w_o()[index]; - gate_variables.emplace_back(right_idx); - gate_variables.emplace_back(out_idx); + first_row_variables.emplace_back(blk.w_r()[index]); + first_row_variables.emplace_back(blk.w_o()[index]); if (index != blk.size() - 1) { if (is_elliptic_add_gate) { // if this gate is ecc_add_gate, we have to get indices x2, x3, y3, y2 from the next gate - gate_variables.emplace_back(blk.w_l()[index + 1]); - gate_variables.emplace_back(blk.w_r()[index + 1]); - gate_variables.emplace_back(blk.w_o()[index + 1]); - gate_variables.emplace_back(blk.w_4()[index + 1]); + second_row_variables.emplace_back(blk.w_l()[index + 1]); + second_row_variables.emplace_back(blk.w_r()[index + 1]); + second_row_variables.emplace_back(blk.w_o()[index + 1]); + second_row_variables.emplace_back(blk.w_4()[index + 1]); } if (is_elliptic_dbl_gate) { // if this gate is ecc_dbl_gate, we have to indices x3, y3 from right and output wires - gate_variables.emplace_back(blk.w_r()[index + 1]); - gate_variables.emplace_back(blk.w_o()[index + 1]); + second_row_variables.emplace_back(blk.w_r()[index + 1]); + second_row_variables.emplace_back(blk.w_o()[index + 1]); } } - gate_variables = this->to_real(ultra_circuit_builder, gate_variables); - this->process_gate_variables(gate_variables, index, block_idx); + if (!first_row_variables.empty()) { + first_row_variables = to_real(first_row_variables); + process_gate_variables(first_row_variables, index, block_idx); + gate_variables.insert(gate_variables.end(), first_row_variables.cbegin(), first_row_variables.cend()); + } + if (!second_row_variables.empty()) { + second_row_variables = 
to_real(second_row_variables); + process_gate_variables(second_row_variables, index, block_idx); + gate_variables.insert(gate_variables.end(), second_row_variables.cbegin(), second_row_variables.cend()); + } } return gate_variables; } @@ -206,20 +213,30 @@ inline std::vector StaticAnalyzer_::get_elliptic_gate_connected_co * @details Processes delta range constraints by collecting all wire indices * from the current gate */ -template -inline std::vector StaticAnalyzer_::get_sort_constraint_connected_component( - bb::UltraCircuitBuilder& ultra_circuit_builder, size_t index, size_t blk_idx, UltraBlock& block) +template +inline std::vector StaticAnalyzer_::get_sort_constraint_connected_component( + size_t index, size_t blk_idx, auto& block) { std::vector gate_variables = {}; if (!block.q_delta_range()[index].is_zero()) { - gate_variables.reserve(4); - gate_variables.emplace_back(block.w_l()[index]); - gate_variables.emplace_back(block.w_r()[index]); - gate_variables.emplace_back(block.w_o()[index]); - gate_variables.emplace_back(block.w_4()[index]); + std::vector row_variables = { + block.w_l()[index], block.w_r()[index], block.w_o()[index], block.w_4()[index] + }; + /* + sometimes process_range_list function adds variables with zero_idx in beginning of vector with indices + in order to pad a size of indices to gate width. 
But tool has to ignore these additional variables + */ + for (const auto& var_idx : row_variables) { + if (var_idx != circuit_builder.zero_idx()) { + gate_variables.emplace_back(var_idx); + } + } + if (index != block.size() - 1 && block.w_l()[index + 1] != circuit_builder.zero_idx()) { + gate_variables.emplace_back(block.w_l()[index + 1]); + } } - gate_variables = this->to_real(ultra_circuit_builder, gate_variables); - this->process_gate_variables(gate_variables, index, blk_idx); + gate_variables = to_real(gate_variables); + process_gate_variables(gate_variables, index, blk_idx); return gate_variables; } @@ -234,9 +251,10 @@ inline std::vector StaticAnalyzer_::get_sort_constraint_connected_ * @details Processes plookup gates by collecting variables based on selector values, * including variables from the next gate when necessary */ -template -inline std::vector StaticAnalyzer_::get_plookup_gate_connected_component( - bb::UltraCircuitBuilder& ultra_circuit_builder, size_t index, size_t blk_idx, UltraBlock& block) +template +inline std::vector StaticAnalyzer_::get_plookup_gate_connected_component(size_t index, + size_t blk_idx, + auto& block) { std::vector gate_variables; auto q_lookup_type = block.q_lookup_type()[index]; @@ -259,8 +277,8 @@ inline std::vector StaticAnalyzer_::get_plookup_gate_connected_com gate_variables.emplace_back(block.w_o()[index + 1]); } } - gate_variables = this->to_real(ultra_circuit_builder, gate_variables); - this->process_gate_variables(gate_variables, index, blk_idx); + gate_variables = to_real(gate_variables); + process_gate_variables(gate_variables, index, blk_idx); } return gate_variables; } @@ -274,9 +292,10 @@ inline std::vector StaticAnalyzer_::get_plookup_gate_connected_com * @param block block containing the gates * @return std::vector vector of connected variables from the gate */ -template -inline std::vector StaticAnalyzer_::get_poseido2s_gate_connected_component( - bb::UltraCircuitBuilder& ultra_circuit_builder, size_t index, 
size_t blk_idx, UltraBlock& block) +template +inline std::vector StaticAnalyzer_::get_poseido2s_gate_connected_component(size_t index, + size_t blk_idx, + auto& block) { std::vector gate_variables; auto internal_selector = block.q_poseidon2_internal()[index]; @@ -293,8 +312,8 @@ inline std::vector StaticAnalyzer_::get_poseido2s_gate_connected_c gate_variables.emplace_back(block.w_o()[index + 1]); gate_variables.emplace_back(block.w_4()[index + 1]); } - gate_variables = this->to_real(ultra_circuit_builder, gate_variables); - this->process_gate_variables(gate_variables, index, blk_idx); + gate_variables = to_real(gate_variables); + process_gate_variables(gate_variables, index, blk_idx); } return gate_variables; } @@ -308,9 +327,10 @@ inline std::vector StaticAnalyzer_::get_poseido2s_gate_connected_c * @param block block containing the gates * @return std::vector vector of connected variables from the gate */ -template -inline std::vector StaticAnalyzer_::get_memory_gate_connected_component( - bb::UltraCircuitBuilder& ultra_builder, size_t index, size_t blk_idx, UltraBlock& block) +template +inline std::vector StaticAnalyzer_::get_memory_gate_connected_component(size_t index, + size_t blk_idx, + auto& block) { std::vector gate_variables; if (!block.q_memory()[index].is_zero()) { @@ -319,14 +339,6 @@ inline std::vector StaticAnalyzer_::get_memory_gate_connected_comp auto q_2 = block.q_2()[index]; auto q_3 = block.q_3()[index]; auto q_4 = block.q_4()[index]; - [[maybe_unused]] auto q_m = block.q_m()[index]; - [[maybe_unused]] auto q_c = block.q_c()[index]; - - [[maybe_unused]] auto w_l = block.w_l()[index]; - [[maybe_unused]] auto w_r = block.w_r()[index]; - [[maybe_unused]] auto w_o = block.w_o()[index]; - [[maybe_unused]] auto w_4 = block.w_4()[index]; - if (q_1 == FF::one() && q_4 == FF::one()) { ASSERT(q_3.is_zero()); // ram timestamp check @@ -361,8 +373,8 @@ inline std::vector StaticAnalyzer_::get_memory_gate_connected_comp } } } - gate_variables = 
this->to_real(ultra_builder, gate_variables); - this->process_gate_variables(gate_variables, index, blk_idx); + gate_variables = to_real(gate_variables); + process_gate_variables(gate_variables, index, blk_idx); return gate_variables; } @@ -375,9 +387,9 @@ inline std::vector StaticAnalyzer_::get_memory_gate_connected_comp * @param block block containing the gates * @return std::vector vector of connected variables from the gate */ -template -inline std::vector StaticAnalyzer_::get_non_native_field_gate_connected_component( - bb::UltraCircuitBuilder& ultra_builder, size_t index, size_t blk_idx, UltraBlock& block) +template +inline std::vector StaticAnalyzer_::get_non_native_field_gate_connected_component( + size_t index, size_t blk_idx, auto& block) { std::vector gate_variables; if (!block.q_nnf()[index].is_zero()) { @@ -387,7 +399,6 @@ inline std::vector StaticAnalyzer_::get_non_native_field_gate_conn auto q_3 = block.q_3()[index]; auto q_4 = block.q_4()[index]; auto q_m = block.q_m()[index]; - [[maybe_unused]] auto q_c = block.q_c()[index]; auto w_l = block.w_l()[index]; auto w_r = block.w_r()[index]; @@ -444,8 +455,8 @@ inline std::vector StaticAnalyzer_::get_non_native_field_gate_conn } } } - gate_variables = this->to_real(ultra_builder, gate_variables); - this->process_gate_variables(gate_variables, index, blk_idx); + gate_variables = to_real(gate_variables); + process_gate_variables(gate_variables, index, blk_idx); return gate_variables; } @@ -456,53 +467,56 @@ inline std::vector StaticAnalyzer_::get_non_native_field_gate_conn * @param rom_array ROM transcript containing records with witness indices and gate information * @return std::vector vector of connected variables from ROM table gates */ -template -inline std::vector StaticAnalyzer_::get_rom_table_connected_component( - bb::UltraCircuitBuilder& ultra_builder, const bb::RomTranscript& rom_array) +template +inline std::vector StaticAnalyzer_::get_rom_table_connected_component( + const bb::RomTranscript& 
rom_array) { - size_t block_index = find_block_index(ultra_builder, ultra_builder.blocks.memory); - BB_ASSERT_EQ(block_index, 5U); - // Every RomTranscript data structure has 2 main components that are interested for static analyzer: // 1) records contains values that were put in the gate, we can use them to create connections between variables // 2) states contains values witness indexes that we can find in the ROM record in the RomTrascript, so we can // ignore state of the ROM transcript, because we still can connect all variables using variables from records. std::vector rom_table_variables; + if (std::optional blk_idx = find_block_index(circuit_builder.blocks.memory); blk_idx) { + // Every RomTranscript data structure has 2 main components that are interested for static analyzer: + // 1) records contains values that were put in the gate, we can use them to create connections between variables + // 2) states contains values witness indexes that we can find in the ROM record in the RomTrascript, so we can + // ignore state of the ROM transcript, because we still can connect all variables using variables from records. 
+ for (const auto& record : rom_array.records) { + std::vector gate_variables; + size_t gate_index = record.gate_index; + + auto q_1 = circuit_builder.blocks.memory.q_1()[gate_index]; + auto q_2 = circuit_builder.blocks.memory.q_2()[gate_index]; + auto q_3 = circuit_builder.blocks.memory.q_3()[gate_index]; + auto q_4 = circuit_builder.blocks.memory.q_4()[gate_index]; + auto q_m = circuit_builder.blocks.memory.q_m()[gate_index]; + auto q_c = circuit_builder.blocks.memory.q_c()[gate_index]; + + auto index_witness = record.index_witness; + auto vc1_witness = record.value_column1_witness; // state[0] from RomTranscript + auto vc2_witness = record.value_column2_witness; // state[1] from RomTranscript + auto record_witness = record.record_witness; - for (const auto& record : rom_array.records) { - std::vector gate_variables; - size_t gate_index = record.gate_index; - - auto q_1 = ultra_builder.blocks.memory.q_1()[gate_index]; - auto q_2 = ultra_builder.blocks.memory.q_2()[gate_index]; - auto q_3 = ultra_builder.blocks.memory.q_3()[gate_index]; - auto q_4 = ultra_builder.blocks.memory.q_4()[gate_index]; - auto q_m = ultra_builder.blocks.memory.q_m()[gate_index]; - auto q_c = ultra_builder.blocks.memory.q_c()[gate_index]; - - auto index_witness = record.index_witness; - auto vc1_witness = record.value_column1_witness; // state[0] from RomTranscript - auto vc2_witness = record.value_column2_witness; // state[1] from RomTranscript - auto record_witness = record.record_witness; - - if (q_1 == FF::one() && q_m == FF::one() && q_2.is_zero() && q_3.is_zero() && q_4.is_zero() && q_c.is_zero()) { - // By default ROM read gate uses variables (w_1, w_2, w_3, w_4) = (index_witness, vc1_witness, vc2_witness, - // record_witness) So we can update all of them - gate_variables.emplace_back(index_witness); - if (vc1_witness != ultra_builder.zero_idx) { - gate_variables.emplace_back(vc1_witness); + if (q_1 == FF::one() && q_m == FF::one() && q_2.is_zero() && q_3.is_zero() && q_4.is_zero() 
&& + q_c.is_zero()) { + // By default ROM read gate uses variables (w_1, w_2, w_3, w_4) = (index_witness, vc1_witness, + // vc2_witness, record_witness) So we can update all of them + gate_variables.emplace_back(index_witness); + if (vc1_witness != circuit_builder.zero_idx()) { + gate_variables.emplace_back(vc1_witness); + } + if (vc2_witness != circuit_builder.zero_idx()) { + gate_variables.emplace_back(vc2_witness); + } + gate_variables.emplace_back(record_witness); } - if (vc2_witness != ultra_builder.zero_idx) { - gate_variables.emplace_back(vc2_witness); + gate_variables = to_real(gate_variables); + process_gate_variables(gate_variables, gate_index, *blk_idx); + // after process_gate_variables function gate_variables constists of real variables indexes, so we can + // add all this variables in the final vector to connect all of them + if (!gate_variables.empty()) { + rom_table_variables.insert(rom_table_variables.end(), gate_variables.begin(), gate_variables.end()); } - gate_variables.emplace_back(record_witness); - } - gate_variables = this->to_real(ultra_builder, gate_variables); - this->process_gate_variables(gate_variables, gate_index, block_index); - // after process_gate_variables function gate_variables constists of real variables indexes, so we can add all - // this variables in the final vector to connect all of them - if (!gate_variables.empty()) { - rom_table_variables.insert(rom_table_variables.end(), gate_variables.begin(), gate_variables.end()); } } return rom_table_variables; @@ -515,168 +529,239 @@ inline std::vector StaticAnalyzer_::get_rom_table_connected_compon * @param ram_array RAM transcript containing records with witness indices and gate information * @return std::vector vector of connected variables from RAM table gates */ -template -inline std::vector StaticAnalyzer_::get_ram_table_connected_component( - bb::UltraCircuitBuilder& ultra_builder, const bb::RamTranscript& ram_array) +template +inline std::vector 
StaticAnalyzer_::get_ram_table_connected_component( + const bb::RamTranscript& ram_array) { - size_t block_index = find_block_index(ultra_builder, ultra_builder.blocks.memory); - BB_ASSERT_EQ(block_index, 5U); std::vector ram_table_variables; - for (const auto& record : ram_array.records) { - std::vector gate_variables; - size_t gate_index = record.gate_index; - - auto q_1 = ultra_builder.blocks.memory.q_1()[gate_index]; - auto q_2 = ultra_builder.blocks.memory.q_2()[gate_index]; - auto q_3 = ultra_builder.blocks.memory.q_3()[gate_index]; - auto q_4 = ultra_builder.blocks.memory.q_4()[gate_index]; - auto q_m = ultra_builder.blocks.memory.q_m()[gate_index]; - auto q_c = ultra_builder.blocks.memory.q_c()[gate_index]; - - auto index_witness = record.index_witness; - auto timestamp_witness = record.timestamp_witness; - auto value_witness = record.value_witness; - auto record_witness = record.record_witness; - - if (q_1 == FF::one() && q_m == FF::one() && q_2.is_zero() && q_3.is_zero() && q_4.is_zero() && - (q_c.is_zero() || q_c == FF::one())) { - // By default RAM read/write gate uses variables (w_1, w_2, w_3, w_4) = (index_witness, timestamp_witness, - // value_witness, record_witness) So we can update all of them - gate_variables.emplace_back(index_witness); - if (timestamp_witness != ultra_builder.zero_idx) { - gate_variables.emplace_back(timestamp_witness); - } - if (value_witness != ultra_builder.zero_idx) { - gate_variables.emplace_back(value_witness); + if (std::optional blk_idx = find_block_index(circuit_builder.blocks.memory); blk_idx) { + for (const auto& record : ram_array.records) { + std::vector gate_variables; + size_t gate_index = record.gate_index; + + auto q_1 = circuit_builder.blocks.memory.q_1()[gate_index]; + auto q_2 = circuit_builder.blocks.memory.q_2()[gate_index]; + auto q_3 = circuit_builder.blocks.memory.q_3()[gate_index]; + auto q_4 = circuit_builder.blocks.memory.q_4()[gate_index]; + auto q_m = 
circuit_builder.blocks.memory.q_m()[gate_index]; + auto q_c = circuit_builder.blocks.memory.q_c()[gate_index]; + + auto index_witness = record.index_witness; + auto timestamp_witness = record.timestamp_witness; + auto value_witness = record.value_witness; + auto record_witness = record.record_witness; + + if (q_1 == FF::one() && q_m == FF::one() && q_2.is_zero() && q_3.is_zero() && q_4.is_zero() && + (q_c.is_zero() || q_c == FF::one())) { + // By default RAM read/write gate uses variables (w_1, w_2, w_3, w_4) = (index_witness, + // timestamp_witness, value_witness, record_witness) So we can update all of them + gate_variables.emplace_back(index_witness); + if (timestamp_witness != circuit_builder.zero_idx()) { + gate_variables.emplace_back(timestamp_witness); + } + if (value_witness != circuit_builder.zero_idx()) { + gate_variables.emplace_back(value_witness); + } + gate_variables.emplace_back(record_witness); } - gate_variables.emplace_back(record_witness); + gate_variables = to_real(gate_variables); + process_gate_variables(gate_variables, gate_index, *blk_idx); + // after process_gate_variables function gate_variables constists of real variables indexes, so we can add + // all these variables in the final vector to connect all of them + ram_table_variables.insert(ram_table_variables.end(), gate_variables.begin(), gate_variables.end()); } - gate_variables = this->to_real(ultra_builder, gate_variables); - this->process_gate_variables(gate_variables, gate_index, block_index); - // after process_gate_variables function gate_variables constists of real variables indexes, so we can add all - // these variables in the final vector to connect all of them - ram_table_variables.insert(ram_table_variables.end(), gate_variables.begin(), gate_variables.end()); } return ram_table_variables; } /** - * @brief Construct a new StaticAnalyzer from Ultra Circuit Builder - * @tparam FF field type used in the circuit - * @param ultra_circuit_constructor circuit builder containing all 
gates and variables - * @details This constructor initializes the graph structure by: - * 1) Creating data structures for tracking: - * - Number of gates each variable appears in (variables_gate_counts) - * - Adjacency lists for each variable (variable_adjacency_lists) - * - Degree of each variable (variables_degree) - * 2) Processing different types of gates: - * - Arithmetic gates - * - Elliptic curve gates - * - Plookup gates - * - Poseidon2 gates - * - Memory gates - * - Non-native field gates - * - Delta range gates - * 3) Creating connections between variables that appear in the same gate - * 4) Special handling for sorted constraints in delta range blocks + * @brief this method creates connected components from databus gates + * @tparam FF field type + * @param index index of the current gate + * @param block_idx index of the current block + * @param blk block containing the gates + * @return std::vector vector of connected variables from the gate + * @details Processes databus read operations by collecting variables from left and right wires */ -template -StaticAnalyzer_::StaticAnalyzer_(bb::UltraCircuitBuilder& ultra_circuit_constructor, bool connect_variables) +template +inline std::vector StaticAnalyzer_::get_databus_connected_component(size_t index, + size_t block_idx, + auto& blk) { - this->variables_gate_counts = - std::unordered_map(ultra_circuit_constructor.real_variable_index.size()); - this->variable_adjacency_lists = - std::unordered_map>(ultra_circuit_constructor.real_variable_index.size()); - this->variables_degree = std::unordered_map(ultra_circuit_constructor.real_variable_index.size()); - for (const auto& variable_index : ultra_circuit_constructor.real_variable_index) { - variables_gate_counts[variable_index] = 0; - variables_degree[variable_index] = 0; - variable_adjacency_lists[variable_index] = {}; + std::vector gate_variables; + if (!blk.q_busread()[index].is_zero()) { + gate_variables.insert(gate_variables.end(), { blk.w_l()[index], 
blk.w_r()[index] }); + gate_variables = to_real(gate_variables); + process_gate_variables(gate_variables, index, block_idx); + } + return gate_variables; +} + +/** + * @brief this method creates connected components from elliptic curve operation gates + * @tparam FF field type + * @param index index of the current gate + * @param block_idx index of the current block + * @param blk block containing the gates + * @return std::vector vector of connected variables from the gate + * @details Processes elliptic curve operations by collecting variables from current and next gates, + * handling opcodes and coordinate variables for curve operations + */ +template +inline std::vector StaticAnalyzer_::get_eccop_part_connected_component(size_t index, + size_t block_idx, + auto& blk) +{ + std::vector gate_variables; + std::vector first_row_variables; + std::vector second_row_variables; + auto w1 = blk.w_l()[index]; // get opcode of operation, because function get_ecc_op_idx returns type + // uint32_t and it adds as w1 + if (w1 != circuit_builder.zero_idx()) { + // this is opcode and start of the UltraOp element + first_row_variables.insert( + first_row_variables.end(), + { w1, blk.w_r()[index], blk.w_o()[index], blk.w_4()[index] }); // add op, x_lo, x_hi, y_lo + if (index < blk.size() - 1) { + second_row_variables.insert( + second_row_variables.end(), + { blk.w_r()[index + 1], blk.w_o()[index + 1], blk.w_4()[index + 1] }); // add y_hi, z1, z2 + } + first_row_variables = to_real(first_row_variables); + second_row_variables = to_real(second_row_variables); + process_gate_variables(first_row_variables, index, block_idx); + process_gate_variables(second_row_variables, index, block_idx); + } + if (!first_row_variables.empty()) { + gate_variables.insert(gate_variables.end(), first_row_variables.cbegin(), first_row_variables.cend()); + } + if (!second_row_variables.empty()) { + gate_variables.insert(gate_variables.end(), second_row_variables.cbegin(), second_row_variables.cend()); + } 
+ return gate_variables; +} + +template void StaticAnalyzer_::process_execution_trace() +{ + auto block_data = circuit_builder.blocks.get(); + + // We have to determine pub_inputs block index based on circuit builder type, because we have to skip it. + // If type of CircuitBuilder is UltraCircuitBuilder, the pub_inputs block is the first block so we can set + // pub_inputs_block_idx + size_t pub_inputs_block_idx = 0; + + // For MegaCircuitBuilder, pub_inputs block has index 3 + if constexpr (IsMegaBuilder) { + pub_inputs_block_idx = 3; } - std::map constant_variable_indices = ultra_circuit_constructor.constant_variable_indices; - auto block_data = ultra_circuit_constructor.blocks.get(); - for (size_t blk_idx = 1; blk_idx < block_data.size() - 1; blk_idx++) { - if (block_data[blk_idx].size() == 0) { + for (size_t blk_idx = 0; blk_idx < block_data.size() - 1; blk_idx++) { + if (block_data[blk_idx].size() == 0 || blk_idx == pub_inputs_block_idx) { continue; } std::vector sorted_variables; + std::vector eccop_variables; for (size_t gate_idx = 0; gate_idx < block_data[blk_idx].size(); gate_idx++) { - auto arithmetic_gates_variables = get_arithmetic_gate_connected_component( - ultra_circuit_constructor, gate_idx, blk_idx, block_data[blk_idx]); - if (!arithmetic_gates_variables.empty() && connect_variables) { - for (const auto& gate_variables : arithmetic_gates_variables) { - connect_all_variables_in_vector(ultra_circuit_constructor, gate_variables); - } - } - auto elliptic_gate_variables = get_elliptic_gate_connected_component( - ultra_circuit_constructor, gate_idx, blk_idx, block_data[blk_idx]); - if (connect_variables) { - connect_all_variables_in_vector(ultra_circuit_constructor, elliptic_gate_variables); - } - auto lookup_gate_variables = - get_plookup_gate_connected_component(ultra_circuit_constructor, gate_idx, blk_idx, block_data[blk_idx]); - if (connect_variables) { - connect_all_variables_in_vector(ultra_circuit_constructor, lookup_gate_variables); - } - auto 
poseidon2_gate_variables = get_poseido2s_gate_connected_component( - ultra_circuit_constructor, gate_idx, blk_idx, block_data[blk_idx]); - if (connect_variables) { - connect_all_variables_in_vector(ultra_circuit_constructor, poseidon2_gate_variables); - } - auto memory_gate_variables = - get_memory_gate_connected_component(ultra_circuit_constructor, gate_idx, blk_idx, block_data[blk_idx]); - if (connect_variables) { - connect_all_variables_in_vector(ultra_circuit_constructor, memory_gate_variables); - } - auto nnf_gate_variables = get_non_native_field_gate_connected_component( - ultra_circuit_constructor, gate_idx, blk_idx, block_data[blk_idx]); - if (connect_variables) { - connect_all_variables_in_vector(ultra_circuit_constructor, nnf_gate_variables); + std::vector> all_cc = { + get_arithmetic_gate_connected_component(gate_idx, blk_idx, block_data[blk_idx]), + get_elliptic_gate_connected_component(gate_idx, blk_idx, block_data[blk_idx]), + get_plookup_gate_connected_component(gate_idx, blk_idx, block_data[blk_idx]), + get_poseido2s_gate_connected_component(gate_idx, blk_idx, block_data[blk_idx]), + get_non_native_field_gate_connected_component(gate_idx, blk_idx, block_data[blk_idx]), + get_memory_gate_connected_component(gate_idx, blk_idx, block_data[blk_idx]), + get_sort_constraint_connected_component(gate_idx, blk_idx, block_data[blk_idx]) + }; + auto non_empty_count = + std::count_if(all_cc.begin(), all_cc.end(), [](const auto& vec) { return !vec.empty(); }); + ASSERT(non_empty_count < 2U); + auto not_empty_cc_it = + std::find_if(all_cc.begin(), all_cc.end(), [](const auto& vec) { return !vec.empty(); }); + if (not_empty_cc_it != all_cc.end() && connect_variables) { + connect_all_variables_in_vector(*not_empty_cc_it); } - if (arithmetic_gates_variables.empty() && elliptic_gate_variables.empty() && - lookup_gate_variables.empty() && poseidon2_gate_variables.empty() && memory_gate_variables.empty() && - nnf_gate_variables.empty()) { - // if all vectors are empty 
it means that current block is delta range, and it needs another - // processing method - auto delta_range_gate_variables = get_sort_constraint_connected_component( - ultra_circuit_constructor, gate_idx, blk_idx, block_data[blk_idx]); - if (delta_range_gate_variables.empty()) { - if (connect_variables) { - connect_all_variables_in_vector(ultra_circuit_constructor, sorted_variables); + if constexpr (IsMegaBuilder) { + // If type of CircuitBuilder is MegaCircuitBuilder, we'll try to process blocks like they can be + // databus or eccop + auto databus_variables = get_databus_connected_component(gate_idx, blk_idx, block_data[blk_idx]); + if (connect_variables) { + connect_all_variables_in_vector(databus_variables); + } + auto eccop_gate_variables = get_eccop_part_connected_component(gate_idx, blk_idx, block_data[blk_idx]); + if (connect_variables) { + if (!eccop_gate_variables.empty()) { + // The gotten vector of variables contains all variables from UltraOp element of the table + eccop_variables.insert( + eccop_variables.end(), eccop_gate_variables.begin(), eccop_gate_variables.end()); + // if a current opcode is responsible for equality and reset, we have to connect all + // variables in global vector and clear it for the next parts + if (eccop_gate_variables[0] == circuit_builder.equality_op_idx) { + connect_all_variables_in_vector(eccop_variables); + eccop_variables.clear(); + } } - sorted_variables.clear(); - } else { - sorted_variables.insert( - sorted_variables.end(), delta_range_gate_variables.begin(), delta_range_gate_variables.end()); } } } } - const auto& rom_arrays = ultra_circuit_constructor.rom_ram_logic.rom_arrays; + const auto& rom_arrays = circuit_builder.rom_ram_logic.rom_arrays; if (!rom_arrays.empty()) { for (const auto& rom_array : rom_arrays) { - std::vector variable_indices = - this->get_rom_table_connected_component(ultra_circuit_constructor, rom_array); + std::vector variable_indices = get_rom_table_connected_component(rom_array); if 
(connect_variables) { - this->connect_all_variables_in_vector(ultra_circuit_constructor, variable_indices); + connect_all_variables_in_vector(variable_indices); } } } - const auto& ram_arrays = ultra_circuit_constructor.rom_ram_logic.ram_arrays; + const auto& ram_arrays = circuit_builder.rom_ram_logic.ram_arrays; if (!ram_arrays.empty()) { for (const auto& ram_array : ram_arrays) { - std::vector variable_indices = - this->get_ram_table_connected_component(ultra_circuit_constructor, ram_array); + std::vector variable_indices = get_ram_table_connected_component(ram_array); if (connect_variables) { - this->connect_all_variables_in_vector(ultra_circuit_constructor, variable_indices); + connect_all_variables_in_vector(variable_indices); } } } } +/** + * @brief Construct a new StaticAnalyzer for Ultra Circuit Builder or Mega Circuit Builder + * @tparam FF field type used in the circuit + * @param ultra_circuit_constructor circuit builder containing all gates and variables + * @details This constructor initializes the graph structure by: + * 1) Creating data structures for tracking: + * - Number of gates each variable appears in (variables_gate_counts) + * - Adjacency lists for each variable (variable_adjacency_lists) + * - Degree of each variable (variables_degree) + * 2) Processing different types of gates: + * - Arithmetic gates + * - Elliptic curve gates + * - Plookup gates + * - Poseidon2 gates + * - Memory gates + * - Non-native field gates + * - Delta range gates + * 3) Creating connections between variables that appear in the same gate + * 4) Special handling for sorted constraints in delta range blocks + */ +template +StaticAnalyzer_::StaticAnalyzer_(CircuitBuilder& circuit_builder, bool connect_variables) + : circuit_builder(circuit_builder) + , connect_variables(connect_variables) +{ + variables_gate_counts = std::unordered_map(circuit_builder.real_variable_index.size()); + variable_adjacency_lists = + 
std::unordered_map>(circuit_builder.real_variable_index.size()); + variables_degree = std::unordered_map(circuit_builder.real_variable_index.size()); + for (const auto& variable_index : circuit_builder.real_variable_index) { + variables_gate_counts[variable_index] = 0; + variables_degree[variable_index] = 0; + variable_adjacency_lists[variable_index] = {}; + } + process_execution_trace(); +} + /** * @brief this method checks whether the variable with given index is not constant * @tparam FF @@ -686,14 +771,13 @@ StaticAnalyzer_::StaticAnalyzer_(bb::UltraCircuitBuilder& ultra_circuit_cons * @return false */ -template -bool StaticAnalyzer_::check_is_not_constant_variable(bb::UltraCircuitBuilder& ultra_circuit_builder, - const uint32_t& variable_index) +template +bool StaticAnalyzer_::check_is_not_constant_variable(const uint32_t& variable_index) { bool is_not_constant = true; - const auto& constant_variable_indices = ultra_circuit_builder.constant_variable_indices; + const auto& constant_variable_indices = circuit_builder.constant_variable_indices; for (const auto& pair : constant_variable_indices) { - if (pair.second == ultra_circuit_builder.real_variable_index[variable_index]) { + if (pair.second == circuit_builder.real_variable_index[variable_index]) { is_not_constant = false; break; } @@ -712,9 +796,8 @@ bool StaticAnalyzer_::check_is_not_constant_variable(bb::UltraCircuitBuilder * @param is_sorted_variables */ -template -void StaticAnalyzer_::connect_all_variables_in_vector(bb::UltraCircuitBuilder& ultra_circuit_builder, - const std::vector& variables_vector) +template +void StaticAnalyzer_::connect_all_variables_in_vector(const std::vector& variables_vector) { if (variables_vector.empty()) { return; @@ -726,8 +809,8 @@ void StaticAnalyzer_::connect_all_variables_in_vector(bb::UltraCircuitBuilde variables_vector.end(), std::back_inserter(filtered_variables_vector), [&](uint32_t variable_index) { - return variable_index != ultra_circuit_builder.zero_idx && - 
this->check_is_not_constant_variable(ultra_circuit_builder, variable_index); + return variable_index != circuit_builder.zero_idx() && + this->check_is_not_constant_variable(variable_index); }); // Remove duplicates auto unique_pointer = std::unique(filtered_variables_vector.begin(), filtered_variables_vector.end()); @@ -736,7 +819,7 @@ void StaticAnalyzer_::connect_all_variables_in_vector(bb::UltraCircuitBuilde return; } for (size_t i = 0; i < filtered_variables_vector.size() - 1; i++) { - this->add_new_edge(filtered_variables_vector[i], filtered_variables_vector[i + 1]); + add_new_edge(filtered_variables_vector[i], filtered_variables_vector[i + 1]); } } @@ -747,8 +830,9 @@ void StaticAnalyzer_::connect_all_variables_in_vector(bb::UltraCircuitBuilde * @param second_variable_index */ -template -void StaticAnalyzer_::add_new_edge(const uint32_t& first_variable_index, const uint32_t& second_variable_index) +template +void StaticAnalyzer_::add_new_edge(const uint32_t& first_variable_index, + const uint32_t& second_variable_index) { variable_adjacency_lists[first_variable_index].emplace_back(second_variable_index); variable_adjacency_lists[second_variable_index].emplace_back(first_variable_index); @@ -764,10 +848,10 @@ void StaticAnalyzer_::add_new_edge(const uint32_t& first_variable_index, con * @param connected_component */ -template -void StaticAnalyzer_::depth_first_search(const uint32_t& variable_index, - std::unordered_set& is_used, - std::vector& connected_component) +template +void StaticAnalyzer_::depth_first_search(const uint32_t& variable_index, + std::unordered_set& is_used, + std::vector& connected_component) { std::stack variable_stack; variable_stack.push(variable_index); @@ -787,27 +871,91 @@ void StaticAnalyzer_::depth_first_search(const uint32_t& variable_index, /** * @brief this methond finds all connected components in the graph described by adjacency lists * @tparam FF - * @return std::vector> list of connected components where each component is a 
vector of variable - * indices + * @return std::vector> list of connected components where each component is a vector of + * variable indices */ -template std::vector> StaticAnalyzer_::find_connected_components() +template +std::vector StaticAnalyzer_::find_connected_components( + bool return_all_connected_components) { - std::unordered_set is_used; - std::vector> connected_components; + if (!connect_variables) { + throw std::runtime_error("find_connected_components() can only be called when connect_variables is true"); + } + std::unordered_set visited; for (const auto& pair : variable_adjacency_lists) { if (pair.first != 0 && variables_degree[pair.first] > 0) { - if (!is_used.contains(pair.first)) { - std::vector connected_component; - this->depth_first_search(pair.first, is_used, connected_component); - std::sort(connected_component.begin(), connected_component.end()); - connected_components.emplace_back(connected_component); + if (!visited.contains(pair.first)) { + std::vector variable_indices; + depth_first_search(pair.first, visited, variable_indices); + std::sort(variable_indices.begin(), variable_indices.end()); + connected_components.emplace_back(ConnectedComponent(variable_indices)); + } + } + } + mark_range_list_connected_components(); + mark_finalize_connected_components(); + if (!return_all_connected_components) { + main_connected_components.reserve(connected_components.size()); + for (auto& cc : connected_components) { + if (!cc.is_range_list_cc && !cc.is_finalize_cc) { + main_connected_components.emplace_back(std::move(cc)); } } + return main_connected_components; } return connected_components; } +/** + * @brief this method marks some connected componets like they represent range lists + * tool needs this method to remove range lists because after method finalize was called + * because they aren't connected to other variables in a circuit. 
It's intended behaviout but the tool shows them as + * another connected component + * @tparam FF + * @tparam CircuitBuilder + */ + +template +void StaticAnalyzer_::mark_range_list_connected_components() +{ + const auto& tags = circuit_builder.real_variable_tags; + std::unordered_set tau_tags; + for (const auto& pair : circuit_builder.range_lists) { + tau_tags.insert(pair.second.tau_tag); + } + for (auto& cc : connected_components) { + const auto& variables = cc.variable_indices; + const uint32_t first_tag = tags[variables[0]]; + if (tau_tags.contains(first_tag)) { + cc.is_range_list_cc = + std::all_of(variables.begin() + 1, variables.end(), [&tags, first_tag](uint32_t var_idx) { + return tags[var_idx] == first_tag; + }); + } + } +} + +/** + * @brief this method marks some connected components like they represent separated finalize blocks + * the point is finalize method create additional gates for ecc_op in databus in Mega case and they aren't connected + * to other variables in the circuit. It's intended behaviour but the tool shows them as another connected component + * @tparam FF + * @tparam CircuitBuilder + */ + +template +void StaticAnalyzer_::mark_finalize_connected_components() +{ + const auto& finalize_witnesses = circuit_builder.finalize_witnesses; + for (auto& cc : connected_components) { + const auto& vars = cc.vars(); + cc.is_finalize_cc = std::all_of(vars.begin(), vars.end(), [&finalize_witnesses](uint32_t var_idx) { + return finalize_witnesses.contains(var_idx); + }); + } +} + /** * @brief this method removes variables that were created in a function decompose_into_default_range * because they are false cases and don't give any useful information about security of the circuit. 
@@ -823,31 +971,29 @@ template std::vector> StaticAnalyzer_::f * @return size_t */ -template -inline size_t StaticAnalyzer_::process_current_decompose_chain(bb::UltraCircuitBuilder& ultra_circuit_constructor, - std::unordered_set& variables_in_one_gate, - size_t index) +template +inline size_t StaticAnalyzer_::process_current_decompose_chain(size_t index) { - auto& arithmetic_block = ultra_circuit_constructor.blocks.arithmetic; - auto zero_idx = ultra_circuit_constructor.zero_idx; + auto& arithmetic_block = circuit_builder.blocks.arithmetic; + auto zero_idx = circuit_builder.zero_idx(); size_t current_index = index; std::vector accumulators_indices; while (true) { - // we have to remove left, right and output wires of the current gate, cause they'are new_limbs, and they are - // useless for the analyzer + // we have to remove left, right and output wires of the current gate, cause they'are new_limbs, and they + // are useless for the analyzer auto fourth_idx = arithmetic_block.w_4()[current_index]; - accumulators_indices.emplace_back(this->to_real(ultra_circuit_constructor, fourth_idx)); + accumulators_indices.emplace_back(this->to_real(fourth_idx)); auto left_idx = arithmetic_block.w_l()[current_index]; if (left_idx != zero_idx) { - variables_in_one_gate.erase(this->to_real(ultra_circuit_constructor, left_idx)); + variables_in_one_gate.erase(this->to_real(left_idx)); } auto right_idx = arithmetic_block.w_r()[current_index]; if (right_idx != zero_idx) { - variables_in_one_gate.erase(this->to_real(ultra_circuit_constructor, right_idx)); + variables_in_one_gate.erase(this->to_real(right_idx)); } auto out_idx = arithmetic_block.w_o()[current_index]; if (out_idx != zero_idx) { - variables_in_one_gate.erase(this->to_real(ultra_circuit_constructor, out_idx)); + variables_in_one_gate.erase(this->to_real(out_idx)); } auto q_arith = arithmetic_block.q_arith()[current_index]; if (q_arith == 1 || current_index == arithmetic_block.size() - 1) { @@ -858,12 +1004,12 @@ inline 
size_t StaticAnalyzer_::process_current_decompose_chain(bb::UltraCirc } for (size_t i = 0; i < accumulators_indices.size(); i++) { if (i == 0) { - // the first variable in accumulators is the variable which decompose was created. So, we have to decrement - // variable_gate_counts for this variable + // the first variable in accumulators is the variable which decompose was created. So, we have to + // decrement variable_gate_counts for this variable variables_gate_counts[accumulators_indices[i]] -= 1; } else { - // next accumulators are useless variables that are not interested for the analyzer. So, for these variables - // we can nullify variables_gate_counts + // next accumulators are useless variables that are not interested for the analyzer. So, for these + // variables we can nullify variables_gate_counts variables_gate_counts[accumulators_indices[i]] = 0; } } @@ -879,17 +1025,15 @@ inline size_t StaticAnalyzer_::process_current_decompose_chain(bb::UltraCirc * @param decompose_variables */ -template -inline void StaticAnalyzer_::remove_unnecessary_decompose_variables( - bb::UltraCircuitBuilder& ultra_circuit_builder, - std::unordered_set& variables_in_one_gate, +template +inline void StaticAnalyzer_::remove_unnecessary_decompose_variables( const std::unordered_set& decompose_variables) { auto is_power_two = [&](const uint256_t& number) { return number > 0 && ((number & (number - 1)) == 0); }; auto find_position = [&](uint32_t variable_index) { - return decompose_variables.contains(this->to_real(ultra_circuit_builder, variable_index)); + return decompose_variables.contains(this->to_real(variable_index)); }; - auto& arithmetic_block = ultra_circuit_builder.blocks.arithmetic; + auto& arithmetic_block = circuit_builder.blocks.arithmetic; if (arithmetic_block.size() > 0) { for (size_t i = 0; i < arithmetic_block.size(); i++) { auto q_1 = arithmetic_block.q_1()[i]; @@ -916,7 +1060,7 @@ inline void StaticAnalyzer_::remove_unnecessary_decompose_variables( if 
(((find_left && find_right && find_out) || (find_left && find_right && !find_out) || (find_left && find_right && !find_out) || (find_left && !find_right && !find_out)) && !find_fourth) { - i = this->process_current_decompose_chain(ultra_circuit_builder, variables_in_one_gate, i); + i = this->process_current_decompose_chain(i); } } } @@ -931,27 +1075,27 @@ inline void StaticAnalyzer_::remove_unnecessary_decompose_variables( * 1) Variables from delta_range_constraints created by finalize_circuit() * 2) Variables from range_constraints created by range_constraint_into_two_limbs */ -template -void StaticAnalyzer_::remove_unnecessary_range_constrains_variables(bb::UltraCircuitBuilder& ultra_builder) +template +void StaticAnalyzer_::remove_unnecessary_range_constrains_variables() { - std::map range_lists = ultra_builder.range_lists; + std::map range_lists = circuit_builder.range_lists; std::unordered_set range_lists_tau_tags; std::unordered_set range_lists_range_tags; - std::vector real_variable_tags = ultra_builder.real_variable_tags; + std::vector real_variable_tags = circuit_builder.real_variable_tags; for (const auto& pair : range_lists) { - UltraCircuitBuilder::RangeList list = pair.second; + typename CircuitBuilder::RangeList list = pair.second; range_lists_tau_tags.insert(list.tau_tag); range_lists_range_tags.insert(list.range_tag); } for (uint32_t real_index = 0; real_index < real_variable_tags.size(); real_index++) { if (variables_in_one_gate.contains(real_index)) { - // this if helps us to remove variables from delta_range_constraints when finalize_circuit() function was - // called + // this if helps us to remove variables from delta_range_constraints when finalize_circuit() function + // was called if (range_lists_tau_tags.contains(real_variable_tags[real_index])) { variables_in_one_gate.erase(real_index); } - // this if helps us to remove variables from range_constraints when range_constraint_into_two_limbs function - // was called + // this if helps us to 
remove variables from range_constraints when range_constraint_into_two_limbs + // function was called if (range_lists_range_tags.contains(real_variable_tags[real_index])) { variables_in_one_gate.erase(real_index); } @@ -970,12 +1114,9 @@ void StaticAnalyzer_::remove_unnecessary_range_constrains_variables(bb::Ultr * @param table_id * @param gate_index */ -template -inline void StaticAnalyzer_::remove_unnecessary_aes_plookup_variables( - std::unordered_set& variables_in_one_gate, - UltraCircuitBuilder& ultra_circuit_builder, - BasicTableId& table_id, - size_t gate_index) +template +inline void StaticAnalyzer_::remove_unnecessary_aes_plookup_variables(BasicTableId& table_id, + size_t gate_index) { auto find_position = [&](uint32_t real_variable_index) { @@ -984,10 +1125,10 @@ inline void StaticAnalyzer_::remove_unnecessary_aes_plookup_variables( std::unordered_set aes_plookup_tables{ BasicTableId::AES_SBOX_MAP, BasicTableId::AES_SPARSE_MAP, BasicTableId::AES_SPARSE_NORMALIZE }; - auto& lookup_block = ultra_circuit_builder.blocks.lookup; + auto& lookup_block = circuit_builder.blocks.lookup; if (aes_plookup_tables.contains(table_id)) { - uint32_t real_out_idx = this->to_real(ultra_circuit_builder, lookup_block.w_o()[gate_index]); - uint32_t real_right_idx = this->to_real(ultra_circuit_builder, lookup_block.w_r()[gate_index]); + uint32_t real_out_idx = this->to_real(lookup_block.w_o()[gate_index]); + uint32_t real_right_idx = this->to_real(lookup_block.w_r()[gate_index]); if (variables_gate_counts[real_out_idx] != 1 || variables_gate_counts[real_right_idx] != 1) { bool find_out = find_position(real_out_idx); auto q_c = lookup_block.q_c()[gate_index]; @@ -1012,18 +1153,15 @@ inline void StaticAnalyzer_::remove_unnecessary_aes_plookup_variables( * @param gate_index */ -template -inline void StaticAnalyzer_::remove_unnecessary_sha256_plookup_variables( - std::unordered_set& variables_in_one_gate, - UltraCircuitBuilder& ultra_circuit_builder, - BasicTableId& table_id, - 
size_t gate_index) +template +inline void StaticAnalyzer_::remove_unnecessary_sha256_plookup_variables(BasicTableId& table_id, + size_t gate_index) { auto find_position = [&](uint32_t real_variable_index) { return variables_in_one_gate.contains(real_variable_index); }; - auto& lookup_block = ultra_circuit_builder.blocks.lookup; + auto& lookup_block = circuit_builder.blocks.lookup; std::unordered_set sha256_plookup_tables{ BasicTableId::SHA256_WITNESS_SLICE_3, BasicTableId::SHA256_WITNESS_SLICE_7_ROTATE_4, BasicTableId::SHA256_WITNESS_SLICE_8_ROTATE_7, @@ -1037,8 +1175,8 @@ inline void StaticAnalyzer_::remove_unnecessary_sha256_plookup_variables( BasicTableId::SHA256_BASE28_ROTATE3, BasicTableId::SHA256_BASE28_ROTATE6 }; if (sha256_plookup_tables.contains(table_id)) { - uint32_t real_right_idx = this->to_real(ultra_circuit_builder, lookup_block.w_r()[gate_index]); - uint32_t real_out_idx = this->to_real(ultra_circuit_builder, lookup_block.w_o()[gate_index]); + uint32_t real_right_idx = this->to_real(lookup_block.w_r()[gate_index]); + uint32_t real_out_idx = this->to_real(lookup_block.w_o()[gate_index]); if (variables_gate_counts[real_out_idx] != 1 || variables_gate_counts[real_right_idx] != 1) { // auto q_m = lookup_block.q_m()[gate_index]; auto q_c = lookup_block.q_c()[gate_index]; @@ -1068,16 +1206,15 @@ inline void StaticAnalyzer_::remove_unnecessary_sha256_plookup_variables( * @param gate_index */ -template -inline void StaticAnalyzer_::process_current_plookup_gate(bb::UltraCircuitBuilder& ultra_circuit_builder, - size_t gate_index) +template +inline void StaticAnalyzer_::process_current_plookup_gate(size_t gate_index) { auto find_position = [&](uint32_t real_variable_index) { return variables_in_one_gate.contains(real_variable_index); }; - auto& lookup_block = ultra_circuit_builder.blocks.lookup; - auto& lookup_tables = ultra_circuit_builder.lookup_tables; - auto table_index = static_cast(lookup_block.q_3()[gate_index]); + auto& lookup_block = 
circuit_builder.blocks.lookup; + auto& lookup_tables = circuit_builder.lookup_tables; + auto table_index = static_cast(static_cast(lookup_block.q_3()[gate_index])); for (const auto& table : lookup_tables) { if (table.table_index == table_index) { std::unordered_set column_1(table.column_1.begin(), table.column_1.end()); @@ -1085,30 +1222,28 @@ inline void StaticAnalyzer_::process_current_plookup_gate(bb::UltraCircuitBu std::unordered_set column_3(table.column_3.begin(), table.column_3.end()); bb::plookup::BasicTableId table_id = table.id; // false cases for AES - this->remove_unnecessary_aes_plookup_variables( - variables_in_one_gate, ultra_circuit_builder, table_id, gate_index); + this->remove_unnecessary_aes_plookup_variables(table_id, gate_index); // false cases for sha256 - this->remove_unnecessary_sha256_plookup_variables( - variables_in_one_gate, ultra_circuit_builder, table_id, gate_index); + this->remove_unnecessary_sha256_plookup_variables(table_id, gate_index); // if the amount of unique elements from columns of plookup tables = 1, it means that // variable from this column aren't used and we can remove it. 
if (column_1.size() == 1) { uint32_t left_idx = lookup_block.w_l()[gate_index]; - uint32_t real_left_idx = this->to_real(ultra_circuit_builder, left_idx); + uint32_t real_left_idx = this->to_real(left_idx); bool find_left = find_position(real_left_idx); if (find_left) { variables_in_one_gate.erase(real_left_idx); } } if (column_2.size() == 1) { - uint32_t real_right_idx = this->to_real(ultra_circuit_builder, lookup_block.w_r()[gate_index]); + uint32_t real_right_idx = this->to_real(lookup_block.w_r()[gate_index]); bool find_right = find_position(real_right_idx); if (find_right) { variables_in_one_gate.erase(real_right_idx); } } if (column_3.size() == 1) { - uint32_t real_out_idx = this->to_real(ultra_circuit_builder, lookup_block.w_o()[gate_index]); + uint32_t real_out_idx = this->to_real(lookup_block.w_o()[gate_index]); bool find_out = find_position(real_out_idx); if (find_out) { variables_in_one_gate.erase(real_out_idx); @@ -1125,13 +1260,13 @@ inline void StaticAnalyzer_::process_current_plookup_gate(bb::UltraCircuitBu * @param variables_in_one_gate */ -template -inline void StaticAnalyzer_::remove_unnecessary_plookup_variables(bb::UltraCircuitBuilder& ultra_circuit_builder) +template +inline void StaticAnalyzer_::remove_unnecessary_plookup_variables() { - auto& lookup_block = ultra_circuit_builder.blocks.lookup; + auto& lookup_block = circuit_builder.blocks.lookup; if (lookup_block.size() > 0) { for (size_t i = 0; i < lookup_block.size(); i++) { - this->process_current_plookup_gate(ultra_circuit_builder, i); + this->process_current_plookup_gate(i); } } } @@ -1144,38 +1279,38 @@ inline void StaticAnalyzer_::remove_unnecessary_plookup_variables(bb::UltraC * @param ultra_builder */ -template -inline void StaticAnalyzer_::remove_record_witness_variables(bb::UltraCircuitBuilder& ultra_builder) +template +inline void StaticAnalyzer_::remove_record_witness_variables() { - auto block_data = ultra_builder.blocks.get(); - size_t blk_idx = find_block_index(ultra_builder, 
ultra_builder.blocks.memory); - std::vector to_remove; - BB_ASSERT_EQ(blk_idx, 5U); - for (const auto& var_idx : variables_in_one_gate) { - KeyPair key = { var_idx, blk_idx }; - if (auto search = variable_gates.find(key); search != variable_gates.end()) { - std::vector gate_indexes = variable_gates[key]; - BB_ASSERT_EQ(gate_indexes.size(), 1U); - size_t gate_idx = gate_indexes[0]; - auto q_1 = block_data[blk_idx].q_1()[gate_idx]; - auto q_2 = block_data[blk_idx].q_2()[gate_idx]; - auto q_3 = block_data[blk_idx].q_3()[gate_idx]; - auto q_4 = block_data[blk_idx].q_4()[gate_idx]; - auto q_m = block_data[blk_idx].q_m()[gate_idx]; - auto q_arith = block_data[blk_idx].q_arith()[gate_idx]; - if (q_1 == FF::one() && q_m == FF::one() && q_2.is_zero() && q_3.is_zero() && q_4.is_zero() && - q_arith.is_zero()) { - // record witness can be in both ROM and RAM gates, so we can ignore q_c - // record witness is written as 4th variable in RAM/ROM read/write gate, so we can get 4th wire value - // and check it with our variable - if (this->to_real(ultra_builder, block_data[blk_idx].w_4()[gate_idx]) == var_idx) { - to_remove.emplace_back(var_idx); + auto block_data = circuit_builder.blocks.get(); + if (std::optional blk_idx = find_block_index(circuit_builder.blocks.memory); blk_idx) { + std::vector to_remove; + for (const auto& var_idx : variables_in_one_gate) { + KeyPair key = { var_idx, *blk_idx }; + if (auto search = variable_gates.find(key); search != variable_gates.end()) { + std::vector gate_indexes = variable_gates[key]; + BB_ASSERT_EQ(gate_indexes.size(), 1U); + size_t gate_idx = gate_indexes[0]; + auto q_1 = block_data[*blk_idx].q_1()[gate_idx]; + auto q_2 = block_data[*blk_idx].q_2()[gate_idx]; + auto q_3 = block_data[*blk_idx].q_3()[gate_idx]; + auto q_4 = block_data[*blk_idx].q_4()[gate_idx]; + auto q_m = block_data[*blk_idx].q_m()[gate_idx]; + auto q_arith = block_data[*blk_idx].q_arith()[gate_idx]; + if (q_1 == FF::one() && q_m == FF::one() && q_2.is_zero() && 
q_3.is_zero() && q_4.is_zero() && + q_arith.is_zero()) { + // record witness can be in both ROM and RAM gates, so we can ignore q_c + // record witness is written as 4th variable in RAM/ROM read/write gate, so we can get 4th + // wire value and check it with our variable + if (this->to_real(block_data[*blk_idx].w_4()[gate_idx]) == var_idx) { + to_remove.emplace_back(var_idx); + } } } } - } - for (const auto& elem : to_remove) { - variables_in_one_gate.erase(elem); + for (const auto& elem : to_remove) { + variables_in_one_gate.erase(elem); + } } } @@ -1186,198 +1321,348 @@ inline void StaticAnalyzer_::remove_record_witness_variables(bb::UltraCircui * @return std::unordered_set set of variable indices */ -template -std::unordered_set StaticAnalyzer_::show_variables_in_one_gate( - bb::UltraCircuitBuilder& ultra_circuit_builder) +template +std::unordered_set StaticAnalyzer_::get_variables_in_one_gate() { for (const auto& pair : variables_gate_counts) { - bool is_not_constant_variable = this->check_is_not_constant_variable(ultra_circuit_builder, pair.first); + bool is_not_constant_variable = check_is_not_constant_variable(pair.first); if (pair.second == 1 && pair.first != 0 && is_not_constant_variable) { - this->variables_in_one_gate.insert(pair.first); + variables_in_one_gate.insert(pair.first); } } - auto range_lists = ultra_circuit_builder.range_lists; - std::unordered_set decompose_varialbes; + auto range_lists = circuit_builder.range_lists; + std::unordered_set decompose_variables; for (auto& pair : range_lists) { for (auto& elem : pair.second.variable_indices) { - bool is_not_constant_variable = this->check_is_not_constant_variable(ultra_circuit_builder, elem); - if (variables_gate_counts[ultra_circuit_builder.real_variable_index[elem]] == 1 && - is_not_constant_variable) { - decompose_varialbes.insert(ultra_circuit_builder.real_variable_index[elem]); + bool is_not_constant_variable = check_is_not_constant_variable(elem); + if 
(variables_gate_counts[circuit_builder.real_variable_index[elem]] == 1 && is_not_constant_variable) { + decompose_variables.insert(circuit_builder.real_variable_index[elem]); } } } - this->remove_unnecessary_decompose_variables( - ultra_circuit_builder, this->variables_in_one_gate, decompose_varialbes); - this->remove_unnecessary_plookup_variables(ultra_circuit_builder); - this->remove_unnecessary_range_constrains_variables(ultra_circuit_builder); - for (const auto& elem : this->fixed_variables) { - this->variables_in_one_gate.erase(elem); + remove_unnecessary_decompose_variables(decompose_variables); + remove_unnecessary_plookup_variables(); + remove_unnecessary_range_constrains_variables(); + for (const auto& elem : fixed_variables) { + variables_in_one_gate.erase(elem); } // we found variables that were in one gate and they are intended cases. // so we have to remove them from the scope - for (const auto& elem : ultra_circuit_builder.get_used_witnesses()) { - this->variables_in_one_gate.erase(elem); + for (const auto& elem : circuit_builder.get_used_witnesses()) { + variables_in_one_gate.erase(elem); } - this->remove_record_witness_variables(ultra_circuit_builder); + remove_record_witness_variables(); return variables_in_one_gate; } /** - * @brief this method returns connected component with a given index and its size - * @param connected_components vector of all connected components - * @param index index of required component - * @return std::pair, size_t> pair of component and its size + * @brief this method prints additional information about connected components that were found in the graph + * @tparam FF + */ +template +void StaticAnalyzer_::print_connected_components_info() +{ + for (size_t i = 0; i < main_connected_components.size(); i++) { + info("size of ", i + 1, " connected component == ", main_connected_components[i].size(), ":"); + info("Does connected component represent range list? 
", main_connected_components[i].is_range_list_cc); + info("Does connected component represent something from finalize? ", + main_connected_components[i].is_finalize_cc); + if (main_connected_components[i].size() < 50) { + for (const auto& elem : main_connected_components[i].vars()) { + info("elem == ", elem); + } + } + } +} + +/** + * @brief this method prints a number of gates for each variable + * @tparam FF */ -std::pair, size_t> get_connected_component_with_index( - const std::vector>& connected_components, size_t index) +template void StaticAnalyzer_::print_variables_gate_counts() { - auto connected_component = connected_components[index]; - auto size = connected_component.size(); - return std::make_pair(connected_component, size); + for (const auto& it : variables_gate_counts) { + info("number of gates with variables ", it.first, " == ", it.second); + } } /** - * @brief this method prints graph as vertices and their adjacency lists - * example: we have an undirected graph from 3 variables: a, b, c. - * we have edges: a - b, b - c, c - a. 
- * so, there will be next adjacency lists: - * a: b -> c -> 0\ - * b: a -> c -> 0\ - * c: a -> b -> 0\ + * @brief this method prints all information about arithmetic gate where variable was found * @tparam FF + * @param ultra_builder + * @param real_idx */ +template +void StaticAnalyzer_::print_arithmetic_gate_info(size_t gate_index, auto& block) +{ + auto q_arith = block.q_arith()[gate_index]; + if (!q_arith.is_zero()) { + info("q_arith == ", q_arith); + // fisrtly, print selectors for standard plonk gate + info("q_m == ", block.q_m()[gate_index]); + info("q1 == ", block.q_1()[gate_index]); + info("q2 == ", block.q_2()[gate_index]); + info("q3 == ", block.q_3()[gate_index]); + info("q4 == ", block.q_4()[gate_index]); + info("q_c == ", block.q_c()[gate_index]); -template void StaticAnalyzer_::print_graph() + if (q_arith == FF(2)) { + // we have to print w_4_shift from next gate + info("w_4_shift == ", block.w_4()[gate_index + 1]); + } + if (q_arith == FF(3)) { + // we have to print w_4_shift and w_1_shift from the next gate + info("w_1_shift == ", block.w_l()[gate_index + 1]); + info("w_4_shift == ", block.w_4()[gate_index + 1]); + } + } else { + return; + } +} + +/** + * @brief this method prints all information about elliptic gate where variable was found + * @tparam FF + * @param ultra_builder + * @param real_idx + */ +template +void StaticAnalyzer_::print_elliptic_gate_info(size_t gate_index, auto& block) { - for (const auto& elem : variable_adjacency_lists) { - info("variable with index ", elem.first); - if (variable_adjacency_lists[elem.first].empty()) { - info("is isolated"); - } else { - for (const auto& it : elem.second) { - info(it); - } + auto q_elliptic = block.q_elliptic()[gate_index]; + if (!q_elliptic.is_zero()) { + info("q_elliptic == ", q_elliptic); + info("q_1 == ", block.q_1()[gate_index]); + info("q_m == ", block.q_m()[gate_index]); + bool is_elliptic_add_gate = !block.q_1()[gate_index].is_zero() && block.q_m()[gate_index].is_zero(); + bool 
is_elliptic_dbl_gate = block.q_1()[gate_index].is_zero() && block.q_m()[gate_index] == FF::one(); + if (is_elliptic_add_gate) { + info("x2 == ", block.w_l()[gate_index + 1]); + info("x3 == ", block.w_r()[gate_index + 1]); + info("y3 == ", block.w_o()[gate_index + 1]); + info("y2 == ", block.w_4()[gate_index + 1]); + } + if (is_elliptic_dbl_gate) { + info("x3 == ", block.w_r()[gate_index + 1]); + info("y3 == ", block.w_o()[gate_index + 1]); } + } else { + return; } } /** - * @brief this method prints all connected components that were found in the graph + * @brief this method prints all information about plookup gate where variable was found * @tparam FF + * @param ultra_builder + * @param real_idx */ -template void StaticAnalyzer_::print_connected_components() +template +void StaticAnalyzer_::print_plookup_gate_info(size_t gate_index, auto& block) { - auto connected_components = find_connected_components(); - for (size_t i = 0; i < connected_components.size(); i++) { - info("printing the ", i + 1, " connected component with size ", connected_components[i].size(), ":"); - for (const auto& it : connected_components[i]) { - info(it, " "); + auto q_lookup = block.q_lookup_type()[gate_index]; + if (!q_lookup.is_zero()) { + info("q_lookup == ", q_lookup); + auto q_2 = block.q_2()[gate_index]; + auto q_m = block.q_m()[gate_index]; + auto q_c = block.q_c()[gate_index]; + info("q_2 == ", q_2); + info("q_m == ", q_m); + info("q_c == ", q_c); + if (!q_2.is_zero()) { + info("w_1_shift == ", block.w_l()[gate_index + 1]); } + if (!q_m.is_zero()) { + info("w_2_shift == ", block.w_r()[gate_index + 1]); + } + if (!q_c.is_zero()) { + info("w_3_shift == ", block.w_o()[gate_index + 1]); + } + } else { + return; } } /** - * @brief this method prints a number of gates for each variable + * @brief this method prints all information about range constrain gate where variable was found * @tparam FF + * @param ultra_builder + * @param real_idx */ -template void 
StaticAnalyzer_::print_variables_gate_counts() +template +void StaticAnalyzer_::print_delta_range_gate_info(size_t gate_index, auto& block) { - for (const auto& it : variables_gate_counts) { - info("number of gates with variables ", it.first, " == ", it.second); + auto q_delta_range = block.q_delta_range()[gate_index]; + if (!q_delta_range.is_zero()) { + info("q_delta_range == ", q_delta_range); + info("w_1 == ", block.w_l()[gate_index]); + info("w_2 == ", block.w_r()[gate_index]); + info("w_3 == ", block.w_o()[gate_index]); + info("w_4 == ", block.w_4()[gate_index]); + info("w_1_shift == ", block.w_l()[gate_index]); + } else { + return; + } +} + +/** + * @brief this method prints all information about poseidon2s gate where variable was found + * @tparam FF + * @param ultra_builder + * @param real_idx + */ + +template +void StaticAnalyzer_::print_poseidon2s_gate_info(size_t gate_index, auto& block) +{ + auto internal_selector = block.q_poseidon2_internal()[gate_index]; + auto external_selector = block.q_poseidon2_external()[gate_index]; + if (!internal_selector.is_zero() || !external_selector.is_zero()) { + info("q_poseidon2_internal == ", internal_selector); + info("q_poseidon2_external == ", external_selector); + info("w_1 == ", block.w_l()[gate_index]); + info("w_2 == ", block.w_r()[gate_index]); + info("w_3 == ", block.w_o()[gate_index]); + info("w_4 == ", block.w_4()[gate_index]); + info("w_1_shift == ", block.w_l()[gate_index + 1]); + info("w_2_shift == ", block.w_r()[gate_index + 1]); + info("w_3_shift == ", block.w_o()[gate_index + 1]); + info("w_4_shift == ", block.w_4()[gate_index + 1]); + } else { + return; + } +} + +/** + * @brief this method prints all information about non natife field gate where variable was found + * @tparam FF + * @param ultra_builder + * @param real_idx + */ + +template +void StaticAnalyzer_::print_nnf_gate_info(size_t gate_idx, auto& block) +{ + auto q_nnf = block.q_nnf()[gate_idx]; + if (!q_nnf.is_zero()) { + info("q_nnf == ", 
q_nnf); + auto q_2 = block.q_2()[gate_idx]; + auto q_3 = block.q_3()[gate_idx]; + auto q_4 = block.q_4()[gate_idx]; + auto q_m = block.q_m()[gate_idx]; + if (q_3 == FF::one() && q_4 == FF::one()) { + info("w_1_shift == ", block.w_l()[gate_idx + 1]); + info("w_2_shift == ", block.w_r()[gate_idx + 1]); + + } else if (q_3 == FF::one() && q_m == FF::one()) { + info("w_1_shift == ", block.w_l()[gate_idx + 1]); + info("w_2_shift == ", block.w_r()[gate_idx + 1]); + info("w_3_shift == ", block.w_o()[gate_idx + 1]); + info("w_4_shift == ", block.w_4()[gate_idx + 1]); + } else if (q_2 == FF::one() && (q_3 == FF::one() || q_4 == FF::one() || q_m == FF::one())) { + info("w_1_shift == ", block.w_l()[gate_idx + 1]); + info("w_2_shift == ", block.w_r()[gate_idx + 1]); + if (q_4 == FF::one() || q_m == FF::one()) { + info("w_3_shift == ", block.w_o()[gate_idx + 1]); + info("w_4_shift == ", block.w_4()[gate_idx + 1]); + } + } + } else { + return; + } +} + +/** + * @brief this method prints all information about memory gate where variable was found + * @tparam FF + * @param ultra_builder + * @param real_idx + */ + +template +void StaticAnalyzer_::print_memory_gate_info(size_t gate_index, auto& block) +{ + auto q_memory = block.q_memory()[gate_index]; + if (!q_memory.is_zero()) { + info("q_memory == ", q_memory); + auto q_1 = block.q_1()[gate_index]; + auto q_2 = block.q_2()[gate_index]; + auto q_3 = block.q_3()[gate_index]; + auto q_4 = block.q_4()[gate_index]; + if (q_1 == FF::one() && q_4 == FF::one()) { + info("w_1_shift == ", block.w_l()[gate_index + 1]); + info("w_2_shift == ", block.w_r()[gate_index + 1]); + } else if (q_1 == FF::one() && q_2 == FF::one()) { + info("w_1_shift == ", block.w_l()[gate_index + 1]); + info("w_4_shift == ", block.w_4()[gate_index + 1]); + } else if (!q_3.is_zero()) { + info("w_1_shift == ", block.w_l()[gate_index + 1]); + info("w_2_shift == ", block.w_r()[gate_index + 1]); + info("w_3_shift == ", block.w_o()[gate_index + 1]); + info("w_4_shift == ", 
block.w_4()[gate_index + 1]); + } + } else { + return; } } /** - * @brief this method prints all information about the gate where variable was found + * @brief this method prints all information about gates where variable was found * @tparam FF * @param ultra_builder * @param real_idx */ -template -void StaticAnalyzer_::print_variable_in_one_gate(bb::UltraCircuitBuilder& ultra_builder, const uint32_t real_idx) +template +void StaticAnalyzer_::print_variable_info(const uint32_t real_idx) { - const auto& block_data = ultra_builder.blocks.get(); + const auto& block_data = circuit_builder.blocks.get(); for (const auto& [key, gates] : variable_gates) { if (key.first == real_idx) { - BB_ASSERT_EQ(gates.size(), 1U); - size_t gate_index = gates[0]; - UltraBlock block = block_data[key.second]; - info("---- printing variables in this gate"); - info("w_l == ", - block.w_l()[gate_index], - " w_r == ", - block.w_r()[gate_index], - " w_o == ", - block.w_o()[gate_index], - " w_4 == ", - block.w_4()[gate_index]); - info("---- printing gate selectors where variable with index ", key.first, " was found ----"); - auto q_m = block.q_m()[gate_index]; - if (!q_m.is_zero()) { - info("q_m == ", q_m); - } - auto q_1 = block.q_1()[gate_index]; - if (!q_1.is_zero()) { - info("q1 == ", q_1); - } - auto q_2 = block.q_2()[gate_index]; - if (!q_2.is_zero()) { - info("q2 == ", q_2); - } - auto q_3 = block.q_3()[gate_index]; - if (!q_3.is_zero()) { - info("q3 == ", q_3); - } - auto q_4 = block.q_4()[gate_index]; - if (!q_4.is_zero()) { - info("q4 == ", q_4); - } - auto q_c = block.q_c()[gate_index]; - if (!q_c.is_zero()) { - info("q_c == ", q_c); - } - auto q_arith = block.q_arith()[gate_index]; - if (!q_arith.is_zero()) { - info("q_arith == ", q_arith); - } - auto q_delta_range = block.q_delta_range()[gate_index]; - if (!q_delta_range.is_zero()) { - info("q_delta_range == ", q_delta_range); - } - auto q_elliptic = block.q_elliptic()[gate_index]; - if (!q_elliptic.is_zero()) { - info("q_elliptic 
== ", q_elliptic); - } - auto q_memory = block.q_memory()[gate_index]; - if (!q_memory.is_zero()) { - info("q_memory == ", q_memory); - } - auto q_nnf = block.q_nnf()[gate_index]; - if (!q_nnf.is_zero()) { - info("q_nnf == ", q_nnf); - } - auto q_lookup_type = block.q_lookup_type()[gate_index]; - if (!q_lookup_type.is_zero()) { - info("q_lookup_type == ", q_lookup_type); - } - auto q_poseidon2_external = block.q_poseidon2_external()[gate_index]; - if (!q_poseidon2_external.is_zero()) { - info("q_poseidon2_external == ", q_poseidon2_external); - } - auto q_poseidon2_internal = block.q_poseidon2_internal()[gate_index]; - if (!q_poseidon2_internal.is_zero()) { - info("q_poseidon2_internal == ", q_poseidon2_internal); + for (size_t i = 0; i < gates.size(); i++) { + size_t gate_index = gates[i]; + auto& block = block_data[key.second]; + info("---- printing variables in this gate"); + info("w_l == ", + block.w_l()[gate_index], + " w_r == ", + block.w_r()[gate_index], + " w_o == ", + block.w_o()[gate_index], + " w_4 == ", + block.w_4()[gate_index]); + info("---- printing gate info where variable with index ", key.first, " was found ----"); + print_arithmetic_gate_info(gate_index, block); + print_elliptic_gate_info(gate_index, block); + print_plookup_gate_info(gate_index, block); + print_poseidon2s_gate_info(gate_index, block); + print_delta_range_gate_info(gate_index, block); + print_nnf_gate_info(gate_index, block); + print_memory_gate_info(gate_index, block); + if constexpr (IsMegaBuilder) { + auto q_databus = block.q_busread()[gate_index]; + if (!q_databus.is_zero()) { + info("q_databus == ", q_databus); + } + } + info("---- finished printing ----"); } - info("---- finished printing ----"); } } } -template class StaticAnalyzer_; +template +std::pair, std::unordered_set> StaticAnalyzer_:: + analyze_circuit() +{ + auto connected_components = find_connected_components(); + auto variables_in_one_gate = get_variables_in_one_gate(); + return 
std::make_pair(connected_components, variables_in_one_gate); +} +template class StaticAnalyzer_; +template class StaticAnalyzer_; } // namespace cdg diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph.hpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph.hpp index d4d252badfdd..53c88237deda 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph.hpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph.hpp @@ -1,4 +1,5 @@ #pragma once +#include "barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp" #include #include @@ -9,8 +10,6 @@ #include namespace cdg { - -using UltraBlock = bb::UltraTraceBlock; /** * We've added a new feature to the static analyzer that tracks which gates contain each variable. * This is helpful for removing false-positive variables from the analyzer by using gate selectors @@ -49,6 +48,19 @@ struct KeyEquals { } }; +struct ConnectedComponent { + std::vector variable_indices; + bool is_range_list_cc; + bool is_finalize_cc; + ConnectedComponent() = default; + ConnectedComponent(const std::vector& vector) + : variable_indices(vector) + , is_range_list_cc(false) + , is_finalize_cc(false) {}; + size_t size() const { return variable_indices.size(); } + const std::vector& vars() const { return variable_indices; } +}; + /* * This class describes an arithmetic circuit as an undirected graph, where vertices are variables from the circuit. * Edges describe connections between variables through gates. We want to find variables that weren't properly @@ -57,133 +69,98 @@ struct KeyEquals { * variable wasn't constrained properly. If the number of connected components > 1, it means that there were some missed * connections between variables. 
*/ -template class StaticAnalyzer_ { +template class StaticAnalyzer_ { public: StaticAnalyzer_() = default; StaticAnalyzer_(const StaticAnalyzer_& other) = delete; StaticAnalyzer_(StaticAnalyzer_&& other) = delete; StaticAnalyzer_& operator=(const StaticAnalyzer_& other) = delete; StaticAnalyzer_&& operator=(StaticAnalyzer_&& other) = delete; - StaticAnalyzer_(bb::UltraCircuitBuilder& ultra_circuit_constructor, bool connect_variables = true); + StaticAnalyzer_(CircuitBuilder& circuit_builder, bool connect_variables = true); /** * @brief Convert a vector of variable indices to their real indices - * @param ultra_circuit_constructor The UltraCircuitBuilder instance * @param variable_indices The vector of variable indices to convert * @return std::vector A vector of real variable indices */ - std::vector to_real(bb::UltraCircuitBuilder& ultra_circuit_constructor, - std::vector& variable_indices) + std::vector to_real(std::vector& variable_indices) { std::vector real_variable_indices; real_variable_indices.reserve(variable_indices.size()); for (auto& variable_index : variable_indices) { - real_variable_indices.push_back(to_real(ultra_circuit_constructor, variable_index)); + real_variable_indices.push_back(to_real(variable_index)); } return real_variable_indices; }; - - uint32_t to_real(bb::UltraCircuitBuilder& ultra_circuit_constructor, const uint32_t& variable_index) + uint32_t to_real(const uint32_t& variable_index) const { - return ultra_circuit_constructor.real_variable_index[variable_index]; - }; - size_t find_block_index(bb::UltraCircuitBuilder& ultra_builder, const UltraBlock& block); + return circuit_builder.real_variable_index[variable_index]; + } + size_t find_block_index(const auto& block); void process_gate_variables(std::vector& gate_variables, size_t gate_index, size_t blk_idx); - std::unordered_map get_variables_gate_counts() { return this->variables_gate_counts; }; - - std::vector> get_arithmetic_gate_connected_component( - bb::UltraCircuitBuilder& 
ultra_circuit_builder, size_t index, size_t block_idx, UltraBlock& blk); - std::vector get_elliptic_gate_connected_component(bb::UltraCircuitBuilder& ultra_circuit_builder, - size_t index, - size_t block_idx, - UltraBlock& blk); - std::vector get_plookup_gate_connected_component(bb::UltraCircuitBuilder& ultra_circuit_builder, - size_t index, - size_t block_idx, - UltraBlock& blk); - std::vector get_sort_constraint_connected_component(bb::UltraCircuitBuilder& ultra_circuit_builder, - size_t index, - size_t block_idx, - UltraBlock& blk); - std::vector get_poseido2s_gate_connected_component(bb::UltraCircuitBuilder& ultra_circuit_builder, - size_t index, - size_t block_idx, - UltraBlock& blk); - std::vector get_memory_gate_connected_component(bb::UltraCircuitBuilder& ultra_circuit_builder, - size_t index, - size_t block_idx, - UltraBlock& blk); - std::vector get_non_native_field_gate_connected_component(bb::UltraCircuitBuilder& ultra_circuit_builder, - size_t index, - size_t block_idx, - UltraBlock& blk); - std::vector get_rom_table_connected_component(bb::UltraCircuitBuilder& ultra_circuit_builder, - const bb::RomTranscript& rom_array); - std::vector get_ram_table_connected_component(bb::UltraCircuitBuilder& ultra_builder, - const bb::RamTranscript& ram_array); + std::unordered_map get_variables_gate_counts() const { return this->variables_gate_counts; }; + + void process_execution_trace(); + + std::vector get_arithmetic_gate_connected_component(size_t index, size_t block_idx, auto& blk); + std::vector get_elliptic_gate_connected_component(size_t index, size_t block_idx, auto& blk); + std::vector get_plookup_gate_connected_component(size_t index, size_t block_idx, auto& blk); + std::vector get_sort_constraint_connected_component(size_t index, size_t block_idx, auto& blk); + std::vector get_poseido2s_gate_connected_component(size_t index, size_t block_idx, auto& blk); + std::vector get_non_native_field_gate_connected_component(size_t index, size_t block_idx, auto& 
blk); + std::vector get_memory_gate_connected_component(size_t index, size_t block_idx, auto& blk); + std::vector get_rom_table_connected_component(const bb::RomTranscript& rom_array); + std::vector get_ram_table_connected_component(const bb::RamTranscript& ram_array); + // functions for MegaCircuitBuilder + std::vector get_databus_connected_component(size_t index, size_t block_idx, auto& blk); + std::vector get_eccop_connected_component(size_t index, size_t block_idx, auto& blk); + std::vector get_eccop_part_connected_component(size_t index, size_t block_idx, auto& blk); void add_new_edge(const uint32_t& first_variable_index, const uint32_t& second_variable_index); - std::vector get_variable_adjacency_list(const uint32_t& variable_index) - { - return variable_adjacency_lists[variable_index]; - }; - void depth_first_search(const uint32_t& variable_index, std::unordered_set& is_used, std::vector& connected_component); - std::vector> find_connected_components(); - - std::vector find_variables_with_degree_one(); - std::unordered_set get_variables_in_one_gate(); - - bool find_arithmetic_gate_for_variable(bb::UltraCircuitBuilder& ultra_circuit_builder, - const uint32_t& variable_idx); - bool find_elliptic_gate_for_variable(bb::UltraCircuitBuilder& ultra_circuit_builder, const uint32_t& variable_idx); - bool find_lookup_gate_for_variable(bb::UltraCircuitBuilder& ultra_circuit_builder, const uint32_t& variable_idx); - - size_t get_distance_between_variables(const uint32_t& first_variable_index, const uint32_t& second_variable_index); + void mark_range_list_connected_components(); + void mark_finalize_connected_components(); + std::vector find_connected_components(bool return_all_connected_components = false); bool check_vertex_in_connected_component(const std::vector& connected_component, const uint32_t& var_index); - - void connect_all_variables_in_vector(bb::UltraCircuitBuilder& ultra_circuit_builder, - const std::vector& variables_vector); - bool 
check_is_not_constant_variable(bb::UltraCircuitBuilder& ultra_circuit_builder, const uint32_t& variable_index); + void connect_all_variables_in_vector(const std::vector& variables_vector); + bool check_is_not_constant_variable(const uint32_t& variable_index); std::pair, size_t> get_connected_component_with_index( const std::vector>& connected_components, size_t index); - std::unordered_set get_variables_in_one_gate_without_range_constraints( - bb::UltraCircuitBuilder& ultra_circuit_builder); - - size_t process_current_decompose_chain(bb::UltraCircuitBuilder& ultra_circuit_constructor, - std::unordered_set& variables_in_one_gate, - size_t index); - void process_current_plookup_gate(bb::UltraCircuitBuilder& ultra_circuit_builder, size_t gate_index); - void remove_unnecessary_decompose_variables(bb::UltraCircuitBuilder& ultra_circuit_builder, - std::unordered_set& variables_in_on_gate, - const std::unordered_set& decompose_variables); - void remove_unnecessary_plookup_variables(bb::UltraCircuitBuilder& ultra_circuit_builder); - void remove_unnecessary_range_constrains_variables(bb::UltraCircuitBuilder& ultra_builder); - std::unordered_set show_variables_in_one_gate(bb::UltraCircuitBuilder& ultra_circuit_builder); - - void remove_unnecessary_aes_plookup_variables(std::unordered_set& variables_in_one_gate, - bb::UltraCircuitBuilder& ultra_circuit_builder, - bb::plookup::BasicTableId& table_id, - size_t gate_index); - void remove_unnecessary_sha256_plookup_variables(std::unordered_set& variables_in_one_gate, - bb::UltraCircuitBuilder& ultra_circuit_builder, - bb::plookup::BasicTableId& table_id, - size_t gate_index); - void remove_record_witness_variables(bb::UltraCircuitBuilder& ultra_builder); - - void print_graph(); - void print_connected_components(); + size_t process_current_decompose_chain(size_t index); + void process_current_plookup_gate(size_t gate_index); + void remove_unnecessary_decompose_variables(const std::unordered_set& decompose_variables); + void 
remove_unnecessary_plookup_variables(); + void remove_unnecessary_range_constrains_variables(); + std::unordered_set get_variables_in_one_gate(); + + void remove_unnecessary_aes_plookup_variables(bb::plookup::BasicTableId& table_id, size_t gate_index); + void remove_unnecessary_sha256_plookup_variables(bb::plookup::BasicTableId& table_id, size_t gate_index); + void remove_record_witness_variables(); + + std::pair, std::unordered_set> analyze_circuit(); + + void print_connected_components_info(); void print_variables_gate_counts(); - void print_variables_edge_counts(); - void print_variable_in_one_gate(bb::UltraCircuitBuilder& ultra_builder, const uint32_t real_idx); + void print_arithmetic_gate_info(size_t gate_idx, auto& block); + void print_elliptic_gate_info(size_t gate_idx, auto& block); + void print_plookup_gate_info(size_t gate_idx, auto& block); + void print_poseidon2s_gate_info(size_t gate_idx, auto& block); + void print_nnf_gate_info(size_t gate_idx, auto& block); + void print_memory_gate_info(size_t gate_idx, auto& block); + void print_delta_range_gate_info(size_t gate_idx, auto& block); + void print_variable_info(const uint32_t real_idx); ~StaticAnalyzer_() = default; private: + // Store reference to the circuit builder + CircuitBuilder& circuit_builder; + bool connect_variables; + std::unordered_map> variable_adjacency_lists; // we use this data structure to contain information about variables and their // connections between each other @@ -193,11 +170,17 @@ template class StaticAnalyzer_ { variables_degree; // we use this data structure to count, how many every variable have edges std::unordered_map, KeyHasher, KeyEquals> variable_gates; // we use this data structure to store gates and TraceBlocks for every variables, where static - // analyzer found them in the circuit. + // analyzer finds them in the circuit. 
std::unordered_set variables_in_one_gate; std::unordered_set fixed_variables; + std::vector connected_components; + std::vector + main_connected_components; // connected components without finalize blocks and range lists }; -using StaticAnalyzer = StaticAnalyzer_; +// Type aliases for convenience +using UltraStaticAnalyzer = StaticAnalyzer_; +using MegaStaticAnalyzer = StaticAnalyzer_; +using StaticAnalyzer = UltraStaticAnalyzer; // Default to Ultra for backward compatibility } // namespace cdg diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description.test.cpp index 5419442758bf..42c41a75fb46 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description.test.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description.test.cpp @@ -23,7 +23,6 @@ using namespace cdg; TEST(boomerang_ultra_circuit_constructor, test_graph_for_arithmetic_gates) { UltraCircuitBuilder circuit_constructor = UltraCircuitBuilder(); - for (size_t i = 0; i < 16; ++i) { for (size_t j = 0; j < 16; ++j) { uint64_t left = static_cast(j); @@ -41,7 +40,7 @@ TEST(boomerang_ultra_circuit_constructor, test_graph_for_arithmetic_gates) StaticAnalyzer graph = StaticAnalyzer(circuit_constructor); auto connected_components = graph.find_connected_components(); - auto variables_in_one_gate = graph.show_variables_in_one_gate(circuit_constructor); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 1024); EXPECT_EQ(connected_components.size(), 256); } @@ -99,7 +98,7 @@ TEST(boomerang_ultra_circuit_constructor, test_graph_for_boolean_gates) StaticAnalyzer graph = StaticAnalyzer(circuit_constructor); auto connected_components = graph.find_connected_components(); auto num_connected_components = connected_components.size(); - auto variables_in_one_gate = 
graph.show_variables_in_one_gate(circuit_constructor); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); bool result = num_connected_components == 0; EXPECT_EQ(result, true); EXPECT_EQ(variables_in_one_gate.size(), 20); @@ -495,12 +494,12 @@ TEST(boomerang_ultra_circuit_constructor, test_variables_gates_counts_for_sorted auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 2); bool result = true; - for (size_t i = 0; i < connected_components[0].size(); i++) { - result = result && (variables_gate_counts[connected_components[0][i]] == 1); + for (const auto& var_idx : connected_components[0].vars()) { + result = result && (variables_gate_counts[var_idx] == 1); } - for (size_t i = 0; i < connected_components[1].size(); i++) { - result = result && (variables_gate_counts[connected_components[1][i]] == 1); + for (const auto& var_idx : connected_components[1].vars()) { + result = result && (variables_gate_counts[var_idx] == 1); } EXPECT_EQ(result, true); } @@ -514,59 +513,33 @@ TEST(boomerang_ultra_circuit_constructor, test_variables_gates_counts_for_sorted */ TEST(boomerang_ultra_circuit_constructor, test_variables_gates_counts_for_sorted_constraints_with_edges) { - fr a = fr::one(); - fr b = fr(2); - fr c = fr(3); - fr d = fr(4); - fr e = fr(5); - fr f = fr(6); - fr g = fr(7); - fr h = fr(8); - UltraCircuitBuilder circuit_constructor; - auto a_idx = circuit_constructor.add_variable(a); - auto b_idx = circuit_constructor.add_variable(b); - auto c_idx = circuit_constructor.add_variable(c); - auto d_idx = circuit_constructor.add_variable(d); - auto e_idx = circuit_constructor.add_variable(e); - auto f_idx = circuit_constructor.add_variable(f); - auto g_idx = circuit_constructor.add_variable(g); - auto h_idx = circuit_constructor.add_variable(h); - circuit_constructor.create_sort_constraint_with_edges( - { a_idx, b_idx, c_idx, d_idx, e_idx, f_idx, g_idx, h_idx }, a, h); - - fr a1 = fr(9); - fr b1 = fr(10); - fr 
c1 = fr(11); - fr d1 = fr(12); - fr e1 = fr(13); - fr f1 = fr(14); - fr g1 = fr(15); - fr h1 = fr(16); - - auto a1_idx = circuit_constructor.add_variable(a1); - auto b1_idx = circuit_constructor.add_variable(b1); - auto c1_idx = circuit_constructor.add_variable(c1); - auto d1_idx = circuit_constructor.add_variable(d1); - auto e1_idx = circuit_constructor.add_variable(e1); - auto f1_idx = circuit_constructor.add_variable(f1); - auto g1_idx = circuit_constructor.add_variable(g1); - auto h1_idx = circuit_constructor.add_variable(h1); - - circuit_constructor.create_sort_constraint_with_edges( - { a1_idx, b1_idx, c1_idx, d1_idx, e1_idx, f1_idx, g1_idx, h1_idx }, a1, h1); + auto add_variables = [&circuit_constructor](const std::vector& vars) { + std::vector res; + res.reserve(vars.size()); + for (const auto& var : vars) { + res.emplace_back(circuit_constructor.add_variable(var)); + } + return res; + }; + std::vector vars1 = { fr::one(), fr(2), fr(3), fr(4), fr(5), fr(6), fr(7), fr(8) }; + std::vector vars2 = { fr(9), fr(10), fr(11), fr(12), fr(13), fr(14), fr(15), fr(16) }; + auto var_idx1 = add_variables(vars1); + auto var_idx2 = add_variables(vars2); + circuit_constructor.create_sort_constraint_with_edges(var_idx1, vars1[0], vars1[vars1.size() - 1]); + circuit_constructor.create_sort_constraint_with_edges(var_idx2, vars2[0], vars2[vars2.size() - 1]); StaticAnalyzer graph = StaticAnalyzer(circuit_constructor); auto connected_components = graph.find_connected_components(); auto variables_gate_counts = graph.get_variables_gate_counts(); + EXPECT_EQ(connected_components.size(), 2); bool result = true; - for (size_t i = 0; i < connected_components[0].size(); i++) { - result = result && (variables_gate_counts[connected_components[0][i]] == 1); - } - - for (size_t i = 0; i < connected_components[1].size(); i++) { - result = result && (variables_gate_counts[connected_components[1][i]] == 1); + for (size_t i = 0; i < var_idx1.size(); i++) { + if (i % 4 == 1 && i > 1) { + result 
= variables_gate_counts[var_idx1[i]] == 2; + } else { + result = variables_gate_counts[var_idx1[i]] == 1; + } } - EXPECT_EQ(connected_components.size(), 2); EXPECT_EQ(result, true); } @@ -600,12 +573,11 @@ TEST(boomerang_ultra_circuit_constructor, test_variables_gates_counts_for_ecc_ad StaticAnalyzer graph = StaticAnalyzer(circuit_constructor); auto variables_gate_counts = graph.get_variables_gate_counts(); auto connected_components = graph.find_connected_components(); - bool result = (variables_gate_counts[connected_components[0][0]] == 1) && - (variables_gate_counts[connected_components[0][1]] == 1) && - (variables_gate_counts[connected_components[0][2]] == 1) && - (variables_gate_counts[connected_components[0][3]] == 1) && - (variables_gate_counts[connected_components[0][4]] == 1) && - (variables_gate_counts[connected_components[0][5]] == 1); + auto variable_indices = connected_components[0].vars(); + bool result = + (variables_gate_counts[variable_indices[0]] == 1) && (variables_gate_counts[variable_indices[1]] == 1) && + (variables_gate_counts[variable_indices[2]] == 1) && (variables_gate_counts[variable_indices[3]] == 1) && + (variables_gate_counts[variable_indices[4]] == 1) && (variables_gate_counts[variable_indices[5]] == 1); EXPECT_EQ(connected_components.size(), 1); EXPECT_EQ(result, true); } @@ -638,24 +610,15 @@ TEST(boomerang_ultra_circuit_constructor, test_variables_gates_counts_for_ecc_db auto variables_gate_counts = graph.get_variables_gate_counts(); auto connected_components = graph.find_connected_components(); - bool result = (variables_gate_counts[connected_components[0][0]] == 1) && - (variables_gate_counts[connected_components[0][1]] == 1) && - (variables_gate_counts[connected_components[0][2]] == 1) && - (variables_gate_counts[connected_components[0][3]] == 1); + auto vars = connected_components[0].vars(); + EXPECT_EQ(vars.size(), 4); + bool result = (variables_gate_counts[vars[0]] == 1) && (variables_gate_counts[vars[1]] == 1) && + 
(variables_gate_counts[vars[2]] == 1) && (variables_gate_counts[vars[3]] == 1); EXPECT_EQ(connected_components.size(), 1); EXPECT_EQ(result, true); } -std::vector add_variables(UltraCircuitBuilder& circuit_constructor, std::vector variables) -{ - std::vector res; - for (size_t i = 0; i < variables.size(); i++) { - res.emplace_back(circuit_constructor.add_variable(variables[i])); - } - return res; -} - /** * @brief Test graph description of circuit with range constraints * @@ -666,7 +629,15 @@ std::vector add_variables(UltraCircuitBuilder& circuit_constructor, st TEST(boomerang_ultra_circuit_constructor, test_graph_for_range_constraints) { UltraCircuitBuilder circuit_constructor = UltraCircuitBuilder(); - auto indices = add_variables(circuit_constructor, { 1, 2, 3, 4 }); + auto add_variables = [&circuit_constructor](const std::vector& vars) { + std::vector res; + res.reserve(vars.size()); + for (const auto& var : vars) { + res.emplace_back(circuit_constructor.add_variable(var)); + } + return res; + }; + auto indices = add_variables({ fr(1), fr(2), fr(3), fr(4) }); for (size_t i = 0; i < indices.size(); i++) { circuit_constructor.create_new_range_constraint(indices[i], 5); } @@ -691,10 +662,10 @@ TEST(boomerang_ultra_circuit_constructor, composed_range_constraint) auto e = fr(d); auto a_idx = circuit_constructor.add_variable(fr(e)); circuit_constructor.create_add_gate( - { a_idx, circuit_constructor.zero_idx, circuit_constructor.zero_idx, 1, 0, 0, -fr(e) }); + { a_idx, circuit_constructor.zero_idx(), circuit_constructor.zero_idx(), 1, 0, 0, -fr(e) }); circuit_constructor.decompose_into_default_range(a_idx, 134); StaticAnalyzer graph = StaticAnalyzer(circuit_constructor); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); -} \ No newline at end of file +} diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_aes128.test.cpp 
b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_aes128.test.cpp index 49321788a779..d5c7f8b7e22f 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_aes128.test.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_aes128.test.cpp @@ -81,7 +81,7 @@ TEST(boomerang_stdlib_aes, test_graph_for_aes_64_bytes) StaticAnalyzer graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -135,6 +135,6 @@ TEST(boomerang_stdlib_aes, test_variable_gates_count_for_aes128cbc) StaticAnalyzer graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - std::unordered_set variables_in_one_gate = graph.show_variables_in_one_gate(builder); + std::unordered_set variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_bigfield.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_bigfield.test.cpp index e13d3d598b11..67184497cac9 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_bigfield.test.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_bigfield.test.cpp @@ -70,7 +70,7 @@ TEST(boomerang_bigfield, test_graph_description_bigfield_constructors) auto graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = 
graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 1); } @@ -101,7 +101,7 @@ TEST(boomerang_bigfield, test_graph_description_bigfield_addition) auto graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -133,7 +133,7 @@ TEST(boomerang_bigfield, test_graph_description_bigfield_substraction) auto graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); for (const auto& elem : variables_in_one_gate) { info("elem == ", elem); @@ -164,7 +164,7 @@ TEST(boomerang_bigfield, test_graph_description_bigfield_multiplication) auto graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -198,7 +198,7 @@ TEST(boomerang_bigfield, test_graph_description_bigfield_division) auto graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -251,7 +251,7 @@ TEST(boomerang_bigfield, test_graph_description_bigfield_mix_operations) auto graph = StaticAnalyzer(builder); auto connected_components = 
graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -279,7 +279,7 @@ TEST(boomerang_bigfield, test_graph_description_constructor_high_low_bits_and_op auto graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -303,7 +303,7 @@ TEST(boomerang_bigfield, test_graph_description_mul_function) auto graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -325,7 +325,7 @@ TEST(boomerang_bigfield, test_graph_description_sqr_function) auto graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -351,7 +351,7 @@ TEST(boomerang_bigfield, test_graph_description_madd_function) auto graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -389,7 +389,7 @@ TEST(boomerang_bigfield, 
test_graph_description_mult_madd_function) fix_bigfield_element(f); builder.finalize_circuit(false); auto graph = StaticAnalyzer(builder); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -415,6 +415,6 @@ TEST(boomerang_bigfield, test_graph_description_constructor_high_low_bits) builder.finalize_circuit(false); auto graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_blake2s.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_blake2s.test.cpp index d4ff768a8d17..c8bced955486 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_blake2s.test.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_blake2s.test.cpp @@ -34,7 +34,7 @@ TEST(boomerang_stdlib_blake2s, graph_description_single_block_plookup) StaticAnalyzer graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -63,6 +63,6 @@ TEST(boomerang_stdlib_blake2s, graph_description_double_block_plookup) StaticAnalyzer graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); 
EXPECT_EQ(variables_in_one_gate.size(), 0); } diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_blake3s.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_blake3s.test.cpp index d42f3a767b90..b2d5c085907f 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_blake3s.test.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_blake3s.test.cpp @@ -37,7 +37,7 @@ TEST(boomerang_stdlib_blake3s, test_single_block_plookup) StaticAnalyzer graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -61,6 +61,6 @@ TEST(boomerang_stdlib_blake3s, test_double_block_plookup) StaticAnalyzer graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_dynamic_array.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_dynamic_array.test.cpp index 1ad87624e0f0..38c9b88fb741 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_dynamic_array.test.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_dynamic_array.test.cpp @@ -43,7 +43,7 @@ TEST(boomerang_stdlib_dynamic_array, graph_description_dynamic_array_method_resi array.resize(next_size, 7); StaticAnalyzer graph = StaticAnalyzer(builder); auto connected_components = 
graph.find_connected_components(); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(connected_components.size(), 1); EXPECT_EQ(variables_in_one_gate.size(), max_size); } @@ -83,6 +83,6 @@ TEST(boomerang_stdlib_dynamic_array, graph_description_dynamic_array_consistency StaticAnalyzer graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), max_size); } \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_goblin.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_goblin.test.cpp index 1275bee8255c..29d4c46f16ff 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_goblin.test.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_goblin.test.cpp @@ -20,7 +20,7 @@ class BoomerangGoblinRecursiveVerifierTests : public testing::Test { using OuterFlavor = UltraFlavor; using OuterProver = UltraProver_; using OuterVerifier = UltraVerifier_; - using OuterDeciderProvingKey = DeciderProvingKey_; + using OuterProverInstance = ProverInstance_; using Commitment = MergeVerifier::Commitment; using MergeCommitments = MergeVerifier::InputCommitments; @@ -40,38 +40,25 @@ class BoomerangGoblinRecursiveVerifierTests : public testing::Test { * * @return ProverOutput */ - static ProverOutput create_goblin_prover_output(const size_t NUM_CIRCUITS = 3) + static ProverOutput create_goblin_prover_output() { Goblin goblin; - // Construct and accumulate multiple circuits - for (size_t idx = 0; idx < NUM_CIRCUITS - 1; ++idx) { - MegaCircuitBuilder builder{ 
goblin.op_queue }; - GoblinMockCircuits::construct_simple_circuit(builder); - goblin.prove_merge(); - } - - auto goblin_transcript = std::make_shared(); + GoblinMockCircuits::construct_and_merge_mock_circuits(goblin, 5); - Goblin goblin_final; - goblin_final.op_queue = goblin.op_queue; - MegaCircuitBuilder builder{ goblin_final.op_queue }; - builder.queue_ecc_no_op(); - GoblinMockCircuits::construct_simple_circuit(builder); - goblin_final.op_queue->merge(); + // Merge the ecc ops from the newly constructed circuit + auto goblin_proof = goblin.prove(MergeSettings::APPEND); // Subtable values and commitments - needed for (Recursive)MergeVerifier MergeCommitments merge_commitments; - auto t_current = goblin_final.op_queue->construct_current_ultra_ops_subtable_columns(); - auto T_prev = goblin_final.op_queue->construct_previous_ultra_ops_table_columns(); - CommitmentKey pcs_commitment_key(goblin_final.op_queue->get_ultra_ops_table_num_rows()); + auto t_current = goblin.op_queue->construct_current_ultra_ops_subtable_columns(); + auto T_prev = goblin.op_queue->construct_previous_ultra_ops_table_columns(); + CommitmentKey pcs_commitment_key(goblin.op_queue->get_ultra_ops_table_num_rows()); for (size_t idx = 0; idx < MegaFlavor::NUM_WIRES; idx++) { merge_commitments.t_commitments[idx] = pcs_commitment_key.commit(t_current[idx]); merge_commitments.T_prev_commitments[idx] = pcs_commitment_key.commit(T_prev[idx]); } // Output is a goblin proof plus ECCVM/Translator verification keys - return { goblin_final.prove(), - { std::make_shared(), std::make_shared() }, - merge_commitments }; + return { goblin_proof, { std::make_shared(), std::make_shared() }, merge_commitments }; } }; @@ -92,16 +79,19 @@ TEST_F(BoomerangGoblinRecursiveVerifierTests, graph_description_basic) RecursiveCommitment::from_witness(&builder, merge_commitments.t_commitments[idx]); recursive_merge_commitments.T_prev_commitments[idx] = RecursiveCommitment::from_witness(&builder, 
merge_commitments.T_prev_commitments[idx]); + recursive_merge_commitments.t_commitments[idx].unset_free_witness_tag(); + recursive_merge_commitments.T_prev_commitments[idx].unset_free_witness_tag(); } GoblinRecursiveVerifier verifier{ &builder, verifier_input }; - GoblinRecursiveVerifierOutput output = verifier.verify(proof, recursive_merge_commitments); + GoblinRecursiveVerifierOutput output = verifier.verify(proof, recursive_merge_commitments, MergeSettings::APPEND); output.points_accumulator.set_public(); // Construct and verify a proof for the Goblin Recursive Verifier circuit { - auto proving_key = std::make_shared(builder); - auto verification_key = std::make_shared(proving_key->get_precomputed()); - OuterProver prover(proving_key, verification_key); + auto prover_instance = std::make_shared(builder); + auto verification_key = + std::make_shared(prover_instance->get_precomputed()); + OuterProver prover(prover_instance, verification_key); OuterVerifier verifier(verification_key); auto proof = prover.construct_proof(); bool verified = verifier.template verify_proof(proof).result; @@ -115,7 +105,7 @@ TEST_F(BoomerangGoblinRecursiveVerifierTests, graph_description_basic) translator_pairing_points.P1.y.fix_witness(); info("Recursive Verifier: num gates = ", builder.num_gates); auto graph = cdg::StaticAnalyzer(builder, false); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_megacircuitbuilder.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_megacircuitbuilder.test.cpp new file mode 100644 index 000000000000..7695124bcb5d --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_megacircuitbuilder.test.cpp @@ -0,0 +1,85 @@ +#include 
"barretenberg/boomerang_value_detection/graph.hpp" +#include "barretenberg/circuit_checker/circuit_checker.hpp" +#include "barretenberg/common/test.hpp" +#include "barretenberg/goblin/mock_circuits.hpp" +#include "barretenberg/stdlib/primitives/bigfield/constants.hpp" +#include "barretenberg/stdlib/primitives/databus/databus.hpp" +#include "barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp" + +using namespace bb; +using namespace cdg; + +using Builder = MegaCircuitBuilder; +using field_ct = stdlib::field_t; +using witness_ct = stdlib::witness_t; +using databus_ct = stdlib::databus; + +namespace { +auto& engine = numeric::get_debug_randomness(); +} +namespace bb { + +TEST(BoomerangMegaCircuitBuilder, BasicCircuit) +{ + MegaCircuitBuilder builder = MegaCircuitBuilder(); + fr a = fr::one(); + builder.add_public_variable(a); + + for (size_t i = 0; i < 16; ++i) { + for (size_t j = 0; j < 16; ++j) { + uint64_t left = static_cast(j); + uint64_t right = static_cast(i); + uint32_t left_idx = builder.add_variable(fr(left)); + uint32_t right_idx = builder.add_variable(fr(right)); + uint32_t result_idx = builder.add_variable(fr(left ^ right)); + + uint32_t add_idx = builder.add_variable(fr(left) + fr(right) + builder.get_variable(result_idx)); + builder.create_big_add_gate( + { left_idx, right_idx, result_idx, add_idx, fr(1), fr(1), fr(1), fr(-1), fr(0) }); + } + } + + // Compute a simple point accumulation natively + auto P1 = g1::affine_element::random_element(); + auto P2 = g1::affine_element::random_element(); + auto z = fr::random_element(); + + builder.queue_ecc_add_accum(P1); + builder.queue_ecc_mul_accum(P2, z); + builder.queue_ecc_eq(); + + auto tool = MegaStaticAnalyzer(builder); + auto connected_components = tool.find_connected_components(); + EXPECT_EQ(connected_components.size(), 257); + for (size_t i = 0; i < connected_components.size(); i++) { + if (connected_components[i].size() != 4) { + EXPECT_EQ(connected_components[i].size(), 18); + } + } + auto 
variables_in_one_gate = tool.get_variables_in_one_gate(); +} + +/** + * @brief Check that the ultra ops are recorded correctly in the EccOpQueue + * + */ +TEST(BoomerangMegaCircuitBuilder, OnlyGoblinEccOpQueueUltraOps) +{ + // Construct a simple circuit with op gates + auto builder = MegaCircuitBuilder(); + + // Compute a simple point accumulation natively + auto P1 = g1::affine_element::random_element(); + auto P2 = g1::affine_element::random_element(); + auto z = fr::random_element(); + + // Add gates corresponding to the above operations + builder.queue_ecc_add_accum(P1); + builder.queue_ecc_mul_accum(P2, z); + builder.queue_ecc_eq(); + + auto tool = MegaStaticAnalyzer(builder); + auto cc = tool.find_connected_components(); + EXPECT_EQ(cc.size(), 1); +} +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_merge_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_merge_recursive_verifier.test.cpp new file mode 100644 index 000000000000..ee209ee8429b --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_merge_recursive_verifier.test.cpp @@ -0,0 +1,192 @@ +#include "barretenberg/boomerang_value_detection/graph.hpp" +#include "barretenberg/circuit_checker/circuit_checker.hpp" +#include "barretenberg/common/test.hpp" +#include "barretenberg/ecc/fields/field_conversion.hpp" +#include "barretenberg/goblin/mock_circuits.hpp" +#include "barretenberg/stdlib/merge_verifier/merge_recursive_verifier.hpp" +#include "barretenberg/stdlib/primitives/curves/bn254.hpp" +#include "barretenberg/ultra_honk/merge_prover.hpp" +#include "barretenberg/ultra_honk/merge_verifier.hpp" +#include "barretenberg/ultra_honk/ultra_prover.hpp" +#include "barretenberg/ultra_honk/ultra_verifier.hpp" + +using namespace cdg; + +namespace bb::stdlib::recursion::goblin { + +/** + * @brief Test suite for recursive verification of Goblin Merge proofs 
+ * @details The recursive verification circuit is arithmetized using Goblin-style Ultra arithmetization + * (MegaCircuitBuilder). + * + * @tparam Builder + */ +template class BoomerangRecursiveMergeVerifierTest : public testing::Test { + + // Types for recursive verifier circuit + using RecursiveMergeVerifier = MergeRecursiveVerifier_; + using RecursiveTableCommitments = MergeRecursiveVerifier_::TableCommitments; + using RecursiveMergeCommitments = MergeRecursiveVerifier_::InputCommitments; + + // Define types relevant for inner circuit + using InnerFlavor = MegaFlavor; + using InnerProverInstance = ProverInstance_; + using InnerBuilder = typename InnerFlavor::CircuitBuilder; + + // Define additional types for testing purposes + using Commitment = InnerFlavor::Commitment; + using FF = InnerFlavor::FF; + using VerifierCommitmentKey = bb::VerifierCommitmentKey; + using MergeProof = MergeProver::MergeProof; + using TableCommitments = MergeVerifier::TableCommitments; + using MergeCommitments = MergeVerifier::InputCommitments; + + enum class TamperProofMode { None, Shift, MCommitment, LEval }; + + public: + static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } + + static void tamper_with_proof(MergeProof& merge_proof, const TamperProofMode tampering_mode) + { + const size_t shift_idx = 0; // Index of shift_size in the merge proof + const size_t m_commitment_idx = 1; // Index of first commitment to merged table in merge proof + const size_t l_eval_idx = 34; // Index of first evaluation of l(1/kappa) in merge proof + + switch (tampering_mode) { + case TamperProofMode::Shift: + // Tamper with the shift size in the proof + merge_proof[shift_idx] += 1; + break; + case TamperProofMode::MCommitment: { + // Tamper with the commitment in the proof + Commitment m_commitment = + bb::field_conversion::convert_from_bn254_frs(std::span{ merge_proof }.subspan( + m_commitment_idx, bb::field_conversion::calc_num_bn254_frs())); + m_commitment = 
m_commitment + Commitment::one(); + auto m_commitment_frs = bb::field_conversion::convert_to_bn254_frs(m_commitment); + for (size_t idx = 0; idx < 4; ++idx) { + merge_proof[m_commitment_idx + idx] = m_commitment_frs[idx]; + } + break; + } + case TamperProofMode::LEval: + // Tamper with the evaluation in the proof + merge_proof[l_eval_idx] -= FF(1); + break; + default: + // Nothing to do + break; + } + } + + static void analyze_circuit(RecursiveBuilder& outer_circuit) + { + if constexpr (IsMegaBuilder) { + MegaStaticAnalyzer tool = MegaStaticAnalyzer(outer_circuit); + auto result = tool.analyze_circuit(); + EXPECT_EQ(result.first.size(), 1); + EXPECT_EQ(result.second.size(), 0); + } + if constexpr (IsUltraBuilder) { + StaticAnalyzer tool = StaticAnalyzer(outer_circuit); + auto result = tool.analyze_circuit(); + EXPECT_EQ(result.first.size(), 1); + EXPECT_EQ(result.second.size(), 0); + } + } + + static void prove_and_verify_merge(const std::shared_ptr& op_queue, + const MergeSettings settings = MergeSettings::PREPEND, + const bool run_analyzer = false, + const TamperProofMode tampering_mode = TamperProofMode::None, + const bool expected = true) + + { + RecursiveBuilder outer_circuit; + + MergeProver merge_prover{ op_queue, settings }; + auto merge_proof = merge_prover.construct_proof(); + tamper_with_proof(merge_proof, tampering_mode); + + // Subtable values and commitments - needed for (Recursive)MergeVerifier + MergeCommitments merge_commitments; + RecursiveMergeCommitments recursive_merge_commitments; + auto t_current = op_queue->construct_current_ultra_ops_subtable_columns(); + auto T_prev = op_queue->construct_previous_ultra_ops_table_columns(); + for (size_t idx = 0; idx < InnerFlavor::NUM_WIRES; idx++) { + merge_commitments.t_commitments[idx] = merge_prover.pcs_commitment_key.commit(t_current[idx]); + merge_commitments.T_prev_commitments[idx] = merge_prover.pcs_commitment_key.commit(T_prev[idx]); + recursive_merge_commitments.t_commitments[idx] = + 
RecursiveMergeVerifier::Commitment::from_witness(&outer_circuit, merge_commitments.t_commitments[idx]); + recursive_merge_commitments.T_prev_commitments[idx] = RecursiveMergeVerifier::Commitment::from_witness( + &outer_circuit, merge_commitments.T_prev_commitments[idx]); + // Removing the free witness tag, since the merge commitments in the full scheme are supposed to + // be fiat-shamirred earlier + recursive_merge_commitments.t_commitments[idx].unset_free_witness_tag(); + recursive_merge_commitments.T_prev_commitments[idx].unset_free_witness_tag(); + } + + // Create a recursive merge verification circuit for the merge proof + RecursiveMergeVerifier verifier{ &outer_circuit, settings }; + verifier.transcript->enable_manifest(); + const stdlib::Proof stdlib_merge_proof(outer_circuit, merge_proof); + [[maybe_unused]] auto [pairing_points, recursive_merged_table_commitments] = + verifier.verify_proof(stdlib_merge_proof, recursive_merge_commitments); + + // Check for a failure flag in the recursive verifier circuit + EXPECT_EQ(outer_circuit.failed(), !expected) << outer_circuit.err(); + if (run_analyzer) { + analyze_circuit(outer_circuit); + } + } + + static void test_recursive_merge_verification_prepend() + { + auto op_queue = std::make_shared(); + + InnerBuilder circuit{ op_queue }; + GoblinMockCircuits::construct_simple_circuit(circuit); + prove_and_verify_merge(op_queue); + + InnerBuilder circuit2{ op_queue }; + GoblinMockCircuits::construct_simple_circuit(circuit2); + prove_and_verify_merge(op_queue); + + InnerBuilder circuit3{ op_queue }; + GoblinMockCircuits::construct_simple_circuit(circuit3); + prove_and_verify_merge(op_queue, MergeSettings::PREPEND, true); + } + + static void test_recursive_merge_verification_append() + { + auto op_queue = std::make_shared(); + + InnerBuilder circuit{ op_queue }; + GoblinMockCircuits::construct_simple_circuit(circuit); + prove_and_verify_merge(op_queue); + + InnerBuilder circuit2{ op_queue }; + 
GoblinMockCircuits::construct_simple_circuit(circuit2); + prove_and_verify_merge(op_queue); + + InnerBuilder circuit3{ op_queue }; + GoblinMockCircuits::construct_simple_circuit(circuit3); + prove_and_verify_merge(op_queue, MergeSettings::APPEND, true); + } +}; + +using Builder = testing::Types; + +TYPED_TEST_SUITE(BoomerangRecursiveMergeVerifierTest, Builder); + +TYPED_TEST(BoomerangRecursiveMergeVerifierTest, RecursiveVerificationPrepend) +{ + TestFixture::test_recursive_merge_verification_prepend(); +}; + +TYPED_TEST(BoomerangRecursiveMergeVerifierTest, RecursiveVerificationAppend) +{ + TestFixture::test_recursive_merge_verification_append(); +}; + +} // namespace bb::stdlib::recursion::goblin diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_pedersen.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_pedersen.test.cpp new file mode 100644 index 000000000000..88eca174c626 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_pedersen.test.cpp @@ -0,0 +1,232 @@ +#include "barretenberg/boomerang_value_detection/graph.hpp" +#include "barretenberg/circuit_checker/circuit_checker.hpp" +#include "barretenberg/common/test.hpp" +#include "barretenberg/crypto/pedersen_commitment/pedersen.hpp" +#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" +#include "barretenberg/numeric/random/engine.hpp" +#include "barretenberg/stdlib/hash/pedersen/pedersen.hpp" +#include "barretenberg/stdlib/primitives/curves/bn254.hpp" +#include "barretenberg/stdlib_circuit_builders/plookup_tables/fixed_base/fixed_base.hpp" + +using namespace cdg; +using namespace bb; + +namespace { +auto& engine = numeric::get_debug_randomness(); +} + +template class BoomerangStdlibPedersen : public testing::Test { + using _curve = stdlib::bn254; + + using byte_array_ct = typename _curve::byte_array_ct; + using fr_ct = typename _curve::ScalarField; + using witness_ct = typename 
_curve::witness_ct; + using public_witness_ct = typename _curve::public_witness_ct; + using pedersen_hash = typename stdlib::pedersen_hash; + + public: + static void analyze_circuit(Builder& builder) + { + if constexpr (IsMegaBuilder) { + MegaStaticAnalyzer tool = MegaStaticAnalyzer(builder); + auto res = tool.analyze_circuit(); + auto cc = res.first; + auto variables_in_one_gate = res.second; + EXPECT_EQ(cc.size(), 1); + EXPECT_EQ(variables_in_one_gate.size(), 0); + if (variables_in_one_gate.size() > 0) { + auto first_element = + std::vector(variables_in_one_gate.begin(), variables_in_one_gate.end())[0]; + tool.print_variable_info(first_element); + } + } + if constexpr (IsUltraBuilder) { + StaticAnalyzer tool = StaticAnalyzer(builder); + auto res = tool.analyze_circuit(); + auto cc = res.first; + auto variables_in_one_gate = res.second; + EXPECT_EQ(cc.size(), 1); + EXPECT_EQ(variables_in_one_gate.size(), 0); + if (variables_in_one_gate.size() > 0) { + auto first_element = + std::vector(variables_in_one_gate.begin(), variables_in_one_gate.end())[0]; + tool.print_variable_info(first_element); + } + } + } + static void test_pedersen_two() + { + Builder builder; + + fr left_in = fr::random_element(); + fr right_in = fr::random_element(); + + // ensure left has skew 1, right has skew 0 + if ((left_in.from_montgomery_form().data[0] & 1) == 1) { + left_in += fr::one(); + } + if ((right_in.from_montgomery_form().data[0] & 1) == 0) { + right_in += fr::one(); + } + + fr_ct left = public_witness_ct(&builder, left_in); + fr_ct right = witness_ct(&builder, right_in); + + builder.fix_witness(left.witness_index, left.get_value()); + builder.fix_witness(right.witness_index, right.get_value()); + + fr_ct out = pedersen_hash::hash({ left, right }); + out.fix_witness(); + + analyze_circuit(builder); + } + + static void test_pedersen_large() + { + Builder builder; + fr left_in = fr::random_element(); + fr right_in = fr::random_element(); + // ensure left has skew 1, right has skew 0 
+ if ((left_in.from_montgomery_form().data[0] & 1) == 1) { + left_in += fr::one(); + } + if ((right_in.from_montgomery_form().data[0] & 1) == 0) { + right_in += fr::one(); + } + fr_ct left = witness_ct(&builder, left_in); + builder.update_used_witnesses(left.witness_index); + fr_ct right = witness_ct(&builder, right_in); + for (size_t i = 0; i < 256; ++i) { + left = pedersen_hash::hash({ left, right }); + } + left.fix_witness(); + builder.set_public_input(left.witness_index); + bool result = CircuitChecker::check(builder); + EXPECT_EQ(result, true); + analyze_circuit(builder); + } + + static void test_hash_eight() + { + Builder builder; + + std::vector inputs; + inputs.reserve(8); + std::vector> witness_inputs; + + for (size_t i = 0; i < 8; ++i) { + inputs.emplace_back(bb::fr::random_element()); + witness_inputs.emplace_back(witness_ct(&builder, inputs[i])); + } + std::vector witness_indices; + for (auto& wi : witness_inputs) { + witness_indices.emplace_back(wi.witness_index); + } + // In a test we don't have additional constraints except for constraint for splitting inputs on 2 scalars for + // batch_mul and checking linear_identity. So we can put them into used_witnesses. 
+ builder.update_used_witnesses(witness_indices); + constexpr size_t hash_idx = 10; + auto result = pedersen_hash::hash(witness_inputs, hash_idx); + result.fix_witness(); + analyze_circuit(builder); + } + + static void test_multi_hash() + { + Builder builder; + + for (size_t i = 0; i < 7; ++i) { + std::vector inputs; + inputs.push_back(bb::fr::random_element()); + inputs.push_back(bb::fr::random_element()); + inputs.push_back(bb::fr::random_element()); + inputs.push_back(bb::fr::random_element()); + + if (i == 1) { + inputs[0] = fr(0); + } + if (i == 2) { + inputs[1] = fr(0); + inputs[2] = fr(0); + } + if (i == 3) { + inputs[3] = fr(0); + } + if (i == 4) { + inputs[0] = fr(0); + inputs[3] = fr(0); + } + if (i == 5) { + inputs[0] = fr(0); + inputs[1] = fr(0); + inputs[2] = fr(0); + inputs[3] = fr(0); + } + if (i == 6) { + inputs[1] = fr(1); + } + std::vector witnesses; + for (auto input : inputs) { + witnesses.push_back(witness_ct(&builder, input)); + } + // In a test we don't have additional constraints except for constraint for splitting inputs on 2 scalars + // for batch_mul and checking linear_identity. So we can put them into used_witnesses. + for (auto wit : witnesses) { + builder.update_used_witnesses(wit.witness_index); + } + fr_ct result = pedersen_hash::hash(witnesses); + result.fix_witness(); + } + analyze_circuit(builder); + } + + static void test_large_inputs() + { + Builder builder; + std::vector native_inputs; + std::vector witness_inputs; + + constexpr size_t size = 200; + for (size_t i = 0; i < size; ++i) { + native_inputs.push_back(fr::random_element()); + witness_inputs.push_back(witness_ct(&builder, native_inputs.back())); + } + // In a test we don't have additional constraints except for constraint for splitting inputs on 2 scalars for + // batch_mul and checking linear_identity. So we can put them into used_witnesses. 
+ for (auto wi : witness_inputs) { + builder.update_used_witnesses(wi.witness_index); + } + auto result = pedersen_hash::hash(witness_inputs); + result.fix_witness(); + analyze_circuit(builder); + } +}; + +using CircuitTypes = testing::Types; + +TYPED_TEST_SUITE(BoomerangStdlibPedersen, CircuitTypes); + +TYPED_TEST(BoomerangStdlibPedersen, Small) +{ + TestFixture::test_pedersen_two(); +} + +TYPED_TEST(BoomerangStdlibPedersen, Large) +{ + TestFixture::test_pedersen_large(); +} + +TYPED_TEST(BoomerangStdlibPedersen, HashEight) +{ + TestFixture::test_hash_eight(); +} + +TYPED_TEST(BoomerangStdlibPedersen, TestLargeInputs) +{ + TestFixture::test_large_inputs(); +} + +TYPED_TEST(BoomerangStdlibPedersen, TestMultiHash) +{ + TestFixture::test_multi_hash(); +} diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_poseidon2s_permutation.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_poseidon2s_permutation.test.cpp index d660f103591a..0587f213af1b 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_poseidon2s_permutation.test.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_poseidon2s_permutation.test.cpp @@ -20,7 +20,7 @@ auto& engine = numeric::get_debug_randomness(); using Params = crypto::Poseidon2Bn254ScalarFieldParams; using Builder = UltraCircuitBuilder; -using Permutation = stdlib::Poseidon2Permutation; +using Permutation = stdlib::Poseidon2Permutation; using field_t = stdlib::field_t; using witness_t = stdlib::witness_t; using _curve = stdlib::bn254; @@ -70,42 +70,11 @@ void test_poseidon2s_circuit(size_t num_inputs = 5) for (auto& elem : inputs) { elem.fix_witness(); } - [[maybe_unused]] auto result = stdlib::poseidon2::hash(builder, inputs); + [[maybe_unused]] auto result = stdlib::poseidon2::hash(inputs); auto graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); 
EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); - std::unordered_set outputs{ - result.witness_index, result.witness_index + 1, result.witness_index + 2, result.witness_index + 3 - }; - for (const auto& elem : variables_in_one_gate) { - EXPECT_EQ(outputs.contains(elem), true); - } -} - -/** - * @brief Test graph description for poseidon2 hash with byte array input - * - * The result should be one connected component, and only output variables must be in one gate - * - * @param num_inputs Number of random bytes to generate - */ -void test_poseidon2s_hash_byte_array(size_t num_inputs = 5) -{ - Builder builder; - - std::vector input; - input.reserve(num_inputs); - for (size_t i = 0; i < num_inputs; ++i) { - input.push_back(engine.get_random_uint8()); - } - - byte_array_ct circuit_input(&builder, input); - auto result = stdlib::poseidon2::hash_buffer(builder, circuit_input); - auto graph = StaticAnalyzer(builder); - auto connected_components = graph.find_connected_components(); - EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); std::unordered_set outputs{ result.witness_index, result.witness_index + 1, result.witness_index + 2, result.witness_index + 3 }; @@ -135,7 +104,7 @@ void test_poseidon2s_hash_repeated_pairs(size_t num_inputs = 5) std::unordered_set outputs{ left.witness_index }; // num_inputs - 1 iterations since the first hash hashes two elements for (size_t i = 0; i < num_inputs - 1; ++i) { - left = stdlib::poseidon2::hash(builder, { left, right }); + left = stdlib::poseidon2::hash({ left, right }); outputs.insert(left.witness_index + 1); outputs.insert(left.witness_index + 2); outputs.insert(left.witness_index + 3); @@ -145,7 +114,7 @@ void test_poseidon2s_hash_repeated_pairs(size_t num_inputs = 5) auto graph = StaticAnalyzer(builder); auto 
connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); for (const auto& elem : variables_in_one_gate) { EXPECT_EQ(outputs.contains(elem), true); } @@ -176,7 +145,7 @@ TEST(boomerang_poseidon2s, test_graph_for_poseidon2s_one_permutation) auto graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -212,7 +181,7 @@ TEST(boomerang_poseidon2s, test_graph_for_poseidon2s_two_permutations) auto graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 2); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -234,16 +203,6 @@ TEST(boomerang_poseidon2s, test_graph_for_poseidon2s_for_one_input_size) test_poseidon2s_circuit(); } -/** - * @brief Test graph for poseidon2s hash with byte arrays of varying sizes - */ -TEST(boomerang_poseidon2s, test_graph_for_poseidon2s_hash_byte_array) -{ - for (size_t num_inputs = 6; num_inputs < 100; num_inputs++) { - test_poseidon2s_hash_byte_array(num_inputs); - } -} - /** * @brief Test graph for poseidon2s with repeated hash operations */ diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_protogalaxy.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_protogalaxy.test.cpp new file mode 100644 index 000000000000..89ce36f2f53d --- /dev/null +++ 
b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_protogalaxy.test.cpp @@ -0,0 +1,267 @@ +#include "barretenberg/boomerang_value_detection/graph.hpp" +#include "barretenberg/circuit_checker/circuit_checker.hpp" +#include "barretenberg/common/test.hpp" +#include "barretenberg/protogalaxy/protogalaxy_prover.hpp" +#include "barretenberg/protogalaxy/protogalaxy_verifier.hpp" +#include "barretenberg/stdlib/hash/blake3s/blake3s.hpp" +#include "barretenberg/stdlib/hash/pedersen/pedersen.hpp" +#include "barretenberg/stdlib/honk_verifier/decider_recursive_verifier.hpp" +#include "barretenberg/stdlib/protogalaxy_verifier/protogalaxy_recursive_verifier.hpp" +#include "barretenberg/stdlib/special_public_inputs/special_public_inputs.hpp" +#include "barretenberg/ultra_honk/decider_prover.hpp" +#include "barretenberg/ultra_honk/ultra_prover.hpp" +#include "barretenberg/ultra_honk/ultra_verifier.hpp" + +auto& engine = bb::numeric::get_debug_randomness(); + +namespace bb::stdlib::recursion::honk { +class BoomerangProtogalaxyRecursiveTests : public testing::Test { + public: + // Define types for the inner circuit, i.e. the circuit whose proof will be recursively verified + using RecursiveFlavor = MegaRecursiveFlavor_; + using InnerFlavor = RecursiveFlavor::NativeFlavor; + using InnerProver = UltraProver_; + using InnerVerifier = UltraVerifier_; + using InnerBuilder = InnerFlavor::CircuitBuilder; + using InnerProverInstance = ProverInstance_; + using InnerVerifierInstance = ::bb::VerifierInstance_; + using InnerVerificationKey = InnerFlavor::VerificationKey; + using InnerCurve = bn254; + using Commitment = InnerFlavor::Commitment; + using FF = InnerFlavor::FF; + + // Defines types for the outer circuit, i.e. 
the circuit of the recursive verifier + using OuterBuilder = RecursiveFlavor::CircuitBuilder; + using OuterFlavor = std::conditional_t, MegaFlavor, UltraFlavor>; + using OuterProver = UltraProver_; + using OuterVerifier = UltraVerifier_; + using OuterProverInstance = ProverInstance_; + + using RecursiveVerifierInstance = RecursiveVerifierInstance_; + using RecursiveVerificationKey = RecursiveVerifierInstance::VerificationKey; + using RecursiveVKAndHash = RecursiveVerifierInstance::VKAndHash; + using FoldingRecursiveVerifier = ProtogalaxyRecursiveVerifier_; + using DeciderRecursiveVerifier = DeciderRecursiveVerifier_; + using InnerDeciderProver = DeciderProver_; + using InnerDeciderVerifier = DeciderVerifier_; + using InnerFoldingVerifier = ProtogalaxyVerifier_; + using InnerFoldingProver = ProtogalaxyProver_; + static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } + + static void create_function_circuit(InnerBuilder& builder, size_t log_num_gates = 10) + { + using fr_ct = typename InnerCurve::ScalarField; + using fq_ct = stdlib::bigfield; + using public_witness_ct = typename InnerCurve::public_witness_ct; + using witness_ct = typename InnerCurve::witness_ct; + using byte_array_ct = typename InnerCurve::byte_array_ct; + using fr = typename InnerCurve::ScalarFieldNative; + + // Create 2^log_n many add gates based on input log num gates + const size_t num_gates = 1 << log_num_gates; + for (size_t i = 0; i < num_gates; ++i) { + fr a = fr::random_element(&engine); + uint32_t a_idx = builder.add_variable(a); + + fr b = fr::random_element(&engine); + fr c = fr::random_element(&engine); + fr d = a + b + c; + uint32_t b_idx = builder.add_variable(b); + uint32_t c_idx = builder.add_variable(c); + uint32_t d_idx = builder.add_variable(d); + + builder.create_big_add_gate({ a_idx, b_idx, c_idx, d_idx, fr(1), fr(1), fr(1), fr(-1), fr(0) }); + } + + // Define some additional non-trivial but arbitrary circuit logic + fr_ct 
a(public_witness_ct(&builder, fr::random_element(&engine))); + fr_ct b(public_witness_ct(&builder, fr::random_element(&engine))); + fr_ct c(public_witness_ct(&builder, fr::random_element(&engine))); + + for (size_t i = 0; i < 32; ++i) { + a = (a * b) + b + a; + a = a.madd(b, c); + } + pedersen_hash::hash({ a, b }); + byte_array_ct to_hash(&builder, "nonsense test data"); + stdlib::Blake3s::hash(to_hash); + + fr bigfield_data = fr::random_element(&engine); + fr bigfield_data_a{ bigfield_data.data[0], bigfield_data.data[1], 0, 0 }; + fr bigfield_data_b{ bigfield_data.data[2], bigfield_data.data[3], 0, 0 }; + + fq_ct big_a(fr_ct(witness_ct(&builder, bigfield_data_a.to_montgomery_form())), fr_ct(witness_ct(&builder, 0))); + fq_ct big_b(fr_ct(witness_ct(&builder, bigfield_data_b.to_montgomery_form())), fr_ct(witness_ct(&builder, 0))); + + big_a* big_b; + + stdlib::recursion::PairingPoints::add_default_to_public_inputs(builder); + }; + + static void test_recursive_folding(const size_t num_verifiers = 1) + { + // Create two arbitrary circuits for the first round of folding + InnerBuilder builder1; + create_function_circuit(builder1); + InnerBuilder builder2; + builder2.add_public_variable(FF(1)); + create_function_circuit(builder2); + + auto prover_inst_1 = std::make_shared(builder1); + auto honk_vk_1 = std::make_shared(prover_inst_1->get_precomputed()); + auto verifier_inst_1 = std::make_shared(honk_vk_1); + auto prover_inst_2 = std::make_shared(builder2); + auto honk_vk_2 = std::make_shared(prover_inst_2->get_precomputed()); + auto verifier_inst_2 = std::make_shared(honk_vk_2); + // Generate a folding proof + InnerFoldingProver folding_prover({ prover_inst_1, prover_inst_2 }, + { verifier_inst_1, verifier_inst_2 }, + std::make_shared()); + auto folding_proof = folding_prover.prove(); + + // Create a folding verifier circuit + OuterBuilder folding_circuit; + + auto recursive_verifier_inst_1 = std::make_shared(&folding_circuit, verifier_inst_1); + auto 
recursive_vk_and_hash_2 = std::make_shared(folding_circuit, verifier_inst_2->vk); + stdlib::Proof stdlib_proof(folding_circuit, folding_proof.proof); + + auto recursive_transcript = std::make_shared(); + auto verifier = FoldingRecursiveVerifier{ + &folding_circuit, recursive_verifier_inst_1, recursive_vk_and_hash_2, recursive_transcript + }; + std::shared_ptr accumulator; + for (size_t idx = 0; idx < num_verifiers; idx++) { + verifier.transcript->enable_manifest(); + accumulator = verifier.verify_folding_proof(stdlib_proof); + if (idx < num_verifiers - 1) { // else the transcript is null in the test below + auto recursive_vk_and_hash = std::make_shared(folding_circuit, verifier_inst_1->vk); + verifier = FoldingRecursiveVerifier{ + &folding_circuit, accumulator, recursive_vk_and_hash, recursive_transcript + }; + } + } + { + stdlib::recursion::honk::DefaultIO::add_default(folding_circuit); + // inefficiently check finalized size + folding_circuit.finalize_circuit(/* ensure_nonzero= */ true); + info("Folding Recursive Verifier: num gates finalized = ", folding_circuit.num_gates); + auto decider_pk = std::make_shared(folding_circuit); + info("Dyadic size of verifier circuit: ", decider_pk->dyadic_size()); + auto honk_vk = std::make_shared(decider_pk->get_precomputed()); + OuterProver prover(decider_pk, honk_vk); + OuterVerifier verifier(honk_vk); + auto proof = prover.construct_proof(); + bool verified = verifier.template verify_proof(proof).result; + + ASSERT_TRUE(verified); + } + EXPECT_EQ(folding_circuit.failed(), false) << folding_circuit.err(); + auto graph = cdg::MegaStaticAnalyzer(folding_circuit); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); + EXPECT_EQ(variables_in_one_gate.size(), 0); + auto connected_components = graph.find_connected_components(/*return_all_connected_components==*/false); + EXPECT_EQ(connected_components.size(), 1); + if (connected_components.size() > 1) { + graph.print_connected_components_info(); + } + } + + static 
void test_full_protogalaxy_recursive() + { + // Create two arbitrary circuits for the first round of folding + InnerBuilder builder1; + create_function_circuit(builder1); + InnerBuilder builder2; + builder2.add_public_variable(FF(1)); + create_function_circuit(builder2); + + auto prover_inst_1 = std::make_shared(builder1); + auto honk_vk_1 = std::make_shared(prover_inst_1->get_precomputed()); + auto verifier_inst_1 = std::make_shared(honk_vk_1); + auto prover_inst_2 = std::make_shared(builder2); + auto honk_vk_2 = std::make_shared(prover_inst_2->get_precomputed()); + auto verifier_inst_2 = std::make_shared(honk_vk_2); + // Generate a folding proof + InnerFoldingProver folding_prover({ prover_inst_1, prover_inst_2 }, + { verifier_inst_1, verifier_inst_2 }, + std::make_shared()); + auto folding_proof = folding_prover.prove(); + + // Create a folding verifier circuit + OuterBuilder folding_circuit; + auto recursive_verifier_inst_1 = std::make_shared(&folding_circuit, verifier_inst_1); + auto recursive_vk_and_hash_2 = std::make_shared(folding_circuit, verifier_inst_2->vk); + stdlib::Proof stdlib_proof(folding_circuit, folding_proof.proof); + + auto verifier = FoldingRecursiveVerifier{ &folding_circuit, + recursive_verifier_inst_1, + recursive_vk_and_hash_2, + std::make_shared() }; + verifier.transcript->enable_manifest(); + auto recursive_verifier_native_accum = verifier.verify_folding_proof(stdlib_proof); + auto native_verifier_acc = + std::make_shared(recursive_verifier_native_accum->get_value()); + + // Perform native folding verification and ensure it returns the same result (either true or false) as + // calling check_circuit on the recursive folding verifier + InnerFoldingVerifier native_folding_verifier({ verifier_inst_1, verifier_inst_2 }, + std::make_shared()); + native_folding_verifier.transcript->enable_manifest(); + auto verifier_accumulator = native_folding_verifier.verify_folding_proof(folding_proof.proof); + + auto native_decider_transcript = 
std::make_shared(); + auto native_accum_hash = verifier_accumulator->hash_through_transcript("", *native_decider_transcript); + native_decider_transcript->add_to_hash_buffer("accum_hash", native_accum_hash); + + InnerDeciderProver decider_prover(folding_proof.accumulator, native_decider_transcript); + decider_prover.construct_proof(); + auto decider_proof = decider_prover.export_proof(); + + OuterBuilder decider_circuit; + + auto stdlib_verifier_acc = std::make_shared(&decider_circuit, native_verifier_acc); + auto stdlib_verifier_transcript = std::make_shared(); + auto stdlib_accum_hash = stdlib_verifier_acc->hash_through_transcript("", *stdlib_verifier_transcript); + + // Manually hashing the accumulator to ensure it gets a proper origin tag + stdlib_verifier_transcript->add_to_hash_buffer("accum_hash", stdlib_accum_hash); + DeciderRecursiveVerifier decider_verifier{ &decider_circuit, stdlib_verifier_acc, stdlib_verifier_transcript }; + auto pairing_points = decider_verifier.verify_proof(decider_proof); + + // IO + DefaultIO inputs; + inputs.pairing_inputs = pairing_points; + inputs.set_public(); + + info("Decider Recursive Verifier: num gates = ", decider_circuit.num_gates); + // Check for a failure flag in the recursive verifier circuit + EXPECT_EQ(decider_circuit.failed(), false) << decider_circuit.err(); + auto graph = cdg::MegaStaticAnalyzer(decider_circuit); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); + EXPECT_EQ(variables_in_one_gate.size(), 0); + auto connected_components = graph.find_connected_components(/*return_all_connected_components==*/false); + EXPECT_EQ(connected_components.size(), 1); + if (variables_in_one_gate.size() > 0) { + for (const auto& elem : variables_in_one_gate) { + info("elem == ", elem); + } + } + }; +}; + +TEST_F(BoomerangProtogalaxyRecursiveTests, RecursiveFoldingTestOneVerifier) +{ + BoomerangProtogalaxyRecursiveTests::test_recursive_folding(/* num_verifiers= */ 1); +} + 
+TEST_F(BoomerangProtogalaxyRecursiveTests, RecursiveFoldingTestTwoVerifiers) +{ + BoomerangProtogalaxyRecursiveTests::test_recursive_folding(/* num_verifiers= */ 2); +} + +TEST_F(BoomerangProtogalaxyRecursiveTests, FullProtogalaxyRecursiveTest) +{ + BoomerangProtogalaxyRecursiveTests::test_full_protogalaxy_recursive(); +} +} // namespace bb::stdlib::recursion::honk diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_ram_rom.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_ram_rom.test.cpp index a6e981b0ed09..d8b1bc6f7d09 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_ram_rom.test.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_ram_rom.test.cpp @@ -52,7 +52,7 @@ TEST(boomerang_rom_ram_table, graph_description_rom_table) StaticAnalyzer graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); for (const auto& elem : variables_in_one_gate) { EXPECT_EQ(variables_in_one_gate.contains(elem), true); } @@ -94,7 +94,7 @@ TEST(boomerang_rom_ram_table, graph_description_ram_table_read) StaticAnalyzer graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); for (const auto& elem : variables_in_one_gate) { EXPECT_EQ(safety_variables.contains(elem), true); } @@ -167,7 +167,7 @@ TEST(boomerang_rom_ram_table, graph_description_ram_table_write) StaticAnalyzer graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); 
EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); for (const auto& elem : variables_in_one_gate) { EXPECT_EQ(safety_variables.contains(elem), true); } diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_sha256.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_sha256.test.cpp index 81cd7bb9f5af..6a39eca0f335 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_sha256.test.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_sha256.test.cpp @@ -4,7 +4,6 @@ #include "barretenberg/crypto/sha256/sha256.hpp" #include "barretenberg/stdlib/hash/sha256/sha256.hpp" #include "barretenberg/stdlib/primitives/byte_array/byte_array.hpp" -#include "barretenberg/stdlib/primitives/packed_byte_array/packed_byte_array.hpp" #include "barretenberg/stdlib_circuit_builders/plookup_tables/plookup_tables.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp" @@ -17,28 +16,46 @@ using namespace bb::stdlib; using namespace cdg; using Builder = UltraCircuitBuilder; -using byte_array_pt = byte_array; -using packed_byte_array_pt = packed_byte_array; -using field_pt = field_t; +using byte_array_ct = byte_array; +using field_ct = field_t; + +/** + * @brief Given a `byte_array` object, slice it into chunks of size `num_bytes_in_chunk` and compute field elements + * reconstructed from these chunks. 
+ */ + +std::vector pack_bytes_into_field_elements(const byte_array_ct& input, size_t num_bytes_in_chunk = 4) +{ + std::vector> result; + const size_t byte_len = input.size(); + + for (size_t i = 0; i < byte_len; i += num_bytes_in_chunk) { + byte_array_ct chunk = input.slice(i, std::min(num_bytes_in_chunk, byte_len - i)); + result.emplace_back(static_cast(chunk)); + } + + return result; +} /** static analyzer usually prints input and output variables as variables in one gate. In tests these variables are not dangerous and usually we can filter them by adding gate for fixing witness. Then these variables will be in 2 gates, and static analyzer won't print them. functions fix_vector and fix_byte_array do it - for vector of variables and packed_byte_array respectively + for vector of variables and byte_array respectively */ -void fix_vector(std::vector& vector) +void fix_vector(std::vector& vector) { for (auto& elem : vector) { elem.fix_witness(); } } -void fix_byte_array(packed_byte_array_pt& input) +void fix_byte_array(byte_array_ct& input) { - std::vector limbs = input.get_limbs(); - fix_vector(limbs); + for (size_t idx = 0; idx < input.size(); idx++) { + input[idx].fix_witness(); + } } /** @@ -55,18 +72,18 @@ TEST(boomerang_stdlib_sha256, test_graph_for_sha256_55_bytes) // 55 bytes is the largest number of bytes that can be hashed in a single block, // accounting for the single padding bit, and the 64 size bits required by the SHA-256 standard. auto builder = Builder(); - packed_byte_array_pt input(&builder, "An 8 character password? Snow White and the 7 Dwarves.."); + byte_array_ct input(&builder, "An 8 character password? 
Snow White and the 7 Dwarves.."); fix_byte_array(input); - packed_byte_array_pt output_bits = stdlib::SHA256::hash(input); + byte_array_ct output_bytes = stdlib::SHA256::hash(input); - std::vector output = output_bits.to_unverified_byte_slices(4); + std::vector output = pack_bytes_into_field_elements(output_bytes); fix_vector(output); StaticAnalyzer graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -87,7 +104,7 @@ HEAVY_TEST(boomerang_stdlib_sha256, test_graph_for_sha256_NIST_vector_five) { auto builder = Builder(); - packed_byte_array_pt input( + byte_array_ct input( &builder, "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" @@ -101,14 +118,14 @@ HEAVY_TEST(boomerang_stdlib_sha256, test_graph_for_sha256_NIST_vector_five) "AAAAAAAAAA"); fix_byte_array(input); - packed_byte_array_pt output_bits = stdlib::SHA256::hash(input); + byte_array_ct output_bytes = stdlib::SHA256::hash(input); - std::vector output = output_bits.to_unverified_byte_slices(4); + std::vector output = pack_bytes_into_field_elements(output_bytes); fix_vector(output); StaticAnalyzer graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); - auto variables_in_one_gate = graph.show_variables_in_one_gate(builder); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); EXPECT_EQ(connected_components.size(), 1); } @@ -126,14 +143,14 @@ HEAVY_TEST(boomerang_stdlib_sha256, test_graph_for_sha256_NIST_vector_five) TEST(boomerang_stdlib_sha256, 
test_graph_for_sha256_NIST_vector_one) { auto builder = Builder(); - packed_byte_array_pt input(&builder, "abc"); + byte_array_ct input(&builder, "abc"); fix_byte_array(input); - packed_byte_array_pt output_bits = stdlib::SHA256::hash(input); - fix_byte_array(output_bits); + byte_array_ct output_bytes = stdlib::SHA256::hash(input); + fix_byte_array(output_bytes); StaticAnalyzer graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - std::unordered_set variables_in_one_gate = graph.show_variables_in_one_gate(builder); + std::unordered_set variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -146,14 +163,14 @@ TEST(boomerang_stdlib_sha256, test_graph_for_sha256_NIST_vector_one) TEST(boomerang_stdlib_sha256, test_graph_for_sha256_NIST_vector_two) { auto builder = Builder(); - packed_byte_array_pt input(&builder, "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"); + byte_array_ct input(&builder, "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"); fix_byte_array(input); - packed_byte_array_pt output_bits = stdlib::SHA256::hash(input); - fix_byte_array(output_bits); + byte_array_ct output_bytes = stdlib::SHA256::hash(input); + fix_byte_array(output_bytes); StaticAnalyzer graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - std::unordered_set variables_in_one_gate = graph.show_variables_in_one_gate(builder); + std::unordered_set variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -168,14 +185,14 @@ TEST(boomerang_stdlib_sha256, test_graph_for_sha256_NIST_vector_three) auto builder = Builder(); // one byte, 0xbd - packed_byte_array_pt input(&builder, std::vector{ 0xbd }); + byte_array_ct input(&builder, std::vector{ 0xbd }); fix_byte_array(input); - packed_byte_array_pt 
output_bits = stdlib::SHA256::hash(input); - fix_byte_array(output_bits); + byte_array_ct output_bytes = stdlib::SHA256::hash(input); + fix_byte_array(output_bytes); StaticAnalyzer graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - std::unordered_set variables_in_one_gate = graph.show_variables_in_one_gate(builder); + std::unordered_set variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -190,13 +207,13 @@ TEST(boomerang_stdlib_sha256, test_graph_for_sha256_NIST_vector_four) auto builder = Builder(); // 4 bytes, 0xc98c8e55 - packed_byte_array_pt input(&builder, std::vector{ 0xc9, 0x8c, 0x8e, 0x55 }); + byte_array_ct input(&builder, std::vector{ 0xc9, 0x8c, 0x8e, 0x55 }); fix_byte_array(input); - packed_byte_array_pt output_bits = stdlib::SHA256::hash(input); - fix_byte_array(output_bits); + byte_array_ct output_bytes = stdlib::SHA256::hash(input); + fix_byte_array(output_bytes); StaticAnalyzer graph = StaticAnalyzer(builder); auto connected_components = graph.find_connected_components(); EXPECT_EQ(connected_components.size(), 1); - std::unordered_set variables_in_one_gate = graph.show_variables_in_one_gate(builder); + std::unordered_set variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_ultra_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_ultra_recursive_verifier.test.cpp index d168e8e042aa..c520f8f3242c 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_ultra_recursive_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_ultra_recursive_verifier.test.cpp @@ -27,7 +27,7 @@ template class BoomerangRecursiveVerifierTest : publi using 
InnerProver = UltraProver_; using InnerVerifier = UltraVerifier_; using InnerBuilder = typename InnerFlavor::CircuitBuilder; - using InnerDeciderProvingKey = DeciderProvingKey_; + using InnerProverInstance = ProverInstance_; using InnerCurve = bn254; using InnerCommitment = InnerFlavor::Commitment; using InnerFF = InnerFlavor::FF; @@ -40,7 +40,7 @@ template class BoomerangRecursiveVerifierTest : publi std::conditional_t, UltraRollupFlavor, UltraFlavor>>; using OuterProver = UltraProver_; using OuterVerifier = UltraVerifier_; - using OuterDeciderProvingKey = DeciderProvingKey_; + using OuterProverInstance = ProverInstance_; using RecursiveVerifier = UltraRecursiveVerifier_; using VerificationKey = typename RecursiveVerifier::VerificationKey; @@ -82,7 +82,7 @@ template class BoomerangRecursiveVerifierTest : publi if constexpr (HasIPAAccumulator) { auto [stdlib_opening_claim, ipa_proof] = - IPA>::create_fake_ipa_claim_and_proof(builder); + IPA>::create_random_valid_ipa_claim_and_proof(builder); stdlib_opening_claim.set_public(); builder.ipa_proof = ipa_proof; } @@ -102,9 +102,10 @@ template class BoomerangRecursiveVerifierTest : publi auto inner_circuit = create_inner_circuit(); // Generate a proof over the inner circuit - auto proving_key = std::make_shared(inner_circuit); - auto verification_key = std::make_shared(proving_key->get_precomputed()); - InnerProver inner_prover(proving_key, verification_key); + auto prover_instance = std::make_shared(inner_circuit); + auto verification_key = + std::make_shared(prover_instance->get_precomputed()); + InnerProver inner_prover(prover_instance, verification_key); auto inner_proof = inner_prover.construct_proof(); // Create a recursive verification circuit for the proof of the inner circuit @@ -112,8 +113,10 @@ template class BoomerangRecursiveVerifierTest : publi auto stdlib_vk_and_hash = std::make_shared(outer_circuit, verification_key); RecursiveVerifier verifier{ &outer_circuit, stdlib_vk_and_hash }; - 
verifier.key->vk_and_hash->vk->num_public_inputs.fix_witness(); - verifier.key->vk_and_hash->vk->pub_inputs_offset.fix_witness(); + verifier.verifier_instance->vk_and_hash->vk->num_public_inputs.fix_witness(); + verifier.verifier_instance->vk_and_hash->vk->pub_inputs_offset.fix_witness(); + // It's currently un-used + verifier.verifier_instance->vk_and_hash->vk->log_circuit_size.fix_witness(); StdlibProof stdlib_inner_proof(outer_circuit, inner_proof); VerifierOutput output = verifier.template verify_proof>(stdlib_inner_proof); @@ -134,9 +137,9 @@ template class BoomerangRecursiveVerifierTest : publi outer_circuit.finalize_circuit(false); auto graph = cdg::StaticAnalyzer(outer_circuit); auto connected_components = graph.find_connected_components(); - EXPECT_EQ(connected_components.size(), 2); + EXPECT_EQ(connected_components.size(), 1); info("Connected components: ", connected_components.size()); - auto variables_in_one_gate = graph.show_variables_in_one_gate(outer_circuit); + auto variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 2); } }; diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/variable_gates_count.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/variable_gates_count.test.cpp index 91e77c30e146..5d1095846e6c 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/variable_gates_count.test.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/variable_gates_count.test.cpp @@ -20,11 +20,11 @@ TEST(boomerang_ultra_circuit_constructor, test_variable_gates_count_for_decompos auto e = fr(d); auto a_idx = circuit_constructor.add_variable(fr(e)); circuit_constructor.create_add_gate( - { a_idx, circuit_constructor.zero_idx, circuit_constructor.zero_idx, 1, 0, 0, -fr(e) }); + { a_idx, circuit_constructor.zero_idx(), circuit_constructor.zero_idx(), 1, 0, 0, -fr(e) }); circuit_constructor.decompose_into_default_range(a_idx, 134); 
StaticAnalyzer graph = StaticAnalyzer(circuit_constructor); - std::unordered_set variables_in_on_gate = graph.show_variables_in_one_gate(circuit_constructor); + std::unordered_set variables_in_on_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_on_gate.size(), 0); } @@ -36,11 +36,11 @@ TEST(boomerang_ultra_circuit_constructor, test_variable_gates_count_for_decompos auto e = fr(d); auto a_idx = circuit_constructor.add_variable(fr(e)); circuit_constructor.create_add_gate( - { a_idx, circuit_constructor.zero_idx, circuit_constructor.zero_idx, 1, 0, 0, -fr(e) }); + { a_idx, circuit_constructor.zero_idx(), circuit_constructor.zero_idx(), 1, 0, 0, -fr(e) }); circuit_constructor.decompose_into_default_range(a_idx, 42); StaticAnalyzer graph = StaticAnalyzer(circuit_constructor); - auto variables_in_on_gate = graph.show_variables_in_one_gate(circuit_constructor); + auto variables_in_on_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_on_gate.size(), 0); } @@ -78,14 +78,14 @@ TEST(boomerang_ultra_circuit_constructor, test_variable_gates_count_for_two_deco auto a1_idx = circuit_constructor.add_variable(fr(e1)); auto a2_idx = circuit_constructor.add_variable(fr(e2)); circuit_constructor.create_add_gate( - { a1_idx, circuit_constructor.zero_idx, circuit_constructor.zero_idx, 1, 0, 0, -fr(e1) }); + { a1_idx, circuit_constructor.zero_idx(), circuit_constructor.zero_idx(), 1, 0, 0, -fr(e1) }); circuit_constructor.create_add_gate( - { a2_idx, circuit_constructor.zero_idx, circuit_constructor.zero_idx, 1, 0, 0, -fr(e2) }); + { a2_idx, circuit_constructor.zero_idx(), circuit_constructor.zero_idx(), 1, 0, 0, -fr(e2) }); circuit_constructor.decompose_into_default_range(a1_idx, 42); circuit_constructor.decompose_into_default_range(a2_idx, 42); StaticAnalyzer graph = StaticAnalyzer(circuit_constructor); - std::unordered_set variables_in_one_gate = graph.show_variables_in_one_gate(circuit_constructor); + std::unordered_set variables_in_one_gate = 
graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 0); } @@ -110,7 +110,7 @@ TEST(boomerang_ultra_circuit_constructor, test_decompose_with_boolean_gates) } StaticAnalyzer graph = StaticAnalyzer(circuit_constructor); - std::unordered_set variables_in_one_gate = graph.show_variables_in_one_gate(circuit_constructor); + std::unordered_set variables_in_one_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_one_gate.size(), 22); } @@ -122,10 +122,10 @@ TEST(boomerang_ultra_circuit_constructor, test_decompose_for_6_bit_number) auto e = fr(d); auto a_idx = circuit_constructor.add_variable(fr(d)); circuit_constructor.create_add_gate( - { a_idx, circuit_constructor.zero_idx, circuit_constructor.zero_idx, 1, 0, 0, -fr(e) }); + { a_idx, circuit_constructor.zero_idx(), circuit_constructor.zero_idx(), 1, 0, 0, -fr(e) }); circuit_constructor.decompose_into_default_range(a_idx, 6); StaticAnalyzer graph = StaticAnalyzer(circuit_constructor); - std::unordered_set variables_in_on_gate = graph.show_variables_in_one_gate(circuit_constructor); + std::unordered_set variables_in_on_gate = graph.get_variables_in_one_gate(); EXPECT_EQ(variables_in_on_gate.size(), 0); } diff --git a/barretenberg/cpp/src/barretenberg/circuit_checker/translator_circuit_checker.cpp b/barretenberg/cpp/src/barretenberg/circuit_checker/translator_circuit_checker.cpp index 9c16cb0c33d4..79e509e8593b 100644 --- a/barretenberg/cpp/src/barretenberg/circuit_checker/translator_circuit_checker.cpp +++ b/barretenberg/cpp/src/barretenberg/circuit_checker/translator_circuit_checker.cpp @@ -85,389 +85,431 @@ bool TranslatorCircuitChecker::check(const Builder& circuit) return mini_accumulator; }; - // TODO(https: // github.com/AztecProtocol/barretenberg/issues/1367): Report all failures more explicitly and - // consider making use of relations. 
- - for (size_t i = 2; i < circuit.num_gates - 1; i += 2) { - { - // Get the values of P.x - Fr op_code = circuit.get_variable(op_wire[i]); - Fr p_x_lo = circuit.get_variable(x_lo_y_hi_wire[i]); - Fr p_x_hi = circuit.get_variable(x_hi_z_1_wire[i]); - Fr p_x_0 = circuit.get_variable(p_x_0_p_x_1_wire[i]); - Fr p_x_1 = circuit.get_variable(p_x_0_p_x_1_wire[i + 1]); - Fr p_x_2 = circuit.get_variable(p_x_2_p_x_3_wire[i]); - Fr p_x_3 = circuit.get_variable(p_x_2_p_x_3_wire[i + 1]); - const std::vector p_x_binary_limbs = { p_x_0, p_x_1, p_x_2, p_x_3 }; - - // P.y - Fr p_y_lo = circuit.get_variable(y_lo_z_2_wire[i]); - Fr p_y_hi = circuit.get_variable(x_lo_y_hi_wire[i + 1]); - Fr p_y_0 = circuit.get_variable(p_y_0_p_y_1_wire[i]); - Fr p_y_1 = circuit.get_variable(p_y_0_p_y_1_wire[i + 1]); - Fr p_y_2 = circuit.get_variable(p_y_2_p_y_3_wire[i]); - Fr p_y_3 = circuit.get_variable(p_y_2_p_y_3_wire[i + 1]); - const std::vector p_y_binary_limbs = { p_y_0, p_y_1, p_y_2, p_y_3 }; - // z1, z2 - Fr z_1 = circuit.get_variable(x_hi_z_1_wire[i + 1]); - Fr z_2 = circuit.get_variable(y_lo_z_2_wire[i + 1]); - - Fr z_1_lo = circuit.get_variable(z_lo_wire[i]); - Fr z_2_lo = circuit.get_variable(z_lo_wire[i + 1]); - Fr z_1_hi = circuit.get_variable(z_hi_wire[i]); - Fr z_2_hi = circuit.get_variable(z_hi_wire[i + 1]); - - const std::vector z_1_binary_limbs = { z_1_lo, z_1_hi }; - const std::vector z_2_binary_limbs = { z_2_lo, z_2_hi }; - // Relation limbs - Fr low_wide_relation_limb = circuit.get_variable(relation_wide_limbs_wire[i]); - Fr high_wide_relation_limb = circuit.get_variable(relation_wide_limbs_wire[i + 1]); - - // Current accumulator (updated value) - const std::vector current_accumulator_binary_limbs = { - circuit.get_variable(accumulators_binary_limbs_0_wire[i]), - circuit.get_variable(accumulators_binary_limbs_1_wire[i]), - circuit.get_variable(accumulators_binary_limbs_2_wire[i]), - circuit.get_variable(accumulators_binary_limbs_3_wire[i]), - }; + auto 
check_binary_limbs_equality = [&](const std::vector& first, const std::vector& second, size_t gate) { + for (const auto [first_limb, second_limb] : zip_view(first, second)) { + if (first_limb != second_limb) { + return report_fail("Binary limbs are not equal = ", gate); + } + } + return true; + }; - // Previous accumulator - const std::vector previous_accumulator_binary_limbs = { - circuit.get_variable(accumulators_binary_limbs_0_wire[i + 1]), - circuit.get_variable(accumulators_binary_limbs_1_wire[i + 1]), - circuit.get_variable(accumulators_binary_limbs_2_wire[i + 1]), - circuit.get_variable(accumulators_binary_limbs_3_wire[i + 1]), + auto check_accumulator_transfer = [&](const std::vector& previous_accumulator, size_t gate) { + if (gate % 2 != 1) { + return report_fail("accumulator transfer should only be checked at odd gates = ", gate); + } + if (gate + 1 < circuit.num_gates - 1) { + // Check that the next gate's current accumulator equals this gate's previous accumulator + const std::vector next_gate_current_accumulator = { + circuit.get_variable(accumulators_binary_limbs_0_wire[gate + 1]), + circuit.get_variable(accumulators_binary_limbs_1_wire[gate + 1]), + circuit.get_variable(accumulators_binary_limbs_2_wire[gate + 1]), + circuit.get_variable(accumulators_binary_limbs_3_wire[gate + 1]), }; + if (!check_binary_limbs_equality(next_gate_current_accumulator, previous_accumulator, gate + 1)) { + return false; + } + } else { + // Check accumulator starts at zero + for (const auto& limb : previous_accumulator) { + if (limb != Fr(0)) { + return report_fail("accumulator doesn't start with 0 = ", gate + 1); + } + } + } + return true; + }; - // Quotient - const std::vector quotient_binary_limbs = { - circuit.get_variable(quotient_low_binary_limbs[i]), - circuit.get_variable(quotient_low_binary_limbs[i + 1]), - circuit.get_variable(quotient_high_binary_limbs[i]), - circuit.get_variable(quotient_high_binary_limbs[i + 1]), - }; + auto check_no_op = + [&](const 
std::vector& current_accumulator, const std::vector& previous_accumulator, size_t gate) { + if (!check_binary_limbs_equality(current_accumulator, previous_accumulator, gate)) { + return false; + } + return check_accumulator_transfer(previous_accumulator, gate + 1); + }; - const size_t NUM_MICRO_LIMBS = Builder::NUM_MICRO_LIMBS; + auto check_random_op_code = [&](const Fr op_code, size_t gate) { + if (gate % 2 == 0) { + if (op_code == Fr(0) || op_code == Fr(3) || op_code == Fr(4) || op_code == Fr(8)) { + return report_fail("Opcode should be random value at even gate = ", gate); + } + } else { + if (op_code == Fr(0)) { + return report_fail("Opcode should be 0 at odd gate = ", gate); + } + } + return true; + }; - // Get micro chunks for checking decomposition and range - auto p_x_micro_chunks = { - get_sequential_micro_chunks(i, WireIds::P_X_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, WireIds::P_X_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i, WireIds::P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, WireIds::P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS) - }; - auto p_y_micro_chunks = { - get_sequential_micro_chunks(i, WireIds::P_Y_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, WireIds::P_Y_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i, WireIds::P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, WireIds::P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS) - }; - auto z_1_micro_chunks = { - get_sequential_micro_chunks(i, WireIds::Z_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i, WireIds::Z_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - }; + // TODO(https: // github.com/AztecProtocol/barretenberg/issues/1367): Report all failures more explicitly and + // consider making use of relations. 
+ auto in_random_range = [&](size_t i) { + return (i >= 2 * Builder::NUM_NO_OPS_START && i < RESULT_ROW) || + (i >= circuit.num_gates - (circuit.avm_mode ? 0 : 2 * Builder::NUM_RANDOM_OPS_END) && + i < circuit.num_gates); + }; + for (size_t i = 2; i < circuit.num_gates - 1; i += 2) { - auto z_2_micro_chunks = { + // Ensure random op is present in expected ranges + Fr op_code = circuit.get_variable(op_wire[i]); + if (in_random_range(i)) { + check_random_op_code(op_code, i); + Fr op_code_next = circuit.get_variable(op_wire[i + 1]); + check_random_op_code(op_code_next, i + 1); + continue; + } - get_sequential_micro_chunks(i + 1, WireIds::Z_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, WireIds::Z_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS) + // Current accumulator (updated value) + const std::vector current_accumulator_binary_limbs = { + circuit.get_variable(accumulators_binary_limbs_0_wire[i]), + circuit.get_variable(accumulators_binary_limbs_1_wire[i]), + circuit.get_variable(accumulators_binary_limbs_2_wire[i]), + circuit.get_variable(accumulators_binary_limbs_3_wire[i]), + }; + + // Previous accumulator + const std::vector previous_accumulator_binary_limbs = { + circuit.get_variable(accumulators_binary_limbs_0_wire[i + 1]), + circuit.get_variable(accumulators_binary_limbs_1_wire[i + 1]), + circuit.get_variable(accumulators_binary_limbs_2_wire[i + 1]), + circuit.get_variable(accumulators_binary_limbs_3_wire[i + 1]), + }; + + if (op_code == 0) { + if (!check_no_op(current_accumulator_binary_limbs, previous_accumulator_binary_limbs, i)) { + return false; }; + continue; + } - auto current_accumulator_micro_chunks = { - get_sequential_micro_chunks(i, WireIds::ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, WireIds::ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i, WireIds::ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - 
get_sequential_micro_chunks(i + 1, WireIds::ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - }; - auto quotient_micro_chunks = { - get_sequential_micro_chunks(i, WireIds::QUOTIENT_LOW_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, WireIds::QUOTIENT_LOW_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i, WireIds::QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, WireIds::QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), - }; + Fr p_x_lo = circuit.get_variable(x_lo_y_hi_wire[i]); + Fr p_x_hi = circuit.get_variable(x_hi_z_1_wire[i]); + Fr p_x_0 = circuit.get_variable(p_x_0_p_x_1_wire[i]); + Fr p_x_1 = circuit.get_variable(p_x_0_p_x_1_wire[i + 1]); + Fr p_x_2 = circuit.get_variable(p_x_2_p_x_3_wire[i]); + Fr p_x_3 = circuit.get_variable(p_x_2_p_x_3_wire[i + 1]); + const std::vector p_x_binary_limbs = { p_x_0, p_x_1, p_x_2, p_x_3 }; + + // P.y + Fr p_y_lo = circuit.get_variable(y_lo_z_2_wire[i]); + Fr p_y_hi = circuit.get_variable(x_lo_y_hi_wire[i + 1]); + Fr p_y_0 = circuit.get_variable(p_y_0_p_y_1_wire[i]); + Fr p_y_1 = circuit.get_variable(p_y_0_p_y_1_wire[i + 1]); + Fr p_y_2 = circuit.get_variable(p_y_2_p_y_3_wire[i]); + Fr p_y_3 = circuit.get_variable(p_y_2_p_y_3_wire[i + 1]); + const std::vector p_y_binary_limbs = { p_y_0, p_y_1, p_y_2, p_y_3 }; + // z1, z2 + Fr z_1 = circuit.get_variable(x_hi_z_1_wire[i + 1]); + Fr z_2 = circuit.get_variable(y_lo_z_2_wire[i + 1]); + + Fr z_1_lo = circuit.get_variable(z_lo_wire[i]); + Fr z_2_lo = circuit.get_variable(z_lo_wire[i + 1]); + Fr z_1_hi = circuit.get_variable(z_hi_wire[i]); + Fr z_2_hi = circuit.get_variable(z_hi_wire[i + 1]); + + const std::vector z_1_binary_limbs = { z_1_lo, z_1_hi }; + const std::vector z_2_binary_limbs = { z_2_lo, z_2_hi }; + // Relation limbs + Fr low_wide_relation_limb = circuit.get_variable(relation_wide_limbs_wire[i]); + Fr high_wide_relation_limb = 
circuit.get_variable(relation_wide_limbs_wire[i + 1]); + + // Quotient + const std::vector quotient_binary_limbs = { + circuit.get_variable(quotient_low_binary_limbs[i]), + circuit.get_variable(quotient_low_binary_limbs[i + 1]), + circuit.get_variable(quotient_high_binary_limbs[i]), + circuit.get_variable(quotient_high_binary_limbs[i + 1]), + }; + + const size_t NUM_MICRO_LIMBS = Builder::NUM_MICRO_LIMBS; + + // Get micro chunks for checking decomposition and range + auto p_x_micro_chunks = { + get_sequential_micro_chunks(i, WireIds::P_X_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, WireIds::P_X_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i, WireIds::P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, WireIds::P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS) + }; + auto p_y_micro_chunks = { + get_sequential_micro_chunks(i, WireIds::P_Y_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, WireIds::P_Y_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i, WireIds::P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, WireIds::P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS) + }; + auto z_1_micro_chunks = { + get_sequential_micro_chunks(i, WireIds::Z_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i, WireIds::Z_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + }; + + auto z_2_micro_chunks = { + + get_sequential_micro_chunks(i + 1, WireIds::Z_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, WireIds::Z_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS) + }; + + auto current_accumulator_micro_chunks = { + get_sequential_micro_chunks(i, WireIds::ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, WireIds::ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_0, 
NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i, WireIds::ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, WireIds::ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + }; + auto quotient_micro_chunks = { + get_sequential_micro_chunks(i, WireIds::QUOTIENT_LOW_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, WireIds::QUOTIENT_LOW_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i, WireIds::QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, WireIds::QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), + }; + + // Lambda for checking the correctness of decomposition of values in the Queue into limbs for + // checking the relation + auto check_wide_limb_into_binary_limb_relation = [](const std::vector& wide_limbs, + const std::vector& binary_limbs) { + BB_ASSERT_EQ(wide_limbs.size() * 2, binary_limbs.size()); + for (size_t i = 0; i < wide_limbs.size(); i++) { + if ((binary_limbs[i * 2] + Fr(Builder::SHIFT_1) * binary_limbs[i * 2 + 1]) != wide_limbs[i]) { + return false; + } + } + return true; + }; + // Check that everything has been decomposed correctly + // P.xₗₒ = P.xₗₒ_0 + SHIFT_1 * P.xₗₒ_1 + // P.xₕᵢ = P.xₕᵢ_0 + SHIFT_1 * P.xₕᵢ_1 + // z_1 = z_1ₗₒ + SHIFT_1 * z_1ₕᵢ + // z_2 = z_2ₗₒ + SHIFT_2 * z_1ₕᵢ + if (!(check_wide_limb_into_binary_limb_relation({ p_x_lo, p_x_hi }, p_x_binary_limbs) && + check_wide_limb_into_binary_limb_relation({ p_y_lo, p_y_hi }, p_y_binary_limbs) && + check_wide_limb_into_binary_limb_relation({ z_1 }, z_1_binary_limbs) && + check_wide_limb_into_binary_limb_relation({ z_2 }, z_2_binary_limbs))) { + + return report_fail("wide limb decomposition failied at row = ", i); + } - // Lambda for checking the correctness of decomposition of values in the Queue into limbs for - // checking the relation - auto check_wide_limb_into_binary_limb_relation = [](const std::vector& wide_limbs, - const 
std::vector& binary_limbs) { - BB_ASSERT_EQ(wide_limbs.size() * 2, binary_limbs.size()); - for (size_t i = 0; i < wide_limbs.size(); i++) { - if ((binary_limbs[i * 2] + Fr(Builder::SHIFT_1) * binary_limbs[i * 2 + 1]) != wide_limbs[i]) { + enum LimbSeriesType { STANDARD_COORDINATE, Z_SCALAR, QUOTIENT }; + + // Check that limbs have been decomposed into microlimbs correctly + // value = ∑ (2ˡ)ⁱ⋅ chunkᵢ, where 2ˡ is the shift + auto check_micro_limb_decomposition_correctness = [&accumulate_limb_from_micro_chunks]( + const std::vector& binary_limbs, + const std::vector>& micro_limbs, + const LimbSeriesType limb_series_type) { + // Shifts for decompositions + constexpr auto SHIFT_12_TO_14 = Fr(4); + constexpr auto SHIFT_10_TO_14 = Fr(16); + constexpr auto SHIFT_8_TO_14 = Fr(64); + constexpr auto SHIFT_4_TO_14 = Fr(1024); + + BB_ASSERT_EQ(binary_limbs.size(), micro_limbs.size()); + // First check that all the microlimbs are properly range constrained + for (auto& micro_limb_series : micro_limbs) { + for (auto& micro_limb : micro_limb_series) { + if (uint256_t(micro_limb) > Builder::MAX_MICRO_LIMB_SIZE) { return false; } } - return true; - }; - // Check that everything has been decomposed correctly - // P.xₗₒ = P.xₗₒ_0 + SHIFT_1 * P.xₗₒ_1 - // P.xₕᵢ = P.xₕᵢ_0 + SHIFT_1 * P.xₕᵢ_1 - // z_1 = z_1ₗₒ + SHIFT_1 * z_1ₕᵢ - // z_2 = z_2ₗₒ + SHIFT_2 * z_1ₕᵢ - if (!(check_wide_limb_into_binary_limb_relation({ p_x_lo, p_x_hi }, p_x_binary_limbs) && - check_wide_limb_into_binary_limb_relation({ p_y_lo, p_y_hi }, p_y_binary_limbs) && - check_wide_limb_into_binary_limb_relation({ z_1 }, z_1_binary_limbs) && - check_wide_limb_into_binary_limb_relation({ z_2 }, z_2_binary_limbs))) { - - return report_fail("wide limb decomposition failied at row = ", i); } - - enum LimbSeriesType { STANDARD_COORDINATE, Z_SCALAR, QUOTIENT }; - - // Check that limbs have been decomposed into microlimbs correctly - // value = ∑ (2ˡ)ⁱ⋅ chunkᵢ, where 2ˡ is the shift - auto 
check_micro_limb_decomposition_correctness = [&accumulate_limb_from_micro_chunks]( - const std::vector& binary_limbs, - const std::vector>& micro_limbs, - const LimbSeriesType limb_series_type) { - // Shifts for decompositions - constexpr auto SHIFT_12_TO_14 = Fr(4); - constexpr auto SHIFT_10_TO_14 = Fr(16); - constexpr auto SHIFT_8_TO_14 = Fr(64); - constexpr auto SHIFT_4_TO_14 = Fr(1024); - - BB_ASSERT_EQ(binary_limbs.size(), micro_limbs.size()); - // First check that all the microlimbs are properly range constrained - for (auto& micro_limb_series : micro_limbs) { - for (auto& micro_limb : micro_limb_series) { - if (uint256_t(micro_limb) > Builder::MAX_MICRO_LIMB_SIZE) { - return false; - } - } + // For low limbs the last microlimb is used with the shift, so we skip it when reconstructing + // the limb + const size_t SKIPPED_FOR_LOW_LIMBS = 1; + for (size_t i = 0; i < binary_limbs.size() - 1; i++) { + if (binary_limbs[i] != accumulate_limb_from_micro_chunks(micro_limbs[i], SKIPPED_FOR_LOW_LIMBS)) { + return false; } - // For low limbs the last microlimb is used with the shift, so we skip it when reconstructing - // the limb - const size_t SKIPPED_FOR_LOW_LIMBS = 1; - for (size_t i = 0; i < binary_limbs.size() - 1; i++) { - if (binary_limbs[i] != accumulate_limb_from_micro_chunks(micro_limbs[i], SKIPPED_FOR_LOW_LIMBS)) { - return false; - } - // Check last additional constraint (68->70) - if (micro_limbs[i][NUM_MICRO_LIMBS - 1] != (SHIFT_12_TO_14 * micro_limbs[i][NUM_MICRO_LIMBS - 2])) { - return false; - } + // Check last additional constraint (68->70) + if (micro_limbs[i][NUM_MICRO_LIMBS - 1] != (SHIFT_12_TO_14 * micro_limbs[i][NUM_MICRO_LIMBS - 2])) { + return false; } + } - const size_t SKIPPED_FOR_STANDARD = 2; - const size_t SKIPPED_FOR_Z_SCALARS = 1; - const size_t SKIPPED_FOR_QUOTIENT = 2; - switch (limb_series_type) { - case STANDARD_COORDINATE: - // For standard Fq value the highest limb is 50 bits, so we skip the top 2 microlimbs - if 
(binary_limbs[binary_limbs.size() - 1] != - accumulate_limb_from_micro_chunks(micro_limbs[binary_limbs.size() - 1], SKIPPED_FOR_STANDARD)) { - return false; - } - // Check last additional constraint (50->56) - if (micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_STANDARD] != - (SHIFT_8_TO_14 * - micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_STANDARD - 1])) { - - return false; - } - break; - // For z top limbs we need as many microlimbs as for the low limbs - case Z_SCALAR: - if (binary_limbs[binary_limbs.size() - 1] != - accumulate_limb_from_micro_chunks(micro_limbs[binary_limbs.size() - 1], - SKIPPED_FOR_Z_SCALARS)) { - return false; - } - // Check last additional constraint (60->70) - if (micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_Z_SCALARS] != - (SHIFT_4_TO_14 * - micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_Z_SCALARS - 1])) { - return false; - } - break; - // Quotient also doesn't need the top 2 - case QUOTIENT: - if (binary_limbs[binary_limbs.size() - 1] != - accumulate_limb_from_micro_chunks(micro_limbs[binary_limbs.size() - 1], SKIPPED_FOR_QUOTIENT)) { - return false; - } - // Check last additional constraint (52->56) - if (micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_QUOTIENT] != - (SHIFT_10_TO_14 * - micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_QUOTIENT - 1])) { - return false; - } - break; - default: - abort(); + const size_t SKIPPED_FOR_STANDARD = 2; + const size_t SKIPPED_FOR_Z_SCALARS = 1; + const size_t SKIPPED_FOR_QUOTIENT = 2; + switch (limb_series_type) { + case STANDARD_COORDINATE: + // For standard Fq value the highest limb is 50 bits, so we skip the top 2 microlimbs + if (binary_limbs[binary_limbs.size() - 1] != + accumulate_limb_from_micro_chunks(micro_limbs[binary_limbs.size() - 1], SKIPPED_FOR_STANDARD)) { + return false; } + // Check last additional constraint (50->56) + if (micro_limbs[binary_limbs.size() - 
1][NUM_MICRO_LIMBS - SKIPPED_FOR_STANDARD] != + (SHIFT_8_TO_14 * + micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_STANDARD - 1])) { - return true; - }; - // Check all micro limb decompositions - if (!check_micro_limb_decomposition_correctness(p_x_binary_limbs, p_x_micro_chunks, STANDARD_COORDINATE)) { - return false; - } - if (!check_micro_limb_decomposition_correctness(p_y_binary_limbs, p_y_micro_chunks, STANDARD_COORDINATE)) { - return false; - } - if (!check_micro_limb_decomposition_correctness(z_1_binary_limbs, z_1_micro_chunks, Z_SCALAR)) { - return false; - } - if (!check_micro_limb_decomposition_correctness(z_2_binary_limbs, z_2_micro_chunks, Z_SCALAR)) { - return false; - } - if (!check_micro_limb_decomposition_correctness( - current_accumulator_binary_limbs, current_accumulator_micro_chunks, STANDARD_COORDINATE)) { - return false; - } - if (!check_micro_limb_decomposition_correctness(quotient_binary_limbs, quotient_micro_chunks, QUOTIENT)) { - return false; - } - - // The logic we are trying to enforce is: - // current_accumulator = previous_accumulator ⋅ x + op_code + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ - // v⁴ mod Fq To ensure this we transform the relation into the form: previous_accumulator ⋅ x + op + - // P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - quotient ⋅ p - current_accumulator = 0 However, we - // don't have integers. Despite that, we can approximate integers for a certain range, if we know - // that there will not be any overflows. For now we set the range to 2²⁷² ⋅ r. We can evaluate the - // logic modulo 2²⁷² with range constraints and r is native. - // - // previous_accumulator ⋅ x + op + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - quotient ⋅ p - - // current_accumulator = 0 => - // 1. previous_accumulator ⋅ x + op + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ + quotient ⋅ (-p mod - // 2²⁷²) - current_accumulator = 0 mod 2²⁷² - // 2. 
previous_accumulator ⋅ x + op + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - quotient ⋅ p - - // current_accumulator = 0 mod r - // - // The second relation is straightforward and easy to check. The first, not so much. We have to - // evaluate certain bit chunks of the equation and ensure that they are zero. For example, for the - // lowest limb it would be (inclusive ranges): - // - // previous_accumulator[0:67] ⋅ x[0:67] + op + P.x[0:67] ⋅ v[0:67] + P.y[0:67] ⋅ v²[0:67] + - // z_1[0:67] ⋅ v³[0:67] + z_2[0:67] ⋅ v⁴[0:67] + quotient[0:67] ⋅ (-p mod 2²⁷²)[0:67] - - // current_accumulator[0:67] = intermediate_value; (we don't take parts of op, because it's supposed - // to be between 0 and 3) - // - // We could check that this intermediate_value is equal to 0 mod 2⁶⁸ by dividing it by 2⁶⁸ and - // constraining it. For efficiency, we actually compute wider evaluations for 136 bits, which - // require us to also obtain and shift products of [68:135] by [0:67] and [0:67] by [68:135] bits. - // The result of division goes into the next evaluation (the same as a carry flag would) - // So the lowest wide limb is : (∑everything[0:67]⋅everything[0:67] + - // 2⁶⁸⋅(∑everything[0:67]⋅everything[68:135]))/ 2¹³⁶ - // - // The high is: - // (low_limb + ∑everything[0:67]⋅everything[136:203] + ∑everything[68:135]⋅everything[68:135] + - // 2⁶⁸(∑everything[0:67]⋅everything[204:271] + ∑everything[68:135]⋅everything[136:203])) / 2¹³⁶ - // - // We also limit computation on limbs of op, z_1 and z_2, since we know that op has only the lowest - // limb and z_1 and z_2 have only the two lowest limbs - constexpr std::array NEGATIVE_MODULUS_LIMBS = Builder::NEGATIVE_MODULUS_LIMBS; - const uint256_t SHIFT_1 = Builder::SHIFT_1; - const uint256_t SHIFT_2 = Builder::SHIFT_2; - const uint256_t SHIFT_3 = Builder::SHIFT_3; - Fr low_wide_limb_relation_check = - - (previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[0] + op_code + - relation_inputs.v_limbs[0] * p_x_0 + 
relation_inputs.v_squared_limbs[0] * p_y_0 + - relation_inputs.v_cubed_limbs[0] * z_1_lo + relation_inputs.v_quarted_limbs[0] * z_2_lo + - quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[0] - current_accumulator_binary_limbs[0]) + - (previous_accumulator_binary_limbs[1] * relation_inputs.x_limbs[0] + - relation_inputs.v_limbs[1] * p_x_0 + relation_inputs.v_squared_limbs[1] * p_y_0 + - relation_inputs.v_cubed_limbs[1] * z_1_lo + relation_inputs.v_quarted_limbs[1] * z_2_lo + - quotient_binary_limbs[1] * NEGATIVE_MODULUS_LIMBS[0] + - previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[1] + - relation_inputs.v_limbs[0] * p_x_1 + relation_inputs.v_squared_limbs[0] * p_y_1 + - relation_inputs.v_cubed_limbs[0] * z_1_hi + relation_inputs.v_quarted_limbs[0] * z_2_hi + - quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[1] - current_accumulator_binary_limbs[1]) * - Fr(SHIFT_1); - if (low_wide_limb_relation_check != (low_wide_relation_limb * SHIFT_2)) { - return false; - } - Fr high_wide_relation_limb_check = - low_wide_relation_limb + previous_accumulator_binary_limbs[2] * relation_inputs.x_limbs[0] + - previous_accumulator_binary_limbs[1] * relation_inputs.x_limbs[1] + - previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[2] + relation_inputs.v_limbs[2] * p_x_0 + - relation_inputs.v_limbs[1] * p_x_1 + relation_inputs.v_limbs[0] * p_x_2 + - relation_inputs.v_squared_limbs[2] * p_y_0 + relation_inputs.v_squared_limbs[1] * p_y_1 + - relation_inputs.v_squared_limbs[0] * p_y_2 + relation_inputs.v_cubed_limbs[2] * z_1_lo + - relation_inputs.v_cubed_limbs[1] * z_1_hi + relation_inputs.v_quarted_limbs[2] * z_2_lo + - relation_inputs.v_quarted_limbs[1] * z_2_hi + quotient_binary_limbs[2] * NEGATIVE_MODULUS_LIMBS[0] + - quotient_binary_limbs[1] * NEGATIVE_MODULUS_LIMBS[1] + - quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[2] - current_accumulator_binary_limbs[2] + - (previous_accumulator_binary_limbs[3] * relation_inputs.x_limbs[0] + - 
previous_accumulator_binary_limbs[2] * relation_inputs.x_limbs[1] + - previous_accumulator_binary_limbs[1] * relation_inputs.x_limbs[2] + - previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[3] + - relation_inputs.v_limbs[3] * p_x_0 + relation_inputs.v_limbs[2] * p_x_1 + - relation_inputs.v_limbs[1] * p_x_2 + relation_inputs.v_limbs[0] * p_x_3 + - relation_inputs.v_squared_limbs[3] * p_y_0 + relation_inputs.v_squared_limbs[2] * p_y_1 + - relation_inputs.v_squared_limbs[1] * p_y_2 + relation_inputs.v_squared_limbs[0] * p_y_3 + - relation_inputs.v_cubed_limbs[3] * z_1_lo + relation_inputs.v_cubed_limbs[2] * z_1_hi + - relation_inputs.v_quarted_limbs[3] * z_2_lo + relation_inputs.v_quarted_limbs[2] * z_2_hi + - quotient_binary_limbs[3] * NEGATIVE_MODULUS_LIMBS[0] + - quotient_binary_limbs[2] * NEGATIVE_MODULUS_LIMBS[1] + - quotient_binary_limbs[1] * NEGATIVE_MODULUS_LIMBS[2] + - quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[3] - current_accumulator_binary_limbs[3]) * - SHIFT_1; - if (high_wide_relation_limb_check != (high_wide_relation_limb * SHIFT_2)) { - return false; - } - // Apart from checking the correctness of the evaluation modulo 2²⁷² we also need to ensure that the - // logic works in our scalar field. 
For this we reconstruct the scalar field values from individual - // limbs - auto reconstructed_p_x = (p_x_0 + p_x_1 * SHIFT_1 + p_x_2 * SHIFT_2 + p_x_3 * SHIFT_3); - auto reconstructed_p_y = (p_y_0 + p_y_1 * SHIFT_1 + p_y_2 * SHIFT_2 + p_y_3 * SHIFT_3); - auto reconstructed_current_accumulator = - (current_accumulator_binary_limbs[0] + current_accumulator_binary_limbs[1] * SHIFT_1 + - current_accumulator_binary_limbs[2] * SHIFT_2 + current_accumulator_binary_limbs[3] * SHIFT_3); - auto reconstructed_previous_accumulator = - (previous_accumulator_binary_limbs[0] + previous_accumulator_binary_limbs[1] * SHIFT_1 + - previous_accumulator_binary_limbs[2] * SHIFT_2 + previous_accumulator_binary_limbs[3] * SHIFT_3); - - auto reconstructed_z1 = (z_1_lo + z_1_hi * SHIFT_1); - auto reconstructed_z2 = (z_2_lo + z_2_hi * SHIFT_1); - auto reconstructed_quotient = (quotient_binary_limbs[0] + quotient_binary_limbs[1] * SHIFT_1 + - quotient_binary_limbs[2] * SHIFT_2 + quotient_binary_limbs[3] * SHIFT_3); - - // Check the relation - if (!(reconstructed_previous_accumulator * reconstructed_evaluation_input_x + op_code + - reconstructed_p_x * reconstructed_batching_evaluation_v + - reconstructed_p_y * reconstructed_batching_evaluation_v2 + - reconstructed_z1 * reconstructed_batching_evaluation_v3 + - reconstructed_z2 * reconstructed_batching_evaluation_v4 + - reconstructed_quotient * NEGATIVE_MODULUS_LIMBS[4] - reconstructed_current_accumulator) - .is_zero()) { - return false; - }; - } - { - size_t odd_gate_index = i + 1; - // Check the accumulator is copied correctly - const std::vector current_accumulator_binary_limbs_copy = { - circuit.get_variable(accumulators_binary_limbs_0_wire[odd_gate_index]), - circuit.get_variable(accumulators_binary_limbs_1_wire[odd_gate_index]), - circuit.get_variable(accumulators_binary_limbs_2_wire[odd_gate_index]), - circuit.get_variable(accumulators_binary_limbs_3_wire[odd_gate_index]), - }; - if (odd_gate_index < circuit.num_gates - 1) { - size_t 
next_even_gate_index = i + 2; - const std::vector current_accumulator_binary_limbs = { - circuit.get_variable(accumulators_binary_limbs_0_wire[next_even_gate_index]), - circuit.get_variable(accumulators_binary_limbs_1_wire[next_even_gate_index]), - circuit.get_variable(accumulators_binary_limbs_2_wire[next_even_gate_index]), - circuit.get_variable(accumulators_binary_limbs_3_wire[next_even_gate_index]), - }; - - for (size_t j = 0; j < current_accumulator_binary_limbs.size(); j++) { - if (current_accumulator_binary_limbs_copy[j] != current_accumulator_binary_limbs[j]) { - return report_fail("accumulator copy failed at row = ", odd_gate_index); - } + return false; } - } else { - // Check accumulator starts at zero - for (const auto& limb : current_accumulator_binary_limbs_copy) { - if (limb != Fr(0)) { - return report_fail("accumulator doesn't start with 0 = ", odd_gate_index); - } + break; + // For z top limbs we need as many microlimbs as for the low limbs + case Z_SCALAR: + if (binary_limbs[binary_limbs.size() - 1] != + accumulate_limb_from_micro_chunks(micro_limbs[binary_limbs.size() - 1], SKIPPED_FOR_Z_SCALARS)) { + return false; + } + // Check last additional constraint (60->70) + if (micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_Z_SCALARS] != + (SHIFT_4_TO_14 * + micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_Z_SCALARS - 1])) { + return false; + } + break; + // Quotient also doesn't need the top 2 + case QUOTIENT: + if (binary_limbs[binary_limbs.size() - 1] != + accumulate_limb_from_micro_chunks(micro_limbs[binary_limbs.size() - 1], SKIPPED_FOR_QUOTIENT)) { + return false; + } + // Check last additional constraint (52->56) + if (micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_QUOTIENT] != + (SHIFT_10_TO_14 * + micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_QUOTIENT - 1])) { + return false; } + break; + default: + abort(); } + + return true; + }; + // Check all micro limb 
decompositions + if (!check_micro_limb_decomposition_correctness(p_x_binary_limbs, p_x_micro_chunks, STANDARD_COORDINATE)) { + return false; + } + if (!check_micro_limb_decomposition_correctness(p_y_binary_limbs, p_y_micro_chunks, STANDARD_COORDINATE)) { + return false; + } + if (!check_micro_limb_decomposition_correctness(z_1_binary_limbs, z_1_micro_chunks, Z_SCALAR)) { + return false; + } + if (!check_micro_limb_decomposition_correctness(z_2_binary_limbs, z_2_micro_chunks, Z_SCALAR)) { + return false; + } + if (!check_micro_limb_decomposition_correctness( + current_accumulator_binary_limbs, current_accumulator_micro_chunks, STANDARD_COORDINATE)) { + return false; + } + if (!check_micro_limb_decomposition_correctness(quotient_binary_limbs, quotient_micro_chunks, QUOTIENT)) { + return false; } - } + + // The logic we are trying to enforce is: + // current_accumulator = previous_accumulator ⋅ x + op_code + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ + // v⁴ mod Fq To ensure this we transform the relation into the form: previous_accumulator ⋅ x + op + + // P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - quotient ⋅ p - current_accumulator = 0 However, we + // don't have integers. Despite that, we can approximate integers for a certain range, if we know + // that there will not be any overflows. For now we set the range to 2²⁷² ⋅ r. We can evaluate the + // logic modulo 2²⁷² with range constraints and r is native. + // + // previous_accumulator ⋅ x + op + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - quotient ⋅ p - + // current_accumulator = 0 => + // 1. previous_accumulator ⋅ x + op + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ + quotient ⋅ (-p mod + // 2²⁷²) - current_accumulator = 0 mod 2²⁷² + // 2. previous_accumulator ⋅ x + op + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - quotient ⋅ p - + // current_accumulator = 0 mod r + // + // The second relation is straightforward and easy to check. The first, not so much. 
We have to + // evaluate certain bit chunks of the equation and ensure that they are zero. For example, for the + // lowest limb it would be (inclusive ranges): + // + // previous_accumulator[0:67] ⋅ x[0:67] + op + P.x[0:67] ⋅ v[0:67] + P.y[0:67] ⋅ v²[0:67] + + // z_1[0:67] ⋅ v³[0:67] + z_2[0:67] ⋅ v⁴[0:67] + quotient[0:67] ⋅ (-p mod 2²⁷²)[0:67] - + // current_accumulator[0:67] = intermediate_value; (we don't take parts of op, because it's supposed + // to be between 0 and 3) + // + // We could check that this intermediate_value is equal to 0 mod 2⁶⁸ by dividing it by 2⁶⁸ and + // constraining it. For efficiency, we actually compute wider evaluations for 136 bits, which + // require us to also obtain and shift products of [68:135] by [0:67] and [0:67] by [68:135] bits. + // The result of division goes into the next evaluation (the same as a carry flag would) + // So the lowest wide limb is : (∑everything[0:67]⋅everything[0:67] + + // 2⁶⁸⋅(∑everything[0:67]⋅everything[68:135]))/ 2¹³⁶ + // + // The high is: + // (low_limb + ∑everything[0:67]⋅everything[136:203] + ∑everything[68:135]⋅everything[68:135] + + // 2⁶⁸(∑everything[0:67]⋅everything[204:271] + ∑everything[68:135]⋅everything[136:203])) / 2¹³⁶ + // + // We also limit computation on limbs of op, z_1 and z_2, since we know that op has only the lowest + // limb and z_1 and z_2 have only the two lowest limbs + constexpr std::array NEGATIVE_MODULUS_LIMBS = Builder::NEGATIVE_MODULUS_LIMBS; + const uint256_t SHIFT_1 = Builder::SHIFT_1; + const uint256_t SHIFT_2 = Builder::SHIFT_2; + const uint256_t SHIFT_3 = Builder::SHIFT_3; + Fr low_wide_limb_relation_check = + + (previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[0] + op_code + + relation_inputs.v_limbs[0] * p_x_0 + relation_inputs.v_squared_limbs[0] * p_y_0 + + relation_inputs.v_cubed_limbs[0] * z_1_lo + relation_inputs.v_quarted_limbs[0] * z_2_lo + + quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[0] - current_accumulator_binary_limbs[0]) + + 
(previous_accumulator_binary_limbs[1] * relation_inputs.x_limbs[0] + relation_inputs.v_limbs[1] * p_x_0 + + relation_inputs.v_squared_limbs[1] * p_y_0 + relation_inputs.v_cubed_limbs[1] * z_1_lo + + relation_inputs.v_quarted_limbs[1] * z_2_lo + quotient_binary_limbs[1] * NEGATIVE_MODULUS_LIMBS[0] + + previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[1] + relation_inputs.v_limbs[0] * p_x_1 + + relation_inputs.v_squared_limbs[0] * p_y_1 + relation_inputs.v_cubed_limbs[0] * z_1_hi + + relation_inputs.v_quarted_limbs[0] * z_2_hi + quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[1] - + current_accumulator_binary_limbs[1]) * + Fr(SHIFT_1); + if (low_wide_limb_relation_check != (low_wide_relation_limb * SHIFT_2)) { + return false; + } + Fr high_wide_relation_limb_check = + low_wide_relation_limb + previous_accumulator_binary_limbs[2] * relation_inputs.x_limbs[0] + + previous_accumulator_binary_limbs[1] * relation_inputs.x_limbs[1] + + previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[2] + relation_inputs.v_limbs[2] * p_x_0 + + relation_inputs.v_limbs[1] * p_x_1 + relation_inputs.v_limbs[0] * p_x_2 + + relation_inputs.v_squared_limbs[2] * p_y_0 + relation_inputs.v_squared_limbs[1] * p_y_1 + + relation_inputs.v_squared_limbs[0] * p_y_2 + relation_inputs.v_cubed_limbs[2] * z_1_lo + + relation_inputs.v_cubed_limbs[1] * z_1_hi + relation_inputs.v_quarted_limbs[2] * z_2_lo + + relation_inputs.v_quarted_limbs[1] * z_2_hi + quotient_binary_limbs[2] * NEGATIVE_MODULUS_LIMBS[0] + + quotient_binary_limbs[1] * NEGATIVE_MODULUS_LIMBS[1] + + quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[2] - current_accumulator_binary_limbs[2] + + (previous_accumulator_binary_limbs[3] * relation_inputs.x_limbs[0] + + previous_accumulator_binary_limbs[2] * relation_inputs.x_limbs[1] + + previous_accumulator_binary_limbs[1] * relation_inputs.x_limbs[2] + + previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[3] + relation_inputs.v_limbs[3] * p_x_0 + + 
relation_inputs.v_limbs[2] * p_x_1 + relation_inputs.v_limbs[1] * p_x_2 + + relation_inputs.v_limbs[0] * p_x_3 + relation_inputs.v_squared_limbs[3] * p_y_0 + + relation_inputs.v_squared_limbs[2] * p_y_1 + relation_inputs.v_squared_limbs[1] * p_y_2 + + relation_inputs.v_squared_limbs[0] * p_y_3 + relation_inputs.v_cubed_limbs[3] * z_1_lo + + relation_inputs.v_cubed_limbs[2] * z_1_hi + relation_inputs.v_quarted_limbs[3] * z_2_lo + + relation_inputs.v_quarted_limbs[2] * z_2_hi + quotient_binary_limbs[3] * NEGATIVE_MODULUS_LIMBS[0] + + quotient_binary_limbs[2] * NEGATIVE_MODULUS_LIMBS[1] + + quotient_binary_limbs[1] * NEGATIVE_MODULUS_LIMBS[2] + + quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[3] - current_accumulator_binary_limbs[3]) * + SHIFT_1; + if (high_wide_relation_limb_check != (high_wide_relation_limb * SHIFT_2)) { + return false; + } + // Apart from checking the correctness of the evaluation modulo 2²⁷² we also need to ensure that the + // logic works in our scalar field. For this we reconstruct the scalar field values from individual + // limbs + auto reconstructed_p_x = (p_x_0 + p_x_1 * SHIFT_1 + p_x_2 * SHIFT_2 + p_x_3 * SHIFT_3); + auto reconstructed_p_y = (p_y_0 + p_y_1 * SHIFT_1 + p_y_2 * SHIFT_2 + p_y_3 * SHIFT_3); + auto reconstructed_current_accumulator = + (current_accumulator_binary_limbs[0] + current_accumulator_binary_limbs[1] * SHIFT_1 + + current_accumulator_binary_limbs[2] * SHIFT_2 + current_accumulator_binary_limbs[3] * SHIFT_3); + auto reconstructed_previous_accumulator = + (previous_accumulator_binary_limbs[0] + previous_accumulator_binary_limbs[1] * SHIFT_1 + + previous_accumulator_binary_limbs[2] * SHIFT_2 + previous_accumulator_binary_limbs[3] * SHIFT_3); + + auto reconstructed_z1 = (z_1_lo + z_1_hi * SHIFT_1); + auto reconstructed_z2 = (z_2_lo + z_2_hi * SHIFT_1); + auto reconstructed_quotient = (quotient_binary_limbs[0] + quotient_binary_limbs[1] * SHIFT_1 + + quotient_binary_limbs[2] * SHIFT_2 + quotient_binary_limbs[3] * 
SHIFT_3); + + // Check the relation + if (!(reconstructed_previous_accumulator * reconstructed_evaluation_input_x + op_code + + reconstructed_p_x * reconstructed_batching_evaluation_v + + reconstructed_p_y * reconstructed_batching_evaluation_v2 + + reconstructed_z1 * reconstructed_batching_evaluation_v3 + + reconstructed_z2 * reconstructed_batching_evaluation_v4 + + reconstructed_quotient * NEGATIVE_MODULUS_LIMBS[4] - reconstructed_current_accumulator) + .is_zero()) { + return false; + }; + + if (!check_accumulator_transfer(previous_accumulator_binary_limbs, i + 1)) { + return false; + } + }; return true; -}; +} }; // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_builder.test.cpp b/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_builder.test.cpp index 61929ec1d65b..9eaf8f2b4ec7 100644 --- a/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_builder.test.cpp +++ b/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_builder.test.cpp @@ -1,9 +1,12 @@ #include "barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp" #include "barretenberg/circuit_checker/circuit_checker.hpp" #include "barretenberg/crypto/pedersen_commitment/pedersen.hpp" +#include "barretenberg/numeric/uint256/uint256.hpp" +#include "barretenberg/stdlib/primitives/circuit_builders/circuit_builders_fwd.hpp" #include "barretenberg/stdlib_circuit_builders/mock_circuits.hpp" #include "barretenberg/stdlib_circuit_builders/plookup_tables/fixed_base/fixed_base.hpp" +#include #include using namespace bb; @@ -109,8 +112,8 @@ TEST(UltraCircuitBuilder, BadLookupFailure) // Erroneously set a non-zero wire value to zero in one of the lookup gates for (auto& wire_3_witness_idx : builder.blocks.lookup.w_o()) { - if (wire_3_witness_idx != builder.zero_idx) { - wire_3_witness_idx = builder.zero_idx; + if (wire_3_witness_idx != builder.zero_idx()) { + wire_3_witness_idx = builder.zero_idx(); break; } } @@ -207,8 +210,8 @@ 
TEST(UltraCircuitBuilder, NonTrivialTagPermutation) auto c_idx = builder.add_variable(b); auto d_idx = builder.add_variable(a); - builder.create_add_gate({ a_idx, b_idx, builder.zero_idx, fr::one(), fr::one(), fr::zero(), fr::zero() }); - builder.create_add_gate({ c_idx, d_idx, builder.zero_idx, fr::one(), fr::one(), fr::zero(), fr::zero() }); + builder.create_add_gate({ a_idx, b_idx, builder.zero_idx(), fr::one(), fr::one(), fr::zero(), fr::zero() }); + builder.create_add_gate({ c_idx, d_idx, builder.zero_idx(), fr::one(), fr::one(), fr::zero(), fr::zero() }); builder.create_tag(1, 2); builder.create_tag(2, 1); @@ -253,9 +256,9 @@ TEST(UltraCircuitBuilder, NonTrivialTagPermutationAndCycles) builder.assign_tag(e_idx, 2); builder.assign_tag(g_idx, 2); - builder.create_add_gate({ b_idx, a_idx, builder.zero_idx, fr::one(), fr::neg_one(), fr::zero(), fr::zero() }); - builder.create_add_gate({ c_idx, g_idx, builder.zero_idx, fr::one(), -fr::one(), fr::zero(), fr::zero() }); - builder.create_add_gate({ e_idx, f_idx, builder.zero_idx, fr::one(), -fr::one(), fr::zero(), fr::zero() }); + builder.create_add_gate({ b_idx, a_idx, builder.zero_idx(), fr::one(), fr::neg_one(), fr::zero(), fr::zero() }); + builder.create_add_gate({ c_idx, g_idx, builder.zero_idx(), fr::one(), -fr::one(), fr::zero(), fr::zero() }); + builder.create_add_gate({ e_idx, f_idx, builder.zero_idx(), fr::one(), -fr::one(), fr::zero(), fr::zero() }); bool result = CircuitChecker::check(builder); EXPECT_EQ(result, true); @@ -275,8 +278,8 @@ TEST(UltraCircuitBuilder, BadTagPermutation) auto c_idx = builder.add_variable(b); auto d_idx = builder.add_variable(a + 1); - builder.create_add_gate({ a_idx, b_idx, builder.zero_idx, 1, 1, 0, 0 }); - builder.create_add_gate({ c_idx, d_idx, builder.zero_idx, 1, 1, 0, -1 }); + builder.create_add_gate({ a_idx, b_idx, builder.zero_idx(), 1, 1, 0, 0 }); + builder.create_add_gate({ c_idx, d_idx, builder.zero_idx(), 1, 1, 0, -1 }); bool result = 
CircuitChecker::check(builder); EXPECT_EQ(result, true); @@ -428,7 +431,7 @@ TEST(UltraCircuitBuilder, RangeConstraint) builder.create_new_range_constraint(indices[i], 3); } // auto ind = {a_idx,b_idx,c_idx,d_idx,e_idx,f_idx,g_idx,h_idx}; - builder.create_dummy_constraints(indices); + builder.create_unconstrained_gates(indices); bool result = CircuitChecker::check(builder); EXPECT_EQ(result, true); } @@ -449,7 +452,7 @@ TEST(UltraCircuitBuilder, RangeConstraint) for (size_t i = 0; i < indices.size(); i++) { builder.create_new_range_constraint(indices[i], 128); } - builder.create_dummy_constraints(indices); + builder.create_unconstrained_gates(indices); bool result = CircuitChecker::check(builder); EXPECT_EQ(result, true); } @@ -460,7 +463,7 @@ TEST(UltraCircuitBuilder, RangeConstraint) for (size_t i = 0; i < indices.size(); i++) { builder.create_new_range_constraint(indices[i], 79); } - builder.create_dummy_constraints(indices); + builder.create_unconstrained_gates(indices); bool result = CircuitChecker::check(builder); EXPECT_EQ(result, false); } @@ -471,7 +474,7 @@ TEST(UltraCircuitBuilder, RangeConstraint) for (size_t i = 0; i < indices.size(); i++) { builder.create_new_range_constraint(indices[i], 79); } - builder.create_dummy_constraints(indices); + builder.create_unconstrained_gates(indices); bool result = CircuitChecker::check(builder); EXPECT_EQ(result, false); } @@ -485,10 +488,10 @@ TEST(UltraCircuitBuilder, RangeWithGates) builder.create_new_range_constraint(idx[i], 8); } - builder.create_add_gate({ idx[0], idx[1], builder.zero_idx, fr::one(), fr::one(), fr::zero(), -3 }); - builder.create_add_gate({ idx[2], idx[3], builder.zero_idx, fr::one(), fr::one(), fr::zero(), -7 }); - builder.create_add_gate({ idx[4], idx[5], builder.zero_idx, fr::one(), fr::one(), fr::zero(), -11 }); - builder.create_add_gate({ idx[6], idx[7], builder.zero_idx, fr::one(), fr::one(), fr::zero(), -15 }); + builder.create_add_gate({ idx[0], idx[1], builder.zero_idx(), fr::one(), 
fr::one(), fr::zero(), -3 }); + builder.create_add_gate({ idx[2], idx[3], builder.zero_idx(), fr::one(), fr::one(), fr::zero(), -7 }); + builder.create_add_gate({ idx[4], idx[5], builder.zero_idx(), fr::one(), fr::one(), fr::zero(), -11 }); + builder.create_add_gate({ idx[6], idx[7], builder.zero_idx(), fr::one(), fr::one(), fr::zero(), -15 }); bool result = CircuitChecker::check(builder); EXPECT_EQ(result, true); } @@ -501,10 +504,10 @@ TEST(UltraCircuitBuilder, RangeWithGatesWhereRangeIsNotAPowerOfTwo) builder.create_new_range_constraint(idx[i], 12); } - builder.create_add_gate({ idx[0], idx[1], builder.zero_idx, fr::one(), fr::one(), fr::zero(), -3 }); - builder.create_add_gate({ idx[2], idx[3], builder.zero_idx, fr::one(), fr::one(), fr::zero(), -7 }); - builder.create_add_gate({ idx[4], idx[5], builder.zero_idx, fr::one(), fr::one(), fr::zero(), -11 }); - builder.create_add_gate({ idx[6], idx[7], builder.zero_idx, fr::one(), fr::one(), fr::zero(), -15 }); + builder.create_add_gate({ idx[0], idx[1], builder.zero_idx(), fr::one(), fr::one(), fr::zero(), -3 }); + builder.create_add_gate({ idx[2], idx[3], builder.zero_idx(), fr::one(), fr::one(), fr::zero(), -7 }); + builder.create_add_gate({ idx[4], idx[5], builder.zero_idx(), fr::one(), fr::one(), fr::zero(), -11 }); + builder.create_add_gate({ idx[6], idx[7], builder.zero_idx(), fr::one(), fr::one(), fr::zero(), -15 }); bool result = CircuitChecker::check(builder); EXPECT_EQ(result, true); } @@ -562,7 +565,7 @@ TEST(UltraCircuitBuilder, ComposedRangeConstraint) auto d = uint256_t(c).slice(0, 133); auto e = fr(d); auto a_idx = builder.add_variable(fr(e)); - builder.create_add_gate({ a_idx, builder.zero_idx, builder.zero_idx, 1, 0, 0, -fr(e) }); + builder.create_add_gate({ a_idx, builder.zero_idx(), builder.zero_idx(), 1, 0, 0, -fr(e) }); builder.decompose_into_default_range(a_idx, 134); // odd num bits - divisible by 3 @@ -570,31 +573,21 @@ TEST(UltraCircuitBuilder, ComposedRangeConstraint) auto d_1 = 
uint256_t(c_1).slice(0, 126); auto e_1 = fr(d_1); auto a_idx_1 = builder.add_variable(fr(e_1)); - builder.create_add_gate({ a_idx_1, builder.zero_idx, builder.zero_idx, 1, 0, 0, -fr(e_1) }); + builder.create_add_gate({ a_idx_1, builder.zero_idx(), builder.zero_idx(), 1, 0, 0, -fr(e_1) }); builder.decompose_into_default_range(a_idx_1, 127); bool result = CircuitChecker::check(builder); EXPECT_EQ(result, true); } -TEST(UltraCircuitBuilder, NonNativeFieldMultiplication) +static std::array helper_non_native_multiplication(UltraCircuitBuilder& builder, + const fq& a, + const fq& b, + const uint256_t& q, + const uint256_t& r, + const uint256_t& modulus) { - UltraCircuitBuilder builder = UltraCircuitBuilder(); - - fq a = fq::random_element(); - fq b = fq::random_element(); - uint256_t modulus = fq::modulus; - - uint1024_t a_big = uint512_t(uint256_t(a)); - uint1024_t b_big = uint512_t(uint256_t(b)); - uint1024_t p_big = uint512_t(uint256_t(modulus)); - - uint1024_t q_big = (a_big * b_big) / p_big; - uint1024_t r_big = (a_big * b_big) % p_big; - - uint256_t q(q_big.lo.lo); - uint256_t r(r_big.lo.lo); - + // Splits a 256-bit integer into 4 68-bit limbs const auto split_into_limbs = [&](const uint512_t& input) { constexpr size_t NUM_BITS = 68; std::array limbs; @@ -605,6 +598,7 @@ TEST(UltraCircuitBuilder, NonNativeFieldMultiplication) return limbs; }; + // Adds the 4 limbs as circuit variables and returns their indices const auto get_limb_witness_indices = [&](const std::array& limbs) { std::array limb_indices; limb_indices[0] = builder.add_variable(limbs[0]); @@ -613,22 +607,109 @@ TEST(UltraCircuitBuilder, NonNativeFieldMultiplication) limb_indices[3] = builder.add_variable(limbs[3]); return limb_indices; }; + + // Compute negative modulus: (-p) := 2^T - p const uint512_t BINARY_BASIS_MODULUS = uint512_t(1) << (68 * 4); auto modulus_limbs = split_into_limbs(BINARY_BASIS_MODULUS - uint512_t(modulus)); + // Add a, b, q, r as circuit variables const auto a_indices = 
get_limb_witness_indices(split_into_limbs(uint256_t(a))); const auto b_indices = get_limb_witness_indices(split_into_limbs(uint256_t(b))); const auto q_indices = get_limb_witness_indices(split_into_limbs(uint256_t(q))); const auto r_indices = get_limb_witness_indices(split_into_limbs(uint256_t(r))); + // Prepare inputs for non-native multiplication gadget non_native_multiplication_witnesses inputs{ a_indices, b_indices, q_indices, r_indices, modulus_limbs, }; const auto [lo_1_idx, hi_1_idx] = builder.evaluate_non_native_field_multiplication(inputs); - builder.range_constrain_two_limbs(lo_1_idx, hi_1_idx, 70, 70); - bool result = CircuitChecker::check(builder); - EXPECT_EQ(result, true); + return { lo_1_idx, hi_1_idx }; +} + +TEST(UltraCircuitBuilder, NonNativeFieldMultiplication) +{ + const size_t num_iterations = 50; + for (size_t i = 0; i < num_iterations; i++) { + UltraCircuitBuilder builder = UltraCircuitBuilder(); + + fq a = fq::random_element(); + fq b = fq::random_element(); + uint256_t modulus = fq::modulus; + + uint1024_t a_big = uint512_t(uint256_t(a)); + uint1024_t b_big = uint512_t(uint256_t(b)); + uint1024_t p_big = uint512_t(uint256_t(modulus)); + + uint1024_t q_big = (a_big * b_big) / p_big; + uint1024_t r_big = (a_big * b_big) % p_big; + + uint256_t q(q_big.lo.lo); + uint256_t r(r_big.lo.lo); + + const auto [lo_1_idx, hi_1_idx] = helper_non_native_multiplication(builder, a, b, q, r, modulus); + + // Range check the carry (output) lo and hi limbs + const bool is_low_70_bits = uint256_t(builder.get_variable(lo_1_idx)).get_msb() < 70; + const bool is_high_70_bits = uint256_t(builder.get_variable(hi_1_idx)).get_msb() < 70; + if (is_low_70_bits && is_high_70_bits) { + // Uses more efficient NNF range check if both limbs are < 2^70 + builder.range_constrain_two_limbs(lo_1_idx, hi_1_idx, 70, 70); + } else { + // Fallback to default range checks + builder.decompose_into_default_range(lo_1_idx, 72); + builder.decompose_into_default_range(hi_1_idx, 72); + } + 
+ bool result = CircuitChecker::check(builder); + EXPECT_EQ(result, true); + } +} + +TEST(UltraCircuitBuilder, NonNativeFieldMultiplicationRegression) +{ + UltraCircuitBuilder builder = UltraCircuitBuilder(); + + // Edge case values + uint256_t a_u256 = uint256_t("0x00ab1504deacff852326adf4a01099e9340f232e2a631042852fce3c4eb8a51b"); + uint256_t b_u256 = uint256_t("0x1be457323502cfcd85f8cfa54c8c4fea146b9db2a7d86b29d966d61b714ee249"); + uint256_t q_u256 = uint256_t("0x00629b9d576dfc6b5c28a4a254d5e8e3384124f6a898858e95265254a01414d5"); + uint256_t r_u256 = uint256_t("0x2c1590eb70a48dce72f7686bbf79b59bf7926c99bc16aba92e474c65a04ea2a0"); + uint256_t modulus = fq::modulus; + + // Check if native computation yeils same q and r + uint1024_t a_big = uint512_t(a_u256); + uint1024_t b_big = uint512_t(b_u256); + uint1024_t p_big = uint512_t(uint256_t(modulus)); + + uint1024_t q_big = (a_big * b_big) / p_big; + uint1024_t r_big = (a_big * b_big) % p_big; + uint256_t q_computed(q_big.lo.lo); + uint256_t r_computed(r_big.lo.lo); + + EXPECT_EQ(q_computed, q_u256); + EXPECT_EQ(r_computed, r_u256); + + // This edge case leads to the carry limb being > 2^70, so it used to fail when appying a 2^70 range check + // (with range_constrain_two_limbs). Now it should work since we fallback to default range checks in such a case. 
+ const auto [lo_1_idx, hi_1_idx] = + helper_non_native_multiplication(builder, a_u256, b_u256, q_u256, r_u256, modulus); + + // Range check the carry (output) lo and hi limbs + const bool is_high_70_bits = uint256_t(builder.get_variable(hi_1_idx)).get_msb() < 70; + ASSERT(is_high_70_bits == false); // Regression should hit this case + + // Decompose into default range: these should work even if the limbs are > 2^70 + builder.decompose_into_default_range(lo_1_idx, 72); + builder.decompose_into_default_range(hi_1_idx, 72); + bool result_a = CircuitChecker::check(builder); + EXPECT_EQ(result_a, true); + + // Using NNF range check should fail here + builder.range_constrain_two_limbs(lo_1_idx, hi_1_idx, 70, 70); + bool result_b = CircuitChecker::check(builder); + EXPECT_EQ(result_b, false); + EXPECT_EQ(builder.err(), "range_constrain_two_limbs: hi limb."); } /** @@ -653,37 +734,19 @@ TEST(UltraCircuitBuilder, NonNativeFieldMultiplicationSortCheck) uint256_t q(q_big.lo.lo); uint256_t r(r_big.lo.lo); - const auto split_into_limbs = [&](const uint512_t& input) { - constexpr size_t NUM_BITS = 68; - std::array limbs; - limbs[0] = input.slice(0, NUM_BITS).lo; - limbs[1] = input.slice(NUM_BITS * 1, NUM_BITS * 2).lo; - limbs[2] = input.slice(NUM_BITS * 2, NUM_BITS * 3).lo; - limbs[3] = input.slice(NUM_BITS * 3, NUM_BITS * 4).lo; - return limbs; - }; - - const auto get_limb_witness_indices = [&](const std::array& limbs) { - std::array limb_indices; - limb_indices[0] = builder.add_variable(limbs[0]); - limb_indices[1] = builder.add_variable(limbs[1]); - limb_indices[2] = builder.add_variable(limbs[2]); - limb_indices[3] = builder.add_variable(limbs[3]); - return limb_indices; - }; - const uint512_t BINARY_BASIS_MODULUS = uint512_t(1) << (68 * 4); - auto modulus_limbs = split_into_limbs(BINARY_BASIS_MODULUS - uint512_t(modulus)); - - const auto a_indices = get_limb_witness_indices(split_into_limbs(uint256_t(a))); - const auto b_indices = 
get_limb_witness_indices(split_into_limbs(uint256_t(b))); - const auto q_indices = get_limb_witness_indices(split_into_limbs(uint256_t(q))); - const auto r_indices = get_limb_witness_indices(split_into_limbs(uint256_t(r))); - - non_native_multiplication_witnesses inputs{ - a_indices, b_indices, q_indices, r_indices, modulus_limbs, - }; - const auto [lo_1_idx, hi_1_idx] = builder.evaluate_non_native_field_multiplication(inputs); - builder.range_constrain_two_limbs(lo_1_idx, hi_1_idx, 70, 70); + const auto [lo_1_idx, hi_1_idx] = helper_non_native_multiplication(builder, a, b, q, r, modulus); + + // Range check the carry (output) lo and hi limbs + const bool is_low_70_bits = uint256_t(builder.get_variable(lo_1_idx)).get_msb() < 70; + const bool is_high_70_bits = uint256_t(builder.get_variable(hi_1_idx)).get_msb() < 70; + if (is_low_70_bits && is_high_70_bits) { + // Uses more efficient NNF range check if both limbs are < 2^70 + builder.range_constrain_two_limbs(lo_1_idx, hi_1_idx, 70, 70); + } else { + // Fallback to default range checks + builder.decompose_into_default_range(lo_1_idx, 72); + builder.decompose_into_default_range(hi_1_idx, 72); + } bool result = CircuitChecker::check(builder); EXPECT_EQ(result, true); @@ -762,9 +825,9 @@ TEST(UltraCircuitBuilder, RamSimple) // Use the result in a simple arithmetic gate builder.create_big_add_gate({ a_idx, - builder.zero_idx, - builder.zero_idx, - builder.zero_idx, + builder.zero_idx(), + builder.zero_idx(), + builder.zero_idx(), -1, 0, 0, @@ -823,9 +886,9 @@ TEST(UltraCircuitBuilder, Ram) true); builder.create_big_add_gate( { - builder.zero_idx, - builder.zero_idx, - builder.zero_idx, + builder.zero_idx(), + builder.zero_idx(), + builder.zero_idx(), e_idx, 0, 0, @@ -889,9 +952,9 @@ TEST(UltraCircuitBuilder, CheckCircuitShowcase) uint32_t b = builder.add_variable(0xbeef); // Let's create 2 gates that will bind these 2 variables to be one these two values builder.create_poly_gate( - { a, a, builder.zero_idx, fr(1), 
-fr(0xdead) - fr(0xbeef), 0, 0, fr(0xdead) * fr(0xbeef) }); + { a, a, builder.zero_idx(), fr(1), -fr(0xdead) - fr(0xbeef), 0, 0, fr(0xdead) * fr(0xbeef) }); builder.create_poly_gate( - { b, b, builder.zero_idx, fr(1), -fr(0xdead) - fr(0xbeef), 0, 0, fr(0xdead) * fr(0xbeef) }); + { b, b, builder.zero_idx(), fr(1), -fr(0xdead) - fr(0xbeef), 0, 0, fr(0xdead) * fr(0xbeef) }); // We can check if this works EXPECT_EQ(CircuitChecker::check(builder), true); diff --git a/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_checker.cpp b/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_checker.cpp index dca6917b51d9..752a37d04356 100644 --- a/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_checker.cpp +++ b/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_checker.cpp @@ -22,8 +22,9 @@ UltraCircuitBuilder_ UltraCircuitChecker::prepare_cir { // Create a copy of the input circuit UltraCircuitBuilder_ builder{ builder_in }; - - builder.finalize_circuit(/*ensure_nonzero=*/true); // Test the ensure_nonzero gates as well + if (!builder.circuit_finalized) { // avoid warnings about finalizing an already finalized circuit + builder.finalize_circuit(/*ensure_nonzero=*/true); // Test the ensure_nonzero gates as well + } return builder; } diff --git a/barretenberg/cpp/src/barretenberg/client_ivc/acir_bincode_mocks.hpp b/barretenberg/cpp/src/barretenberg/client_ivc/acir_bincode_mocks.hpp index c1b209c12d86..3ea3ac20c9a4 100644 --- a/barretenberg/cpp/src/barretenberg/client_ivc/acir_bincode_mocks.hpp +++ b/barretenberg/cpp/src/barretenberg/client_ivc/acir_bincode_mocks.hpp @@ -9,6 +9,44 @@ namespace bb::acir_bincode_mocks { const size_t BIT_COUNT = 254; +inline uint8_t hex_char_to_value(char c) +{ + if (c >= '0' && c <= '9') { + return static_cast(c - '0'); + } + if (c >= 'a' && c <= 'f') { + return static_cast(10 + (c - 'a')); + } + if (c >= 'A' && c <= 'F') { + return static_cast(10 + (c - 'A')); + } + throw 
std::invalid_argument(std::string("Invalid hex character: '") + c + "'"); +} + +// Converts a hex string (must have even length) to a vector +inline std::vector hex_string_to_bytes(const std::string& str) +{ + // Allow optional "0x" or "0X" prefix + size_t offset = 0; + if (str.size() >= 2 && (str[0] == '0') && (str[1] == 'x' || str[1] == 'X')) { + offset = 2; + } + size_t hex_len = str.size() - offset; + // Enforce that the input string must represent exactly 32 bytes (64 hex chars) + if (hex_len != 64) { + throw std::invalid_argument( + "Hex string must be exactly 64 characters (32 bytes), excluding optional 0x prefix"); + } + std::vector bytes; + bytes.reserve(32); + for (size_t i = 0; i < hex_len; i += 2) { + uint8_t high = hex_char_to_value(str[offset + i]); + uint8_t low = hex_char_to_value(str[offset + i + 1]); + bytes.push_back(static_cast((high << 4) | low)); + } + return bytes; +} + /** * @brief Helper function to create a minimal circuit bytecode and witness for testing * @return A pair of (circuit_bytecode, witness_data) @@ -16,31 +54,36 @@ const size_t BIT_COUNT = 254; * The circuit implements: w0 * w1 = w2 * Example witness: w0=2, w1=3, w2=6 (so 2*3=6) */ -inline std::pair, std::vector> create_simple_circuit_bytecode() +inline std::pair, std::vector> create_simple_circuit_bytecode(size_t num_constraints = 1) { Acir::Circuit circuit; // No public inputs circuit.public_parameters = Acir::PublicInputs{ {} }; - std::string one = "0000000000000000000000000000000000000000000000000000000000000001"; - std::string minus_one = "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000000"; - - Acir::Expression expr; - - // Create constraint: w0 * w1 - w2 = 0 - expr.mul_terms = { { one, Acir::Witness{ 0 }, Acir::Witness{ 1 } } }; // w0 * w1 - expr.linear_combinations = { { minus_one, Acir::Witness{ 2 } } }; // -1 * w2 - expr.q_c = "0000000000000000000000000000000000000000000000000000000000000000"; - - Acir::Opcode::AssertZero assert_zero; - assert_zero.value 
= expr; - Acir::Opcode opcode; - opcode.value = assert_zero; - circuit.opcodes.push_back(opcode); + std::vector one = hex_string_to_bytes("0000000000000000000000000000000000000000000000000000000000000001"); + std::vector minus_one = + hex_string_to_bytes("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000000"); + + // Add num_constraints identical constraints, each using different witnesses + for (size_t i = 0; i < num_constraints; ++i) { + Acir::Expression expr; + uint32_t base_witness = static_cast(i * 3); + + // Create constraint: w[base] * w[base+1] - w[base+2] = 0 + expr.mul_terms = { { one, Acir::Witness{ base_witness }, Acir::Witness{ base_witness + 1 } } }; + expr.linear_combinations = { { minus_one, Acir::Witness{ base_witness + 2 } } }; + expr.q_c = hex_string_to_bytes("0000000000000000000000000000000000000000000000000000000000000000"); + + Acir::Opcode::AssertZero assert_zero; + assert_zero.value = expr; + Acir::Opcode opcode; + opcode.value = assert_zero; + circuit.opcodes.push_back(opcode); + } - circuit.current_witness_index = 3; - circuit.expression_width = Acir::ExpressionWidth{ Acir::ExpressionWidth::Unbounded{} }; + circuit.current_witness_index = static_cast(num_constraints * 3); + circuit.function_name = "simple_circuit"; circuit.private_parameters = {}; circuit.return_values = Acir::PublicInputs{ {} }; circuit.assert_messages = {}; @@ -54,12 +97,16 @@ inline std::pair, std::vector> create_simple_circu Witnesses::WitnessStack witness_stack; Witnesses::StackItem stack_item{}; - // w0=2, w1=3, w2=6 (so 2*3=6) - stack_item.witness.value = { - { Witnesses::Witness{ 0 }, "0000000000000000000000000000000000000000000000000000000000000002" }, // w0 = 2 - { Witnesses::Witness{ 1 }, "0000000000000000000000000000000000000000000000000000000000000003" }, // w1 = 3 - { Witnesses::Witness{ 2 }, "0000000000000000000000000000000000000000000000000000000000000006" } // w2 = 6 - }; + // For each constraint, add witnesses: w[i*3]=2, w[i*3+1]=3, w[i*3+2]=6 
(so 2*3=6) + for (size_t i = 0; i < num_constraints; ++i) { + uint32_t base_witness = static_cast(i * 3); + stack_item.witness.value[Witnesses::Witness{ base_witness }] = + hex_string_to_bytes("0000000000000000000000000000000000000000000000000000000000000002"); // = 2 + stack_item.witness.value[Witnesses::Witness{ base_witness + 1 }] = + hex_string_to_bytes("0000000000000000000000000000000000000000000000000000000000000003"); // = 3 + stack_item.witness.value[Witnesses::Witness{ base_witness + 2 }] = + hex_string_to_bytes("0000000000000000000000000000000000000000000000000000000000000006"); // = 6 + } witness_stack.stack.push_back(stack_item); return { program.bincodeSerialize(), witness_stack.bincodeSerialize() }; @@ -76,29 +123,42 @@ inline std::vector create_simple_kernel(size_t vk_size, bool is_init_ke // Create witnesses equal to size of a mega VK in fields. std::vector vk_inputs; for (uint32_t i = 0; i < vk_size; i++) { - Acir::FunctionInput input{ { Acir::ConstantOrWitnessEnum::Witness{ i } }, BIT_COUNT }; - vk_inputs.push_back(input); + auto i_wit = std::variant{ + std::in_place_type, Acir::Witness{ i } + }; + vk_inputs.push_back(Acir::FunctionInput{ .value = i_wit }); + ; } - Acir::FunctionInput key_hash{ { Acir::ConstantOrWitnessEnum::Witness{ static_cast(vk_size) } }, - BIT_COUNT }; + + auto vk_size_wit = std::variant{ + std::in_place_type, Acir::Witness{ static_cast(vk_size) } + }; + Acir::FunctionInput key_hash{ .value = vk_size_wit }; + ; size_t total_num_witnesses = /* vk */ vk_size + /* key_hash */ 1; + auto predicate_const = std::variant{ + std::in_place_type, + hex_string_to_bytes("0000000000000000000000000000000000000000000000000000000000000001") + }; + Acir::FunctionInput predicate{ .value = predicate_const }; + // Modeled after noir-projects/mock-protocol-circuits/crates/mock-private-kernel-init/src/main.nr // We mock the init or tail kernels using OINK or PG respectively. 
Acir::BlackBoxFuncCall::RecursiveAggregation recursion{ .verification_key = vk_inputs, .proof = {}, .public_inputs = {}, .key_hash = key_hash, - .proof_type = is_init_kernel - ? acir_format::PROOF_TYPE::OINK - : acir_format::PROOF_TYPE::PG }; + .proof_type = is_init_kernel ? acir_format::PROOF_TYPE::OINK + : acir_format::PROOF_TYPE::PG, + .predicate = predicate }; Acir::BlackBoxFuncCall black_box_call; black_box_call.value = recursion; circuit.opcodes.push_back(Acir::Opcode{ Acir::Opcode::BlackBoxFuncCall{ black_box_call } }); circuit.current_witness_index = static_cast(total_num_witnesses); - circuit.expression_width = Acir::ExpressionWidth{ Acir::ExpressionWidth::Bounded{ 3 } }; + circuit.function_name = "simple_circuit"; // Create the program with the circuit Acir::Program program; @@ -119,12 +179,12 @@ inline std::vector create_kernel_witness(const std::vector& app for (uint32_t i = 0; i < app_vk_fields.size(); i++) { std::stringstream ss; ss << app_vk_fields[i]; - kernel_witness.stack.back().witness.value[Witnesses::Witness{ i }] = ss.str(); + kernel_witness.stack.back().witness.value[Witnesses::Witness{ i }] = hex_string_to_bytes(ss.str()); } std::stringstream ss; ss << crypto::Poseidon2::hash(app_vk_fields); kernel_witness.stack.back().witness.value[Witnesses::Witness{ static_cast(app_vk_fields.size()) }] = - ss.str(); + hex_string_to_bytes(ss.str()); return kernel_witness.bincodeSerialize(); } diff --git a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.cpp b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.cpp index e49f84302035..1d07ead885d8 100644 --- a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.cpp +++ b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.cpp @@ -5,9 +5,9 @@ // ===================== #include "barretenberg/client_ivc/client_ivc.hpp" -#include "barretenberg/common/op_count.hpp" +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/common/streams.hpp" -#include 
"barretenberg/honk/proving_key_inspector.hpp" +#include "barretenberg/honk/prover_instance_inspector.hpp" #include "barretenberg/serialize/msgpack_impl.hpp" #include "barretenberg/special_public_inputs/special_public_inputs.hpp" #include "barretenberg/ultra_honk/oink_prover.hpp" @@ -28,7 +28,6 @@ ClientIVC::ClientIVC(size_t num_circuits, TraceSettings trace_settings) size_t commitment_key_size = std::max(trace_settings.dyadic_size(), 1UL << TranslatorFlavor::CONST_TRANSLATOR_LOG_N); info("BN254 commitment key size: ", commitment_key_size); - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1420): pass commitment keys by value bn254_commitment_key = CommitmentKey(commitment_key_size); } @@ -74,6 +73,48 @@ void ClientIVC::instantiate_stdlib_verification_queue( } } +std::shared_ptr ClientIVC::perform_oink_recursive_verification( + ClientCircuit& circuit, + const std::shared_ptr& verifier_instance, + const std::shared_ptr& transcript, + const StdlibProof& proof) +{ + OinkRecursiveVerifier verifier{ &circuit, verifier_instance, transcript }; + verifier.verify_proof(proof); + + verifier_instance->target_sum = StdlibFF::from_witness_index(&circuit, circuit.zero_idx()); + // Get the gate challenges for sumcheck/combiner computation + verifier_instance->gate_challenges = + transcript->template get_powers_of_challenge("gate_challenge", CONST_PG_LOG_N); + + return verifier_instance; +} + +std::shared_ptr ClientIVC::perform_pg_recursive_verification( + ClientCircuit& circuit, + const std::shared_ptr& verifier_accumulator, + const std::shared_ptr& verifier_instance, + const std::shared_ptr& transcript, + const StdlibProof& proof, + std::optional& prev_accum_hash, + bool is_kernel) +{ + BB_ASSERT_NEQ(verifier_accumulator, nullptr, "verifier_accumulator cannot be null in PG recursive verification"); + + // Fiat-Shamir the accumulator. 
(Only needs to be performed on the first in a series of recursive PG verifications + // within a given kernel and by convention the kernel proof is always verified first). + if (is_kernel) { + prev_accum_hash = verifier_accumulator->hash_through_transcript("", *transcript); + transcript->add_to_hash_buffer("accum_hash", *prev_accum_hash); + info("Previous accumulator hash in PG rec verifier: ", *prev_accum_hash); + } + // Perform folding recursive verification to update the verifier accumulator + FoldingRecursiveVerifier verifier{ &circuit, verifier_accumulator, verifier_instance, transcript }; + auto updated_verifier_accumulator = verifier.verify_folding_proof(proof); + + return updated_verifier_accumulator; +} + /** * @brief Populate the provided circuit with constraints for (1) recursive verification of the provided accumulation * proof and (2) the associated databus commitment consistency checks. @@ -89,108 +130,111 @@ void ClientIVC::instantiate_stdlib_verification_queue( * @param accumulation_recursive_transcript Transcript shared across recursive verification of the folding of * K_{i-1} (kernel), A_{i,1} (app), .., A_{i, n} (app) * - * @return Pair of PairingPoints for final verification and commitments to the merged tables as read from the proof by - * the Merge verifier + * @return Triple of output verifier accumulator, PairingPoints for final verification and commitments to the merged + * tables as read from the proof by the Merge verifier */ -std::pair ClientIVC:: - perform_recursive_verification_and_databus_consistency_checks( - ClientCircuit& circuit, - const StdlibVerifierInputs& verifier_inputs, - const TableCommitments& T_prev_commitments, - const std::shared_ptr& accumulation_recursive_transcript) +std::tuple, ClientIVC::PairingPoints, ClientIVC::TableCommitments> +ClientIVC::perform_recursive_verification_and_databus_consistency_checks( + ClientCircuit& circuit, + const StdlibVerifierInputs& verifier_inputs, + const std::shared_ptr& 
input_verifier_accumulator, + const TableCommitments& T_prev_commitments, + const std::shared_ptr& accumulation_recursive_transcript) { using MergeCommitments = Goblin::MergeRecursiveVerifier::InputCommitments; - // Witness commitments and public inputs corresponding to the incoming instance - WitnessCommitments witness_commitments; - std::vector public_inputs; + // The pairing points produced by the verification of the decider proof + PairingPoints decider_pairing_points; // Input commitments to be passed to the merge recursive verification - MergeCommitments merge_commitments; - merge_commitments.T_prev_commitments = T_prev_commitments; + MergeCommitments merge_commitments{ .T_prev_commitments = T_prev_commitments }; - switch (verifier_inputs.type) { - case QUEUE_TYPE::PG_TAIL: - case QUEUE_TYPE::PG: { - // Construct stdlib verifier accumulator from the native counterpart computed on a previous round - auto stdlib_verifier_accum = - std::make_shared(&circuit, recursive_verifier_native_accum); + auto verifier_instance = std::make_shared(&circuit, verifier_inputs.honk_vk_and_hash); - // Perform folding recursive verification to update the verifier accumulator - FoldingRecursiveVerifier verifier{ - &circuit, stdlib_verifier_accum, { verifier_inputs.honk_vk_and_hash }, accumulation_recursive_transcript - }; - auto verifier_accum = verifier.verify_folding_proof(verifier_inputs.proof); - - // Extract native verifier accumulator from the stdlib accum for use on the next round - recursive_verifier_native_accum = std::make_shared(verifier_accum->get_value()); - - witness_commitments = std::move(verifier.keys_to_fold[1]->witness_commitments); - public_inputs = std::move(verifier.public_inputs); - - break; - } + std::shared_ptr output_verifier_accumulator; + std::optional prev_accum_hash = std::nullopt; + // The decider proof exists if the tail kernel has been accumulated + bool is_hiding_kernel = !decider_proof.empty(); + switch (verifier_inputs.type) { case 
QUEUE_TYPE::OINK: { - // Construct an incomplete stdlib verifier accumulator from the corresponding stdlib verification key - auto verifier_accum = - std::make_shared(&circuit, verifier_inputs.honk_vk_and_hash); + BB_ASSERT_EQ(input_verifier_accumulator, nullptr); - // Perform oink recursive verification to complete the initial verifier accumulator - OinkRecursiveVerifier verifier{ &circuit, verifier_accum, accumulation_recursive_transcript }; - verifier.verify_proof(verifier_inputs.proof); - verifier_accum->is_accumulator = true; // indicate to PG that it should not run oink - - // Extract native verifier accumulator from the stdlib accum for use on the next round - recursive_verifier_native_accum = std::make_shared(verifier_accum->get_value()); - // Initialize the gate challenges to zero for use in first round of folding - recursive_verifier_native_accum->gate_challenges = std::vector(CONST_PG_LOG_N, 0); - - witness_commitments = std::move(verifier_accum->witness_commitments); - public_inputs = std::move(verifier.public_inputs); + output_verifier_accumulator = perform_oink_recursive_verification( + circuit, verifier_instance, accumulation_recursive_transcript, verifier_inputs.proof); // T_prev = 0 in the first recursive verification merge_commitments.T_prev_commitments = stdlib::recursion::honk::empty_ecc_op_tables(circuit); - + break; + } + case QUEUE_TYPE::PG: + case QUEUE_TYPE::PG_TAIL: { + output_verifier_accumulator = perform_pg_recursive_verification(circuit, + input_verifier_accumulator, + verifier_instance, + accumulation_recursive_transcript, + verifier_inputs.proof, + prev_accum_hash, + verifier_inputs.is_kernel); break; } case QUEUE_TYPE::PG_FINAL: { BB_ASSERT_EQ(stdlib_verification_queue.size(), size_t(1)); - BB_ASSERT_EQ(num_circuits_accumulated, - num_circuits, - "All circuits must be accumulated before constructing the hiding circuit."); - // Complete the hiding circuit construction - auto [pairing_points, merged_table_commitments] = - 
complete_hiding_circuit_logic(verifier_inputs.proof, verifier_inputs.honk_vk_and_hash, circuit); - // Return early since the hiding circuit method performs merge and public inputs handling - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1501): we should remove the code duplication for - // the consistency checks at some point - return { pairing_points, merged_table_commitments }; + + hide_op_queue_accumulation_result(circuit); + + auto final_verifier_accumulator = perform_pg_recursive_verification(circuit, + input_verifier_accumulator, + verifier_instance, + accumulation_recursive_transcript, + verifier_inputs.proof, + prev_accum_hash, + verifier_inputs.is_kernel); + // Perform recursive decider verification + DeciderRecursiveVerifier decider{ &circuit, final_verifier_accumulator, accumulation_recursive_transcript }; + decider_pairing_points = decider.verify_proof(decider_proof); + + BB_ASSERT_EQ(output_verifier_accumulator, nullptr); + break; } default: { - throw_or_abort("Invalid queue type! Only OINK, PG and PG_FINAL are supported"); + throw_or_abort("Invalid queue type! 
Only OINK, PG, PG_TAIL and PG_FINAL are supported"); } } + // Extract the witness commitments and public inputs from the incoming verifier instance + WitnessCommitments witness_commitments = std::move(verifier_instance->witness_commitments); + std::vector public_inputs = std::move(verifier_instance->public_inputs); + PairingPoints nested_pairing_points; // to be extracted from public inputs of app or kernel proof just verified if (verifier_inputs.is_kernel) { // Reconstruct the input from the previous kernel from its public inputs KernelIO kernel_input; // pairing points, databus return data commitments kernel_input.reconstruct_from_public(public_inputs); - nested_pairing_points = kernel_input.pairing_inputs; + // Perform databus consistency checks + kernel_input.kernel_return_data.incomplete_assert_equal(witness_commitments.calldata); + kernel_input.app_return_data.incomplete_assert_equal(witness_commitments.secondary_calldata); + // T_prev is read by the public input of the previous kernel K_{i-1} at the beginning of the recursive // verification of of the folding of K_{i-1} (kernel), A_{i,1} (app), .., A_{i, n} (app). 
This verification // happens in K_{i} - merge_commitments.T_prev_commitments = kernel_input.ecc_op_tables; - - // Perform databus consistency checks - kernel_input.kernel_return_data.assert_equal(witness_commitments.calldata); - kernel_input.app_return_data.assert_equal(witness_commitments.secondary_calldata); - - // Set the kernel return data commitment to be propagated via the public inputs - bus_depot.set_kernel_return_data_commitment(witness_commitments.return_data); + merge_commitments.T_prev_commitments = std::move(kernel_input.ecc_op_tables); + + BB_ASSERT_EQ(verifier_inputs.type == QUEUE_TYPE::PG || verifier_inputs.type == QUEUE_TYPE::PG_TAIL || + verifier_inputs.type == QUEUE_TYPE::PG_FINAL, + true, + "Kernel circuits should be folded."); + // Get the previous accum hash + info("PG accum hash from IO: ", kernel_input.output_pg_accum_hash); + ASSERT(prev_accum_hash.has_value()); + kernel_input.output_pg_accum_hash.assert_equal(*prev_accum_hash); + + if (!is_hiding_kernel) { + // The hiding kernel has no return data; it uses the traditional public-inputs mechanism + bus_depot.set_kernel_return_data_commitment(witness_commitments.return_data); + } } else { // Reconstruct the input from the previous app from its public inputs AppIO app_input; // pairing points @@ -209,8 +253,14 @@ std::pair ClientIVC:: goblin.recursively_verify_merge(circuit, merge_commitments, accumulation_recursive_transcript); pairing_points.aggregate(nested_pairing_points); + if (is_hiding_kernel) { + pairing_points.aggregate(decider_pairing_points); + // Add randomness at the end of the hiding kernel (whose ecc ops fall right at the end of the op queue table) to + // ensure the CIVC proof doesn't leak information about the actual content of the op queue + hide_op_queue_content_in_hiding(circuit); + } - return { pairing_points, merged_table_commitments }; + return { output_verifier_accumulator, pairing_points, merged_table_commitments }; } /** @@ -236,58 +286,156 @@ void 
ClientIVC::complete_kernel_circuit_logic(ClientCircuit& circuit) instantiate_stdlib_verification_queue(circuit); } + bool is_init_kernel = + stdlib_verification_queue.size() == 1 && (stdlib_verification_queue.front().type == QUEUE_TYPE::OINK); + + bool is_tail_kernel = + stdlib_verification_queue.size() == 1 && (stdlib_verification_queue.front().type == QUEUE_TYPE::PG_TAIL); + bool is_hiding_kernel = stdlib_verification_queue.size() == 1 && (stdlib_verification_queue.front().type == QUEUE_TYPE::PG_FINAL); - // If the incoming circuit is a kernel, start its subtable with an eq and reset operation to ensure a - // neighbouring misconfigured subtable coming from an app cannot affect the operations in the - // current subtable. We don't do this for the hiding kernel as it succeeds another kernel and because the hiding - // kernel has to start with a no-op for the correct functioning of translator. - if (!is_hiding_kernel) { - circuit.queue_ecc_eq(); + // The ECC-op subtable for a kernel begins with an eq-and-reset to ensure that the preceeding circuit's subtable + // cannot affect the ECC-op accumulator for the kernel. For the tail kernel, we additionally add a preceeding no-op + // to ensure the op queue wires in translator are shiftable, i.e. their 0th coefficient is 0. (The tail kernel + // subtable is at the top of the final aggregate table since it is the last to be prepended). 
+ if (is_tail_kernel) { + BB_ASSERT_EQ(circuit.op_queue->get_current_subtable_size(), + 0U, + "tail kernel ecc ops table should be empty at this point"); + circuit.queue_ecc_no_op(); + // Add randomness at the begining of the tail kernel (whose ecc ops fall at the beginning of the op queue table) + // to ensure the CIVC proof doesn't leak information about the actual content of the op queue + hide_op_queue_content_in_tail(circuit); } + circuit.queue_ecc_eq(); + // Perform Oink/PG and Merge recursive verification + databus consistency checks for each entry in the queue PairingPoints points_accumulator; + std::shared_ptr current_stdlib_verifier_accumulator = nullptr; + if (!is_init_kernel) { + current_stdlib_verifier_accumulator = + std::make_shared(&circuit, recursive_verifier_native_accum); + } while (!stdlib_verification_queue.empty()) { const StdlibVerifierInputs& verifier_input = stdlib_verification_queue.front(); - auto [pairing_points, merged_table_commitments] = perform_recursive_verification_and_databus_consistency_checks( - circuit, verifier_input, T_prev_commitments, accumulation_recursive_transcript); - - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1376): Optimize recursion aggregation - seems - // we can use `batch_mul` here to decrease the size of the `ECCOpQueue`, but must be cautious with FS security. 
+ auto [output_stdlib_verifier_accumulator, pairing_points, merged_table_commitments] = + perform_recursive_verification_and_databus_consistency_checks(circuit, + verifier_input, + current_stdlib_verifier_accumulator, + T_prev_commitments, + accumulation_recursive_transcript); points_accumulator.aggregate(pairing_points); - // Update commitment to the status of the op_queue T_prev_commitments = merged_table_commitments; + // Update the output verifier accumulator + current_stdlib_verifier_accumulator = output_stdlib_verifier_accumulator; stdlib_verification_queue.pop_front(); } - // Set the kernel output data to be propagated via the public inputs if (is_hiding_kernel) { + BB_ASSERT_EQ(current_stdlib_verifier_accumulator, nullptr); HidingKernelIO hiding_output{ points_accumulator, T_prev_commitments }; hiding_output.set_public(); - // preserve the hiding circuit so a proof for it can be created - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1502): reconsider approach once integration is - // complete - hiding_circuit = std::make_unique(circuit); } else { + BB_ASSERT_NEQ(current_stdlib_verifier_accumulator, nullptr); + // Extract native verifier accumulator from the stdlib accum for use on the next round + recursive_verifier_native_accum = + std::make_shared(current_stdlib_verifier_accumulator->get_value()); + KernelIO kernel_output; kernel_output.pairing_inputs = points_accumulator; kernel_output.kernel_return_data = bus_depot.get_kernel_return_data_commitment(circuit); kernel_output.app_return_data = bus_depot.get_app_return_data_commitment(circuit); kernel_output.ecc_op_tables = T_prev_commitments; - + RecursiveTranscript hash_transcript; + kernel_output.output_pg_accum_hash = + current_stdlib_verifier_accumulator->hash_through_transcript("", hash_transcript); + info("kernel output pg hash: ", kernel_output.output_pg_accum_hash); kernel_output.set_public(); } } +HonkProof ClientIVC::construct_oink_proof(const std::shared_ptr& prover_instance, + 
const std::shared_ptr& honk_vk, + const std::shared_ptr& transcript) +{ + vinfo("computing oink proof..."); + MegaOinkProver oink_prover{ prover_instance, honk_vk, transcript }; + oink_prover.prove(); + + prover_instance->target_sum = 0; + // Get the gate challenges for sumcheck/combiner computation + prover_instance->gate_challenges = + prover_accumulation_transcript->template get_powers_of_challenge("gate_challenge", CONST_PG_LOG_N); + + prover_accumulator = prover_instance; // initialize the prover accum with the completed key + + HonkProof oink_proof = oink_prover.export_proof(); + vinfo("oink proof constructed"); + return oink_proof; +} + +HonkProof ClientIVC::construct_pg_proof(const std::shared_ptr& prover_instance, + const std::shared_ptr& honk_vk, + const std::shared_ptr& transcript, + bool is_kernel) +{ + vinfo("computing pg proof..."); + // Only fiat shamir if this is a kernel with the assumption that kernels are always the first being recursively + // verified. + if (is_kernel) { + // Fiat-Shamir the verifier accumulator + FF accum_hash = native_verifier_accum->hash_through_transcript("", *prover_accumulation_transcript); + prover_accumulation_transcript->add_to_hash_buffer("accum_hash", accum_hash); + info("Accumulator hash in PG prover: ", accum_hash); + } + auto verifier_instance = std::make_shared>(honk_vk); + FoldingProver folding_prover({ prover_accumulator, prover_instance }, + { native_verifier_accum, verifier_instance }, + transcript, + trace_usage_tracker); + auto output = folding_prover.prove(); + prover_accumulator = output.accumulator; // update the prover accumulator + vinfo("pg proof constructed"); + return output.proof; +} + +/** + * @brief Get queue type for the proof of a circuit about to be accumulated based on num circuits accumulated so far. 
+ */ +ClientIVC::QUEUE_TYPE ClientIVC::get_queue_type() const +{ + // first app + if (num_circuits_accumulated == 0) { + return QUEUE_TYPE::OINK; + } + // app (excluding first) or kernel (inner or reset) + if ((num_circuits_accumulated > 0 && num_circuits_accumulated < num_circuits - 3)) { + return QUEUE_TYPE::PG; + } + // last kernel prior to tail kernel + if ((num_circuits_accumulated == num_circuits - 3)) { + return QUEUE_TYPE::PG_TAIL; + } + // tail kernel + if ((num_circuits_accumulated == num_circuits - 2)) { + return QUEUE_TYPE::PG_FINAL; + } + // hiding kernel + if ((num_circuits_accumulated == num_circuits - 1)) { + return QUEUE_TYPE::MEGA; + } + return QUEUE_TYPE{}; +} + /** * @brief Execute prover work for accumulation - * @details Construct an proving key for the provided circuit. If this is the first step in the IVC, simply initialize - * the folding accumulator. Otherwise, execute the PG prover to fold the proving key into the accumulator and produce a - * folding proof. Also execute the merge protocol to produce a merge proof. + * @details Construct an prover instance for the provided circuit. If this is the first step in the IVC, simply + * initialize the folding accumulator. Otherwise, execute the PG prover to fold the prover instance into the accumulator + * and produce a folding proof. Also execute the merge protocol to produce a merge proof. * * @param circuit * this case, just produce a Honk proof for that circuit and do no folding. @@ -298,24 +446,21 @@ void ClientIVC::accumulate(ClientCircuit& circuit, const std::shared_ptr proving_key = std::make_shared(circuit, trace_settings); + // Construct the prover instance for circuit + std::shared_ptr prover_instance = std::make_shared(circuit, trace_settings); // If the current circuit overflows past the current size of the commitment key, reinitialize accordingly. 
// TODO(https://github.com/AztecProtocol/barretenberg/issues/1319) - if (proving_key->dyadic_size() > bn254_commitment_key.dyadic_size) { - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1420): pass commitment keys by value - bn254_commitment_key = CommitmentKey(proving_key->dyadic_size()); + if (prover_instance->dyadic_size() > bn254_commitment_key.dyadic_size) { + bn254_commitment_key = CommitmentKey(prover_instance->dyadic_size()); goblin.commitment_key = bn254_commitment_key; } - proving_key->commitment_key = bn254_commitment_key; + prover_instance->commitment_key = bn254_commitment_key; trace_usage_tracker.update(circuit); - honk_vk = precomputed_vk; - - // We're acccumulating a kernel if the verification queue is empty (because the kernel circuit contains recursive + // We're accumulating a kernel if the verification queue is empty (because the kernel circuit contains recursive // verifiers for all the entries previously present in the verification queue) and if it's not the first accumulate // call (which will always be for an app circuit). 
bool is_kernel = verification_queue.empty() && num_circuits_accumulated > 0; @@ -326,81 +471,54 @@ void ClientIVC::accumulate(ClientCircuit& circuit, const std::shared_ptr(); } - VerifierInputs queue_entry{ .honk_vk = honk_vk, - // first circuit accumulated should be an app - .is_kernel = is_kernel }; - if (num_circuits_accumulated == 0) { // First circuit in the IVC - BB_ASSERT_EQ(queue_entry.is_kernel, false, "First circuit accumulated is always be an app"); - // For first circuit in the IVC, use oink to complete the decider proving key and generate an oink proof - auto oink_verifier_transcript = - Transcript::convert_prover_transcript_to_verifier_transcript(prover_accumulation_transcript); - MegaOinkProver oink_prover{ proving_key, honk_vk, prover_accumulation_transcript }; - vinfo("computing oink proof..."); - oink_prover.prove(); - HonkProof oink_proof = oink_prover.export_proof(); - vinfo("oink proof constructed"); - proving_key->is_accumulator = true; // indicate to PG that it should not run oink on this key - // Initialize the gate challenges to zero for use in first round of folding - proving_key->gate_challenges = std::vector(CONST_PG_LOG_N, 0); - - fold_output.accumulator = proving_key; // initialize the prover accum with the completed key - - auto decider_vk = std::make_shared(honk_vk); - oink_verifier_transcript->load_proof(oink_proof); - OinkVerifier oink_verifier{ decider_vk, oink_verifier_transcript }; - oink_verifier.verify(); - native_verifier_accum = decider_vk; - native_verifier_accum->is_accumulator = true; - native_verifier_accum->gate_challenges = std::vector(CONST_PG_LOG_N, 0); - - queue_entry.type = QUEUE_TYPE::OINK; - queue_entry.proof = oink_proof; - } else { // Otherwise, fold the new key into the accumulator - vinfo("computing folding proof"); - auto vk = std::make_shared>(honk_vk); - // make a copy of the prover_accumulation_transcript for the verifier to use - auto verifier_accumulation_transcript = - 
Transcript::convert_prover_transcript_to_verifier_transcript(prover_accumulation_transcript); - - FoldingProver folding_prover({ fold_output.accumulator, proving_key }, - { native_verifier_accum, vk }, - prover_accumulation_transcript, - trace_usage_tracker); - fold_output = folding_prover.prove(); - vinfo("constructed folding proof"); - FoldingVerifier folding_verifier({ native_verifier_accum, vk }, verifier_accumulation_transcript); - native_verifier_accum = folding_verifier.verify_folding_proof(fold_output.proof); - - if (num_circuits_accumulated == num_circuits - 1) { - // we are folding in the "Tail" kernel, so the verification_queue entry should have type PG_FINAL - queue_entry.type = QUEUE_TYPE::PG_FINAL; - decider_proof = decider_prove(); - vinfo("constructed decider proof"); - } else if (num_circuits_accumulated == num_circuits - 2) { - // we are folding in the "Inner/Reset" kernel, so the verification_queue entry should have type PG_TAIL - queue_entry.type = QUEUE_TYPE::PG_TAIL; - } else { - queue_entry.type = QUEUE_TYPE::PG; - } - queue_entry.proof = fold_output.proof; + // make a copy of the prover_accumulation_transcript for the verifier to use + auto verifier_transcript = + Transcript::convert_prover_transcript_to_verifier_transcript(prover_accumulation_transcript); + + QUEUE_TYPE queue_type = get_queue_type(); + HonkProof proof; + switch (queue_type) { + case QUEUE_TYPE::OINK: + vinfo("Accumulating first app circuit with OINK"); + BB_ASSERT_EQ(is_kernel, false, "First circuit accumulated must always be an app"); + proof = construct_oink_proof(prover_instance, precomputed_vk, prover_accumulation_transcript); + break; + case QUEUE_TYPE::PG: + case QUEUE_TYPE::PG_TAIL: + proof = construct_pg_proof(prover_instance, precomputed_vk, prover_accumulation_transcript, is_kernel); + break; + case QUEUE_TYPE::PG_FINAL: + proof = construct_pg_proof(prover_instance, precomputed_vk, prover_accumulation_transcript, is_kernel); + decider_proof = 
construct_decider_proof(prover_accumulation_transcript); + break; + case QUEUE_TYPE::MEGA: + proof = construct_honk_proof_for_hiding_kernel(circuit, precomputed_vk); + break; } + + VerifierInputs queue_entry{ std::move(proof), precomputed_vk, queue_type, is_kernel }; verification_queue.push_back(queue_entry); - // Construct merge proof for the present circuit - goblin.prove_merge(prover_accumulation_transcript); + // Update native verifier accumulator and construct merge proof (excluded for hiding kernel since PG terminates with + // tail kernel and hiding merge proof is constructed as part of goblin proving) + if (queue_entry.type != QUEUE_TYPE::MEGA) { + update_native_verifier_accumulator(queue_entry, verifier_transcript); + goblin.prove_merge(prover_accumulation_transcript); + } num_circuits_accumulated++; } /** - * @brief Add a random operation to the op queue to hide its content in Translator computation. + * @brief Add a valid operation with random data to the op queue to prevent information leakage in Translator + * proof. * - * @details Translator circuit builder computes the evaluation at some random challenge x of a batched polynomial - * derived from processing the ultra_op version of op_queue. This result (referred to as accumulated_result in - * translator) is included in the translator proof and, on the verifier side, checked against the same computation - * performed by ECCVM (this is done in verify_translation). To prevent leaking information about the actual - * accumulated_result (and implicitly about the ops) when the proof is sent to the rollup, a random but valid operation - * is added to the op queue, to ensure the polynomial over Grumpkin, whose evaluation is accumulated_result, has at + * @details The Translator circuit builder evaluates a batched polynomial (representing the four op queue polynomials + * in UltraOp format) at a random challenge x. 
This evaluation result (called accumulated_result in translator) is + * included in the translator proof and verified against the equivalent computation performed by ECCVM (in + * verify_translation, establishing equivalence between ECCVM and UltraOp format). To ensure the accumulated_result + * doesn't reveal information about actual ecc operations in the transaction, when the proof is sent to the rollup, we + * add a random yet valid operation to the op queue. This guarantees the batched polynomial over Grumpkin contains at * least one random coefficient. */ void ClientIVC::hide_op_queue_accumulation_result(ClientCircuit& circuit) @@ -411,96 +529,83 @@ void ClientIVC::hide_op_queue_accumulation_result(ClientCircuit& circuit) circuit.queue_ecc_eq(); } -std::pair ClientIVC::complete_hiding_circuit_logic( - const StdlibProof& stdlib_proof, - const std::shared_ptr& stdlib_vk_and_hash, - ClientCircuit& circuit) +/** + * @brief Adds three random ops to the tail kernel. + * + * @note The explanation below does not serve as a proof of zero-knowledge but rather as intuition for why the number + * of random ops and their position in the op queue. + * + * @details The ClientIVC proof is sent to the rollup and so it has to be zero-knowledge. In turn, this implies that + * commitments and evaluations to the op queue, when regarded as 4 polynomials in UltraOp format (op, x_lo_y_hi, + * x_hi_z_1, y_lo_z_2), should not leak information about the actual content of the op queue with provenance from + * circuit operations that have been accumulated in CIVC. Since the op queue is used across several provers, + * randomising these polynomials has to be handled in a special way. Normally, to hide a witness we'd add random + * coefficients at proving time when populating ProverPolynomials. 
However, due to the consistency checks present + throughout CIVC, to ensure all components use the same op queue data (Merge and Translator on the entire op queue + table and Merge and Oink on each subtable), randomness has to be added in a common place, this place naturally + being ClientIVC. ECCVM is not affected by the concerns above, randomness being added to wires at proving time as per + usual, because the consistency of ECCVMOps processing and UltraOps processing between Translator and ECCVM is + achieved via the translation evaluation check and avoiding an information leak there is ensured by + `ClientIVC::hide_op_queue_accumulation_result()` and SmallSubgroupIPA in ECCVM. + * + * We need each op queue polynomial to have 9 random coefficients (so the op queue needs to contain 5 random ops, every + * UltraOp adding two coefficients to each of the 4 polynomials). + * + * For the last subtable of ecc ops belonging to the hiding kernel, merged via appending to the full op queue, its data + * appears as the ecc_op_wires in the MegaZK proof, wires that are not going to be shifted, so the proof contains, + * for each wire, its commitment and evaluation to the Sumcheck challenge. As at least 3 random coefficients are + * needed in each op queue polynomial, we add 2 random ops to the hiding kernel. + * + * The op queue state previous to the append of the last subtable, is the `left_table` in the merge protocol, so for + * the degree check, we construct its inverse polynomial `left_table_inverse`. The MergeProof will contain the + * commitment to the `left_table_inverse` plus its evaluation at Merge protocol challenge κ. Also for the degree check, + * prover needs to send the evaluation of the `left_table` at κ⁻¹. We need to ensure random coefficients are added to + * one of the kernels so as not to affect Apps verification keys so the best choice is to add them to the beginning of the + * tail kernel so as not to complicate Translator relations. 
The above advises that another 4 random coefficients are + needed in the `left_table` (so, 2 random ops). + * + * Finally, the 4 polynomials representing the full ecc op queue table are committed to (in fact, in both Merge + * protocol and Translator but they are commitments to the same data). `x_lo_y_hi`, `x_hi_z_1` and `y_lo_z_2` are + * shifted polynomials in Translator so the Translator proof will contain their evaluation and evaluation of their + * shifts at the Sumcheck challenge. On top of that, the Shplonk proof sent in the last iteration of Merge also + * ascertains the opening of partially_evaluated_difference = left_table + κ^{shift -1 } * right_table - merged_table + * at κ is 0, so a batched quotient commitment is sent in the Merge proof. In total, for each op queue polynomial (or + * parts of its data), there are 4 commitments and 5 evaluations across the CIVC proof so the sweet spot is 5 random + * ops. + */ +void ClientIVC::hide_op_queue_content_in_tail(ClientCircuit& circuit) { - using MergeCommitments = Goblin::MergeRecursiveVerifier::InputCommitments; - trace_usage_tracker.print(); - - // Shared transcript between PG and Merge - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1453): Investigate whether Decider/PG/Merge need to - // share a transcript - std::shared_ptr pg_merge_transcript = std::make_shared(); - - // Add a no-op at the beginning of the hiding circuit to ensure the wires representing the op queue in translator - // circuit are shiftable polynomials, i.e. their 0th coefficient is equal to 0. - circuit.queue_ecc_no_op(); - - hide_op_queue_accumulation_result(circuit); - - // Construct stdlib accumulator, decider vkey and folding proof - auto stdlib_verifier_accumulator = - std::make_shared(&circuit, recursive_verifier_native_accum); - - // Propagate the public inputs of the tail kernel by converting them to public inputs of the hiding circuit. 
- auto num_public_inputs = static_cast(honk_vk->num_public_inputs); - num_public_inputs -= KernelIO::PUBLIC_INPUTS_SIZE; // exclude fixed kernel_io public inputs - for (size_t i = 0; i < num_public_inputs; i++) { - stdlib_proof[i].set_public(); - } - - // Perform recursive folding verification of the last folding proof - FoldingRecursiveVerifier folding_verifier{ - &circuit, stdlib_verifier_accumulator, { stdlib_vk_and_hash }, pg_merge_transcript - }; - auto recursive_verifier_native_accum = folding_verifier.verify_folding_proof(stdlib_proof); - verification_queue.clear(); - - // Get the completed decider verification key corresponding to the tail kernel from the folding verifier - const std::vector& public_inputs = folding_verifier.public_inputs; - WitnessCommitments& witness_commitments = folding_verifier.keys_to_fold[1]->witness_commitments; - - // Reconstruct the KernelIO from the public inputs of the tail kernel and perform databus consistency checks - KernelIO kernel_input; // pairing points, databus return data commitments - kernel_input.reconstruct_from_public(public_inputs); - kernel_input.kernel_return_data.assert_equal(witness_commitments.calldata); - kernel_input.app_return_data.assert_equal(witness_commitments.secondary_calldata); - - // Extract the commitments to the subtable corresponding to the incoming circuit - MergeCommitments merge_commitments; - merge_commitments.t_commitments = witness_commitments.get_ecc_op_wires().get_copy(); - merge_commitments.T_prev_commitments = std::move( - kernel_input.ecc_op_tables); // Commitment to the status of the op_queue before folding the tail kernel - // Perform recursive verification of the last merge proof - auto [points_accumulator, merged_table_commitments] = - goblin.recursively_verify_merge(circuit, merge_commitments, pg_merge_transcript); - - points_accumulator.aggregate(kernel_input.pairing_inputs); - - // Perform recursive decider verification - DeciderRecursiveVerifier decider{ &circuit, 
recursive_verifier_native_accum }; - BB_ASSERT_EQ(!decider_proof.empty(), true, "Decider proof is empty!"); - PairingPoints decider_pairing_points = decider.verify_proof(decider_proof); - points_accumulator.aggregate(decider_pairing_points); - return { points_accumulator, merged_table_commitments }; + circuit.queue_ecc_random_op(); + circuit.queue_ecc_random_op(); + circuit.queue_ecc_random_op(); } /** - * @brief Construct the proving key of the hiding circuit, from the hiding_circuit builder in the client_ivc class + * @brief Adds two random ops to the hiding kernel. + * + * @details For the last subtable of ecc ops belonging to the hiding kernel, merged via appending to the full op + * queue, its data appears as the ecc_op_wires in the MegaZK proof, wires that are not going to be shifted, so the proof + * contains, for each wire, its commitment and evaluation to the Sumcheck challenge. As at least 3 random coefficients + * are needed in each op queue polynomial, we add 2 random ops. More details in `hide_op_queue_content_in_tail`. */ -std::shared_ptr ClientIVC::compute_hiding_circuit_proving_key() +void ClientIVC::hide_op_queue_content_in_hiding(ClientCircuit& circuit) { - auto hiding_decider_pk = - std::make_shared(*hiding_circuit, TraceSettings(), bn254_commitment_key); - return hiding_decider_pk; + circuit.queue_ecc_random_op(); + circuit.queue_ecc_random_op(); } /** - * @brief Construct a zero-knowledge proof for the hiding circuit, which recursively verifies the last folding, merge - * and decider proof. - * - * @return HonkProof - a ZK Mega proof + * @brief Construct a zero-knowledge proof for the hiding circuit, which recursively verifies the last folding, + * merge and decider proof. 
*/ -HonkProof ClientIVC::prove_hiding_circuit() +HonkProof ClientIVC::construct_honk_proof_for_hiding_kernel( + ClientCircuit& circuit, const std::shared_ptr& verification_key) { - ASSERT(hiding_circuit != nullptr, "hiding circuit should have been constructed before attempted to create its key"); - auto hiding_decider_pk = compute_hiding_circuit_proving_key(); - honk_vk = std::make_shared(hiding_decider_pk->get_precomputed()); - auto& hiding_circuit_vk = honk_vk; + // Note: a structured trace is not used for the hiding kernel + auto hiding_prover_inst = std::make_shared(circuit, TraceSettings(), bn254_commitment_key); + // Hiding circuit is proven by a MegaZKProver - MegaZKProver prover(hiding_decider_pk, hiding_circuit_vk, transcript); + MegaZKProver prover(hiding_prover_inst, verification_key, transcript); HonkProof proof = prover.construct_proof(); return proof; @@ -514,15 +619,19 @@ HonkProof ClientIVC::prove_hiding_circuit() ClientIVC::Proof ClientIVC::prove() { // deallocate the protogalaxy accumulator - fold_output.accumulator = nullptr; - - auto mega_proof = prove_hiding_circuit(); + prover_accumulator = nullptr; + auto mega_proof = verification_queue.front().proof; // A transcript is shared between the Hiding circuit prover and the Goblin prover goblin.transcript = transcript; - // Prove ECCVM and Translator - return { mega_proof, goblin.prove() }; + // Returns a proof for the hiding circuit and the Goblin proof. The latter consists of Translator and ECCVM proof + // for the whole ecc op table and the merge proof for appending the subtable coming from the hiding circuit. The + // final merging is done via appending to facilitate creating a zero-knowledge merge proof. This enables us to add + // randomness to the beginning of the tail kernel and the end of the hiding kernel, hiding the commitments and + // evaluations of both the previous table and the incoming subtable. 
+ // https://github.com/AztecProtocol/barretenberg/issues/1360 + return { mega_proof, goblin.prove(MergeSettings::APPEND) }; }; bool ClientIVC::verify(const Proof& proof, const VerificationKey& vk) @@ -534,74 +643,88 @@ bool ClientIVC::verify(const Proof& proof, const VerificationKey& vk) MegaZKVerifier verifier{ vk.mega, /*ipa_verification_key=*/{}, civc_verifier_transcript }; auto [mega_verified, T_prev_commitments] = verifier.template verify_proof(proof.mega_proof); vinfo("Mega verified: ", mega_verified); - // Extract the commitments to the subtable corresponding to the incoming circuit - TableCommitments t_commitments = verifier.verification_key->witness_commitments.get_ecc_op_wires().get_copy(); + TableCommitments t_commitments = verifier.verifier_instance->witness_commitments.get_ecc_op_wires().get_copy(); // Goblin verification (final merge, eccvm, translator) - bool goblin_verified = - Goblin::verify(proof.goblin_proof, { t_commitments, T_prev_commitments }, civc_verifier_transcript); + bool goblin_verified = Goblin::verify( + proof.goblin_proof, { t_commitments, T_prev_commitments }, civc_verifier_transcript, MergeSettings::APPEND); vinfo("Goblin verified: ", goblin_verified); // TODO(https://github.com/AztecProtocol/barretenberg/issues/1396): State tracking in CIVC verifiers. 
return goblin_verified && mega_verified; } -/** - * @brief Verify a full proof of the IVC - * - * @param proof - * @return bool - */ -bool ClientIVC::verify(const Proof& proof) const -{ - return verify(proof, get_vk()); -} - /** * @brief Internal method for constructing a decider proof * * @return HonkProof */ -HonkProof ClientIVC::decider_prove() +HonkProof ClientIVC::construct_decider_proof(const std::shared_ptr& transcript) { vinfo("prove decider..."); - fold_output.accumulator->commitment_key = bn254_commitment_key; - MegaDeciderProver decider_prover(fold_output.accumulator); + prover_accumulator->commitment_key = bn254_commitment_key; + MegaDeciderProver decider_prover(prover_accumulator, transcript); decider_prover.construct_proof(); return decider_prover.export_proof(); } -/** - * @brief Construct and verify a proof for the IVC - * @note Use of this method only makes sense when the prover and verifier are the same entity, e.g. in - * development/testing. - * - */ -bool ClientIVC::prove_and_verify() -{ - auto start = std::chrono::steady_clock::now(); - const auto proof = prove(); - auto end = std::chrono::steady_clock::now(); - auto diff = std::chrono::duration_cast(end - start); - vinfo("time to call ClientIVC::prove: ", diff.count(), " ms."); - - start = end; - const bool verified = verify(proof); - end = std::chrono::steady_clock::now(); - - diff = std::chrono::duration_cast(end - start); - vinfo("time to verify ClientIVC proof: ", diff.count(), " ms."); - - return verified; -} - // Proof methods size_t ClientIVC::Proof::size() const { return mega_proof.size() + goblin_proof.size(); } +std::vector ClientIVC::Proof::to_field_elements() const +{ + HonkProof proof; + + proof.insert(proof.end(), mega_proof.begin(), mega_proof.end()); + proof.insert(proof.end(), goblin_proof.merge_proof.begin(), goblin_proof.merge_proof.end()); + proof.insert( + proof.end(), goblin_proof.eccvm_proof.pre_ipa_proof.begin(), goblin_proof.eccvm_proof.pre_ipa_proof.end()); + 
proof.insert(proof.end(), goblin_proof.eccvm_proof.ipa_proof.begin(), goblin_proof.eccvm_proof.ipa_proof.end()); + proof.insert(proof.end(), goblin_proof.translator_proof.begin(), goblin_proof.translator_proof.end()); + return proof; +}; + +ClientIVC::Proof ClientIVC::Proof::from_field_elements(const std::vector& fields) +{ + HonkProof mega_proof; + GoblinProof goblin_proof; + + size_t custom_public_inputs_size = fields.size() - ClientIVC::Proof::PROOF_LENGTH(); + + // Mega proof + auto start_idx = fields.begin(); + auto end_idx = start_idx + static_cast( + MegaZKFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS(MegaZKFlavor::VIRTUAL_LOG_N) + + bb::HidingKernelIO::PUBLIC_INPUTS_SIZE + custom_public_inputs_size); + mega_proof.insert(mega_proof.end(), start_idx, end_idx); + + // Merge proof + start_idx = end_idx; + end_idx += static_cast(MERGE_PROOF_SIZE); + goblin_proof.merge_proof.insert(goblin_proof.merge_proof.end(), start_idx, end_idx); + + // ECCVM pre-ipa proof + start_idx = end_idx; + end_idx += static_cast(ECCVMFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS - IPA_PROOF_LENGTH); + goblin_proof.eccvm_proof.pre_ipa_proof.insert(goblin_proof.eccvm_proof.pre_ipa_proof.end(), start_idx, end_idx); + + // ECCVM ipa proof + start_idx = end_idx; + end_idx += static_cast(IPA_PROOF_LENGTH); + goblin_proof.eccvm_proof.ipa_proof.insert(goblin_proof.eccvm_proof.ipa_proof.end(), start_idx, end_idx); + + // Translator proof + start_idx = end_idx; + end_idx += static_cast(TranslatorFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS); + goblin_proof.translator_proof.insert(goblin_proof.translator_proof.end(), start_idx, end_idx); + + return { mega_proof, goblin_proof }; +}; + msgpack::sbuffer ClientIVC::Proof::to_msgpack_buffer() const { msgpack::sbuffer buffer; @@ -670,7 +793,37 @@ ClientIVC::Proof ClientIVC::Proof::from_file_msgpack(const std::string& filename // VerificationKey construction ClientIVC::VerificationKey ClientIVC::get_vk() const { - return { honk_vk, std::make_shared(), 
std::make_shared() }; + BB_ASSERT_EQ(verification_queue.size(), 1UL); + BB_ASSERT_EQ(verification_queue.front().type == QUEUE_TYPE::MEGA, true); + auto verification_key = verification_queue.front().honk_vk; + return { verification_key, + std::make_shared(), + std::make_shared() }; +} + +void ClientIVC::update_native_verifier_accumulator(const VerifierInputs& queue_entry, + const std::shared_ptr& verifier_transcript) +{ + auto verifier_inst = std::make_shared(queue_entry.honk_vk); + if (queue_entry.type == QUEUE_TYPE::OINK) { + verifier_transcript->load_proof(queue_entry.proof); + OinkVerifier oink_verifier{ verifier_inst, verifier_transcript }; + oink_verifier.verify(); + native_verifier_accum = verifier_inst; + native_verifier_accum->target_sum = 0; + // Get the gate challenges for sumcheck/combiner computation + native_verifier_accum->gate_challenges = + verifier_transcript->template get_powers_of_challenge("gate_challenge", CONST_PG_LOG_N); + } else { + if (queue_entry.is_kernel) { + // Fiat-Shamir the verifier accumulator + FF accum_hash = native_verifier_accum->hash_through_transcript("", *verifier_transcript); + verifier_transcript->add_to_hash_buffer("accum_hash", accum_hash); + info("Accumulator hash in PG verifier: ", accum_hash); + } + FoldingVerifier folding_verifier({ native_verifier_accum, verifier_inst }, verifier_transcript); + native_verifier_accum = folding_verifier.verify_folding_proof(queue_entry.proof); + } } } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.hpp b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.hpp index 10bacc6ae82d..e45cfdfefcd1 100644 --- a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.hpp +++ b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.hpp @@ -18,7 +18,6 @@ #include "barretenberg/stdlib/proof/proof.hpp" #include "barretenberg/stdlib/protogalaxy_verifier/protogalaxy_recursive_verifier.hpp" #include 
"barretenberg/stdlib/special_public_inputs/special_public_inputs.hpp" -#include "barretenberg/ultra_honk/decider_keys.hpp" #include "barretenberg/ultra_honk/decider_prover.hpp" #include "barretenberg/ultra_honk/decider_verifier.hpp" #include "barretenberg/ultra_honk/ultra_prover.hpp" @@ -45,16 +44,14 @@ class ClientIVC { using FF = Flavor::FF; using Point = Flavor::Curve::AffineElement; using FoldProof = std::vector; - using DeciderProvingKey = DeciderProvingKey_; - using DeciderZKProvingKey = DeciderProvingKey_; - using DeciderVerificationKey = DeciderVerificationKey_; + using ProverInstance = ProverInstance_; + using DeciderZKProvingKey = ProverInstance_; + using VerifierInstance = VerifierInstance_; using ClientCircuit = MegaCircuitBuilder; // can only be Mega using DeciderProver = DeciderProver_; using DeciderVerifier = DeciderVerifier_; - using DeciderProvingKeys = DeciderProvingKeys_; using FoldingProver = ProtogalaxyProver_; - using DeciderVerificationKeys = DeciderVerificationKeys_; - using FoldingVerifier = ProtogalaxyVerifier_; + using FoldingVerifier = ProtogalaxyVerifier_; using ECCVMVerificationKey = bb::ECCVMFlavor::VerificationKey; using TranslatorVerificationKey = bb::TranslatorFlavor::VerificationKey; using MegaProver = UltraProver_; @@ -62,13 +59,11 @@ class ClientIVC { using Transcript = NativeTranscript; using RecursiveFlavor = MegaRecursiveFlavor_; - using RecursiveDeciderVerificationKeys = - bb::stdlib::recursion::honk::RecursiveDeciderVerificationKeys_; - using RecursiveDeciderVerificationKey = RecursiveDeciderVerificationKeys::DeciderVK; + using RecursiveVerifierInstance = stdlib::recursion::honk::RecursiveVerifierInstance_; using RecursiveVerificationKey = RecursiveFlavor::VerificationKey; using RecursiveVKAndHash = RecursiveFlavor::VKAndHash; using FoldingRecursiveVerifier = - bb::stdlib::recursion::honk::ProtogalaxyRecursiveVerifier_; + bb::stdlib::recursion::honk::ProtogalaxyRecursiveVerifier_; using OinkRecursiveVerifier = 
stdlib::recursion::honk::OinkRecursiveVerifier_; using DeciderRecursiveVerifier = stdlib::recursion::honk::DeciderRecursiveVerifier_; using RecursiveTranscript = RecursiveFlavor::Transcript; @@ -97,8 +92,44 @@ class ClientIVC { HonkProof mega_proof; GoblinProof goblin_proof; + /** + * @brief The size of a ClientIVC proof without backend-added public inputs + * + * @param virtual_log_n + * @return constexpr size_t + */ + static constexpr size_t PROOF_LENGTH_WITHOUT_PUB_INPUTS(size_t virtual_log_n = MegaZKFlavor::VIRTUAL_LOG_N) + { + return /*mega_proof*/ MegaZKFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS(virtual_log_n) + + /*merge_proof*/ MERGE_PROOF_SIZE + + /*eccvm pre-ipa proof*/ (ECCVMFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS - IPA_PROOF_LENGTH) + + /*eccvm ipa proof*/ IPA_PROOF_LENGTH + + /*translator*/ TranslatorFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS; + } + + /** + * @brief The size of a ClientIVC proof with backend-added public inputs: HidingKernelIO + * + * @param virtual_log_n + * @return constexpr size_t + */ + static constexpr size_t PROOF_LENGTH(size_t virtual_log_n = MegaZKFlavor::VIRTUAL_LOG_N) + { + return PROOF_LENGTH_WITHOUT_PUB_INPUTS(virtual_log_n) + + /*public_inputs*/ bb::HidingKernelIO::PUBLIC_INPUTS_SIZE; + } + size_t size() const; + /** + * @brief Serialize proof to field elements + * + * @return std::vector + */ + std::vector to_field_elements() const; + + static Proof from_field_elements(const std::vector& fields); + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1299): The following msgpack methods are generic // and should leverage some kind of shared msgpack utility. 
msgpack::sbuffer to_msgpack_buffer() const; @@ -111,7 +142,7 @@ class ClientIVC { * @return uint8_t* Double size-prefixed msgpack buffer */ uint8_t* to_msgpack_heap_buffer() const; - static constexpr const char* MSGPACK_SCHEMA_NAME = "ClientIVCProof"; + static constexpr const char MSGPACK_SCHEMA_NAME[] = "ClientIVCProof"; class DeserializationError : public std::runtime_error { public: @@ -135,10 +166,69 @@ class ClientIVC { std::shared_ptr eccvm; std::shared_ptr translator; - MSGPACK_FIELDS(mega, eccvm, translator); + /** + * @brief Calculate the number of field elements needed for serialization + * @return size_t Number of field elements + */ + static size_t calc_num_data_types() + { + return MegaVerificationKey::calc_num_data_types() + ECCVMVerificationKey::calc_num_data_types() + + TranslatorVerificationKey::calc_num_data_types(); + } + + /** + * @brief Serialize verification key to field elements + * @return std::vector The serialized field elements + */ + std::vector to_field_elements() const + { + std::vector elements; + + auto mega_elements = mega->to_field_elements(); + elements.insert(elements.end(), mega_elements.begin(), mega_elements.end()); + + auto eccvm_elements = eccvm->to_field_elements(); + elements.insert(elements.end(), eccvm_elements.begin(), eccvm_elements.end()); + + auto translator_elements = translator->to_field_elements(); + elements.insert(elements.end(), translator_elements.begin(), translator_elements.end()); + + return elements; + } + + /** + * @brief Deserialize verification key from field elements + * @param elements The field elements to deserialize from + * @return size_t Number of field elements read + */ + size_t from_field_elements(std::span elements) + { + size_t read_idx = 0; + + mega = std::make_shared(); + size_t mega_read = mega->from_field_elements(elements.subspan(read_idx)); + read_idx += mega_read; + + eccvm = std::make_shared(); + size_t eccvm_read = eccvm->from_field_elements(elements.subspan(read_idx)); + read_idx 
+= eccvm_read; + + translator = std::make_shared(); + size_t translator_read = translator->from_field_elements(elements.subspan(read_idx)); + read_idx += translator_read; + + return read_idx; + } }; - enum class QUEUE_TYPE { OINK, PG, PG_FINAL, PG_TAIL }; // for specifying type of proof in the verification queue + // Specifies proof type or equivalently the type of recursive verification to be performed on a given proof + enum class QUEUE_TYPE { + OINK, + PG, + PG_FINAL, // the final PG verification, used in hiding kernel + PG_TAIL, // used in tail to indicate special handling of merge for ZK + MEGA + }; // An entry in the native verification queue struct VerifierInputs { @@ -162,29 +252,22 @@ class ClientIVC { ExecutionTraceUsageTracker trace_usage_tracker; private: - using ProverFoldOutput = FoldingResult; - // Transcript for CIVC prover (shared between Hiding circuit, Merge, ECCVM, and Translator) std::shared_ptr transcript = std::make_shared(); // Transcript to be shared across the folding of K_{i-1} (kernel), A_{i,1} (app), .., A_{i, n} std::shared_ptr prover_accumulation_transcript = std::make_shared(); - std::unique_ptr hiding_circuit; - size_t num_circuits; // total number of circuits to be accumulated in the IVC public: size_t num_circuits_accumulated = 0; // number of circuits accumulated so far - ProverFoldOutput fold_output; // prover accumulator and fold proof - HonkProof decider_proof; // decider proof to be verified in the hiding circuit - HonkProof mega_proof; // proof of the hiding circuit + std::shared_ptr prover_accumulator; // current PG prover accumulator instance + HonkProof decider_proof; // decider proof to be verified in the hiding circuit - std::shared_ptr - recursive_verifier_native_accum; // native verifier accumulator used in recursive folding - std::shared_ptr - native_verifier_accum; // native verifier accumulator used in prover folding - std::shared_ptr honk_vk; // honk vk to be completed and folded into the accumulator + 
std::shared_ptr + recursive_verifier_native_accum; // native verifier accumulator used in recursive folding + std::shared_ptr native_verifier_accum; // native verifier accumulator used in prover folding // Set of tuples {proof, verification_key, type (Oink/PG)} to be recursively verified VerificationQueue verification_queue; @@ -208,22 +291,18 @@ class ClientIVC { void instantiate_stdlib_verification_queue(ClientCircuit& circuit, const std::vector>& input_keys = {}); - [[nodiscard("Pairing points should be accumulated")]] std::pair - perform_recursive_verification_and_databus_consistency_checks( - ClientCircuit& circuit, - const StdlibVerifierInputs& verifier_inputs, - const TableCommitments& T_prev_commitments, - const std::shared_ptr& accumulation_recursive_transcript); + [[nodiscard("Pairing points should be accumulated")]] std:: + tuple, PairingPoints, TableCommitments> + perform_recursive_verification_and_databus_consistency_checks( + ClientCircuit& circuit, + const StdlibVerifierInputs& verifier_inputs, + const std::shared_ptr& input_verifier_accumulator, + const TableCommitments& T_prev_commitments, + const std::shared_ptr& accumulation_recursive_transcript); // Complete the logic of a kernel circuit (e.g. PG/merge recursive verification, databus consistency checks) void complete_kernel_circuit_logic(ClientCircuit& circuit); - // Complete the logic of the hiding circuit, which includes PG, decider and merge recursive verification - std::pair complete_hiding_circuit_logic( - const StdlibProof& stdlib_proof, - const std::shared_ptr& stdlib_vk_and_hash, - ClientCircuit& circuit); - /** * @brief Perform prover work for accumulation (e.g. 
PG folding, merge proving) * @@ -236,20 +315,82 @@ class ClientIVC { Proof prove(); - std::shared_ptr construct_hiding_circuit_key(); - std::shared_ptr compute_hiding_circuit_proving_key(); static void hide_op_queue_accumulation_result(ClientCircuit& circuit); - HonkProof prove_hiding_circuit(); + static void hide_op_queue_content_in_tail(ClientCircuit& circuit); + static void hide_op_queue_content_in_hiding(ClientCircuit& circuit); static bool verify(const Proof& proof, const VerificationKey& vk); - bool verify(const Proof& proof) const; + HonkProof construct_decider_proof(const std::shared_ptr& transcript); + + VerificationKey get_vk() const; + + private: + /** + * @brief Runs either Oink or PG native verifier to update the native verifier accumulator + * + * @param queue_entry The verifier inputs from the queue. + * @param verifier_transcript Verifier transcript corresponding to the prover transcript. + */ + void update_native_verifier_accumulator(const VerifierInputs& queue_entry, + const std::shared_ptr& verifier_transcript); - bool prove_and_verify(); + HonkProof construct_oink_proof(const std::shared_ptr& prover_instance, + const std::shared_ptr& honk_vk, + const std::shared_ptr& transcript); - HonkProof decider_prove(); + HonkProof construct_pg_proof(const std::shared_ptr& prover_instance, + const std::shared_ptr& honk_vk, + const std::shared_ptr& transcript, + bool is_kernel); - VerificationKey get_vk() const; + HonkProof construct_honk_proof_for_hiding_kernel(ClientCircuit& circuit, + const std::shared_ptr& verification_key); + + QUEUE_TYPE get_queue_type() const; + + static std::shared_ptr perform_oink_recursive_verification( + ClientCircuit& circuit, + const std::shared_ptr& verifier_instance, + const std::shared_ptr& transcript, + const StdlibProof& proof); + + static std::shared_ptr perform_pg_recursive_verification( + ClientCircuit& circuit, + const std::shared_ptr& verifier_accumulator, + const std::shared_ptr& verifier_instance, + const 
std::shared_ptr& transcript, + const StdlibProof& proof, + std::optional& prev_accum_hash, + bool is_kernel); }; +// Serialization methods for ClientIVC::VerificationKey +inline void read(uint8_t const*& it, ClientIVC::VerificationKey& vk) +{ + using serialize::read; + + size_t num_frs = ClientIVC::VerificationKey::calc_num_data_types(); + + // Read exactly num_frs field elements from the buffer + std::vector field_elements(num_frs); + for (auto& element : field_elements) { + read(it, element); + } + + // Then use from_field_elements to populate the verification key + vk.from_field_elements(field_elements); +} + +inline void write(std::vector& buf, ClientIVC::VerificationKey const& vk) +{ + using serialize::write; + + // Convert to field elements and write them directly without length prefix + auto field_elements = vk.to_field_elements(); + for (const auto& element : field_elements) { + write(buf, element); + } +} + } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.test.cpp b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.test.cpp index bf02f4b34c78..371a574de86f 100644 --- a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.test.cpp +++ b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.test.cpp @@ -15,9 +15,9 @@ using namespace bb; -static constexpr size_t MAX_NUM_KERNELS = 15; static constexpr size_t SMALL_LOG_2_NUM_GATES = 5; -static constexpr size_t MEDIUM_LOG_2_NUM_GATES = 16; +// TODO(https://github.com/AztecProtocol/barretenberg/issues/1511): The CIVC class should enforce the minimum number of +// circuits in a test flow. 
class ClientIVCTests : public ::testing::Test { protected: @@ -28,21 +28,20 @@ class ClientIVCTests : public ::testing::Test { using Commitment = Flavor::Commitment; using VerificationKey = Flavor::VerificationKey; using Builder = ClientIVC::ClientCircuit; - using DeciderProvingKey = ClientIVC::DeciderProvingKey; - using DeciderVerificationKey = ClientIVC::DeciderVerificationKey; + using ProverInstance = ClientIVC::ProverInstance; + using VerifierInstance = ClientIVC::VerifierInstance; using FoldProof = ClientIVC::FoldProof; using DeciderProver = ClientIVC::DeciderProver; using DeciderVerifier = ClientIVC::DeciderVerifier; - using DeciderProvingKeys = DeciderProvingKeys_; using FoldingProver = ProtogalaxyProver_; - using DeciderVerificationKeys = DeciderVerificationKeys_; - using FoldingVerifier = ProtogalaxyVerifier_; + using FoldingVerifier = ProtogalaxyVerifier_; + using CircuitProducer = PrivateFunctionExecutionMockCircuitProducer; + public: /** * @brief Tamper with a proof - * - * @details The first value in the proof after the public inputs is the commitment to the wire w.l (see OinkProver). - * We modify the commitment by adding Commitment::one(). + * @details The first value in the proof after the public inputs is the commitment to the wire w.l (see + * OinkProver). We modify the commitment by adding Commitment::one(). 
* */ static void tamper_with_proof(FoldProof& proof, size_t public_inputs_offset) @@ -57,51 +56,31 @@ class ClientIVCTests : public ::testing::Test { } } - static std::pair generate_ivc_proof(size_t num_circuits) + static std::pair accumulate_and_prove_ivc(size_t num_app_circuits, + TestSettings settings = {}) { - ClientIVC ivc{ num_circuits, { SMALL_TEST_STRUCTURE } }; - PrivateFunctionExecutionMockCircuitProducer circuit_producer; + CircuitProducer circuit_producer(num_app_circuits); + const size_t num_circuits = circuit_producer.total_num_circuits; + TraceSettings trace_settings{ AZTEC_TRACE_STRUCTURE }; + ClientIVC ivc{ num_circuits, trace_settings }; + for (size_t j = 0; j < num_circuits; ++j) { - circuit_producer.construct_and_accumulate_next_circuit(ivc); + circuit_producer.construct_and_accumulate_next_circuit(ivc, settings); } return { ivc.prove(), ivc.get_vk() }; }; }; /** - * @brief A simple-as-possible test demonstrating IVC for two mock circuits - * @details When accumulating only two circuits, only a single round of folding is performed thus no recursive - * verification occurs. - * - */ -TEST_F(ClientIVCTests, Basic) -{ - ClientIVC ivc{ /*num_circuits=*/2 }; - TestSettings settings{ .log2_num_gates = MEDIUM_LOG_2_NUM_GATES }; - PrivateFunctionExecutionMockCircuitProducer circuit_producer; - // Initialize the IVC with an arbitrary circuit - circuit_producer.construct_and_accumulate_next_circuit(ivc, settings); - // Create another circuit and accumulate - circuit_producer.construct_and_accumulate_next_circuit(ivc, settings); - - EXPECT_TRUE(ivc.prove_and_verify()); -}; - -/** - * @brief A simple test demonstrating IVC for four mock circuits, which is slightly more than minimal. - * @details When accumulating only four circuits, we execute all the functionality of a full ClientIVC run. 
+ * @brief Using a structured trace allows for the accumulation of circuits of varying size * */ -TEST_F(ClientIVCTests, BasicFour) +TEST_F(ClientIVCTests, BasicStructured) { - ClientIVC ivc{ /*num_circuits=*/4 }; + const size_t NUM_APP_CIRCUITS = 1; + auto [proof, vk] = accumulate_and_prove_ivc(NUM_APP_CIRCUITS); - PrivateFunctionExecutionMockCircuitProducer circuit_producer; - for (size_t idx = 0; idx < 4; ++idx) { - circuit_producer.construct_and_accumulate_next_circuit(ivc); - } - - EXPECT_TRUE(ivc.prove_and_verify()); + EXPECT_TRUE(ClientIVC::verify(proof, vk)); }; /** @@ -113,26 +92,31 @@ TEST_F(ClientIVCTests, BasicFour) */ TEST_F(ClientIVCTests, BadProofFailure) { - const size_t NUM_CIRCUITS = 4; + BB_DISABLE_ASSERTS(); // Disable assert in PG prover + + const size_t NUM_APP_CIRCUITS = 2; + TraceSettings trace_settings{ SMALL_TEST_STRUCTURE }; // Confirm that the IVC verifies if nothing is tampered with { - ClientIVC ivc{ NUM_CIRCUITS, { SMALL_TEST_STRUCTURE } }; - PrivateFunctionExecutionMockCircuitProducer circuit_producer; + CircuitProducer circuit_producer(NUM_APP_CIRCUITS); + const size_t NUM_CIRCUITS = circuit_producer.total_num_circuits; + ClientIVC ivc{ NUM_CIRCUITS, trace_settings }; TestSettings settings{ .log2_num_gates = SMALL_LOG_2_NUM_GATES }; // Construct and accumulate a set of mocked private function execution circuits for (size_t idx = 0; idx < NUM_CIRCUITS; ++idx) { circuit_producer.construct_and_accumulate_next_circuit(ivc, settings); } - EXPECT_TRUE(ivc.prove_and_verify()); + auto proof = ivc.prove(); + EXPECT_TRUE(ClientIVC::verify(proof, ivc.get_vk())); } // The IVC throws an exception if the FIRST fold proof is tampered with { - ClientIVC ivc{ NUM_CIRCUITS, { SMALL_TEST_STRUCTURE } }; - - PrivateFunctionExecutionMockCircuitProducer circuit_producer; + CircuitProducer circuit_producer(NUM_APP_CIRCUITS); + const size_t NUM_CIRCUITS = circuit_producer.total_num_circuits; + ClientIVC ivc{ NUM_CIRCUITS, trace_settings }; size_t 
num_public_inputs = 0; @@ -152,15 +136,15 @@ TEST_F(ClientIVCTests, BadProofFailure) num_public_inputs); // tamper with first proof } } - circuit_producer.construct_hiding_kernel(ivc); - EXPECT_FALSE(ivc.prove_and_verify()); + auto proof = ivc.prove(); + EXPECT_FALSE(ClientIVC::verify(proof, ivc.get_vk())); } // The IVC fails if the SECOND fold proof is tampered with { - ClientIVC ivc{ NUM_CIRCUITS, { SMALL_TEST_STRUCTURE } }; - - PrivateFunctionExecutionMockCircuitProducer circuit_producer; + CircuitProducer circuit_producer(NUM_APP_CIRCUITS); + const size_t NUM_CIRCUITS = circuit_producer.total_num_circuits; + ClientIVC ivc{ NUM_CIRCUITS, trace_settings }; // Construct and accumulate a set of mocked private function execution circuits for (size_t idx = 0; idx < NUM_CIRCUITS; ++idx) { @@ -174,77 +158,27 @@ TEST_F(ClientIVCTests, BadProofFailure) circuit.num_public_inputs()); // tamper with second proof } } - circuit_producer.construct_hiding_kernel(ivc); - EXPECT_FALSE(ivc.prove_and_verify()); - } - - // The IVC fails if the 3rd/FINAL fold proof is tampered with - { - ClientIVC ivc{ NUM_CIRCUITS, { SMALL_TEST_STRUCTURE } }; - - PrivateFunctionExecutionMockCircuitProducer circuit_producer; - - size_t num_public_inputs = 0; - - // Construct and accumulate a set of mocked private function execution circuits - for (size_t idx = 0; idx < NUM_CIRCUITS; ++idx) { - auto [circuit, vk] = - circuit_producer.create_next_circuit_and_vk(ivc, { .log2_num_gates = SMALL_LOG_2_NUM_GATES }); - ivc.accumulate(circuit, vk); - - if (idx == NUM_CIRCUITS - 1) { - num_public_inputs = circuit.num_public_inputs(); - } - } - - // Only a single proof should be present in the queue when verification of the IVC is performed - EXPECT_EQ(ivc.verification_queue.size(), 1); - tamper_with_proof(ivc.verification_queue[0].proof, - num_public_inputs); // tamper with the final fold proof - - circuit_producer.construct_hiding_kernel(ivc); - - EXPECT_FALSE(ivc.prove_and_verify()); + auto proof = 
ivc.prove(); + EXPECT_FALSE(ClientIVC::verify(proof, ivc.get_vk())); } EXPECT_TRUE(true); }; /** - * @brief Using a structured trace allows for the accumulation of circuits of varying size - * - */ -TEST_F(ClientIVCTests, BasicStructured) -{ - const size_t NUM_CIRCUITS = 4; - TestSettings settings; - ClientIVC ivc{ NUM_CIRCUITS, { SMALL_TEST_STRUCTURE } }; - - PrivateFunctionExecutionMockCircuitProducer circuit_producer; - // Construct and accumulate some circuits of varying size - for (size_t idx = 0; idx < NUM_CIRCUITS; ++idx) { - settings.log2_num_gates = SMALL_LOG_2_NUM_GATES + idx; - circuit_producer.construct_and_accumulate_next_circuit(ivc, settings); - } - - EXPECT_TRUE(ivc.prove_and_verify()); -}; - -/** - * @brief Produce 2 valid CIVC proofs. Ensure that replacing a proof component with a component from a different - proof + * @brief Produce 2 valid CIVC proofs. Ensure that replacing a proof component with a component from a different proof * leads to a verification failure. * */ TEST_F(ClientIVCTests, WrongProofComponentFailure) { // Produce two valid proofs - auto [civc_proof_1, civc_vk_1] = generate_ivc_proof(/*num_circuits=*/2); + auto [civc_proof_1, civc_vk_1] = accumulate_and_prove_ivc(/*num_app_circuits=*/1); { EXPECT_TRUE(ClientIVC::verify(civc_proof_1, civc_vk_1)); } - auto [civc_proof_2, civc_vk_2] = generate_ivc_proof(/*num_circuits=*/2); + auto [civc_proof_2, civc_vk_2] = accumulate_and_prove_ivc(/*num_app_circuits=*/1); { EXPECT_TRUE(ClientIVC::verify(civc_proof_2, civc_vk_2)); } @@ -292,46 +226,26 @@ TEST_F(ClientIVCTests, WrongProofComponentFailure) */ TEST_F(ClientIVCTests, VKIndependenceTest) { - const size_t MIN_NUM_CIRCUITS = 2; - // Folding more than 20 circuits requires to double the number of gates in Translator. 
- const size_t MAX_NUM_CIRCUITS = 20; const TestSettings settings{ .log2_num_gates = SMALL_LOG_2_NUM_GATES }; - auto generate_vk = [&](size_t num_circuits) { - ClientIVC ivc{ num_circuits, { SMALL_TEST_STRUCTURE } }; - PrivateFunctionExecutionMockCircuitProducer circuit_producer; - for (size_t j = 0; j < num_circuits; ++j) { - circuit_producer.construct_and_accumulate_next_circuit(ivc, settings); - } - ivc.prove(); - auto ivc_vk = ivc.get_vk(); - - // PCS verification keys will not match so set to null before comparing - ivc_vk.eccvm->pcs_verification_key = VerifierCommitmentKey(); - - return ivc_vk; - }; - - auto civc_vk_2 = generate_vk(MIN_NUM_CIRCUITS); - auto civc_vk_20 = generate_vk(MAX_NUM_CIRCUITS); + auto [unused_1, civc_vk_1] = accumulate_and_prove_ivc(/*num_app_circuits=*/1, settings); + auto [unused_2, civc_vk_2] = accumulate_and_prove_ivc(/*num_app_circuits=*/3, settings); // Check the equality of the Mega components of the ClientIVC VKeys. - EXPECT_EQ(*civc_vk_2.mega.get(), *civc_vk_20.mega.get()); + EXPECT_EQ(*civc_vk_1.mega.get(), *civc_vk_2.mega.get()); // Check the equality of the ECCVM components of the ClientIVC VKeys. - EXPECT_EQ(*civc_vk_2.eccvm.get(), *civc_vk_20.eccvm.get()); + EXPECT_EQ(*civc_vk_1.eccvm.get(), *civc_vk_2.eccvm.get()); // Check the equality of the Translator components of the ClientIVC VKeys. - EXPECT_EQ(*civc_vk_2.translator.get(), *civc_vk_20.translator.get()); + EXPECT_EQ(*civc_vk_1.translator.get(), *civc_vk_2.translator.get()); }; /** * @brief Ensure that the CIVC VK is independent of whether any of the circuits being accumulated overflows the * structured trace - * @details If one of the circuits being accumulated overflows the structured trace, the dyadic size of the - accumulator - * may increase. 
In this case we want to ensure that the CIVC VK (and in particular the hiding circuit VK) is - identical + * @details If one of the circuits being accumulated overflows the structured trace, the dyadic size of the accumulator + * may increase. In this case we want to ensure that the CIVC VK (and in particular the hiding circuit VK) is identical * to the non-overflow case. This requires, for example, that the padding_indicator_array logic used in somecheck is * functioning properly. */ @@ -339,95 +253,44 @@ TEST_F(ClientIVCTests, VKIndependenceWithOverflow) { // Run IVC for two sets of circuits: a nomical case where all circuits fit within the structured trace and an // "overflow" case where all (but importantly at least one) circuit overflows the structured trace. - const size_t NUM_CIRCUITS = 4; + const size_t NUM_APP_CIRCUITS = 1; const size_t log2_num_gates_nominal = 5; // number of gates in baseline mocked circuits const size_t log2_num_gates_overflow = 18; // number of gates in the "overflow" mocked circuit - TraceStructure trace_structure = SMALL_TEST_STRUCTURE; - - // Check that we will indeed overflow the trace structure - EXPECT_TRUE(1 << log2_num_gates_overflow > trace_structure.size()); // 1 << 18 > 1 << 16 - TestSettings settings; - auto generate_vk = [&](size_t num_circuits, size_t log2_num_gates) { - ClientIVC ivc{ num_circuits, { trace_structure } }; - PrivateFunctionExecutionMockCircuitProducer circuit_producer; - for (size_t j = 0; j < num_circuits; ++j) { - settings.log2_num_gates = log2_num_gates; - circuit_producer.construct_and_accumulate_next_circuit(ivc, settings); - } - ivc.prove(); - auto ivc_vk = ivc.get_vk(); - - // PCS verification keys will not match so set to null before comparing - ivc_vk.eccvm->pcs_verification_key = VerifierCommitmentKey(); - - return ivc_vk; - }; + const TestSettings settings_1{ .log2_num_gates = log2_num_gates_nominal }; + const TestSettings settings_2{ .log2_num_gates = log2_num_gates_overflow }; - auto 
civc_vk_nominal = generate_vk(NUM_CIRCUITS, log2_num_gates_nominal); - auto civc_vk_overflow = generate_vk(NUM_CIRCUITS, log2_num_gates_overflow); + auto [unused_1, civc_vk_1] = accumulate_and_prove_ivc(NUM_APP_CIRCUITS, settings_1); + auto [unused_2, civc_vk_2] = accumulate_and_prove_ivc(NUM_APP_CIRCUITS, settings_2); // Check the equality of the Mega components of the ClientIVC VKeys. - EXPECT_EQ(*civc_vk_nominal.mega.get(), *civc_vk_overflow.mega.get()); + EXPECT_EQ(*civc_vk_1.mega.get(), *civc_vk_2.mega.get()); // Check the equality of the ECCVM components of the ClientIVC VKeys. - EXPECT_EQ(*civc_vk_nominal.eccvm.get(), *civc_vk_overflow.eccvm.get()); + EXPECT_EQ(*civc_vk_1.eccvm.get(), *civc_vk_2.eccvm.get()); // Check the equality of the Translator components of the ClientIVC VKeys. - EXPECT_EQ(*civc_vk_nominal.translator.get(), *civc_vk_overflow.translator.get()); + EXPECT_EQ(*civc_vk_1.translator.get(), *civc_vk_2.translator.get()); }; /** - * @brief Test that running the benchmark suite with mocked verification keys will not error out. 
+ * @brief Test to establish the "max" number of apps that can be accumulated due to limitations on the ECCVM size + * */ -HEAVY_TEST(ClientIVCBenchValidation, Full6MockedVKs) -{ - const auto run_test = []() { - bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); - - const size_t total_num_circuits{ 12 }; - ClientIVC ivc{ total_num_circuits, { AZTEC_TRACE_STRUCTURE } }; - PrivateFunctionExecutionMockCircuitProducer circuit_producer; - auto mocked_vks = mock_vks(total_num_circuits); - perform_ivc_accumulation_rounds(total_num_circuits, ivc, mocked_vks, /* mock_vk */ true); - auto proof = ivc.prove(); - verify_ivc(proof, ivc); - }; - ASSERT_NO_FATAL_FAILURE(run_test()); -} - HEAVY_TEST(ClientIVCKernelCapacity, MaxCapacityPassing) { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); - const size_t total_num_circuits{ 2 * MAX_NUM_KERNELS }; - ClientIVC ivc{ total_num_circuits, { AZTEC_TRACE_STRUCTURE } }; - PrivateFunctionExecutionMockCircuitProducer circuit_producer; - for (size_t j = 0; j < total_num_circuits; ++j) { - circuit_producer.construct_and_accumulate_next_circuit(ivc); - } - auto proof = ivc.prove(); - bool verified = verify_ivc(proof, ivc); - EXPECT_TRUE(verified); -} - -HEAVY_TEST(ClientIVCKernelCapacity, MaxCapacityFailing) -{ - bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); + const size_t NUM_APP_CIRCUITS = 14; + auto [proof, vk] = ClientIVCTests::accumulate_and_prove_ivc(NUM_APP_CIRCUITS); - const size_t total_num_circuits{ 2 * (MAX_NUM_KERNELS + 1) }; - ClientIVC ivc{ total_num_circuits, { AZTEC_TRACE_STRUCTURE } }; - PrivateFunctionExecutionMockCircuitProducer circuit_producer; - for (size_t j = 0; j < total_num_circuits; ++j) { - circuit_producer.construct_and_accumulate_next_circuit(ivc); - } - EXPECT_ANY_THROW(ivc.prove()); -} + bool verified = ClientIVC::verify(proof, vk); + EXPECT_TRUE(verified); +}; /** * @brief Test use of structured trace overflow block mechanism - * @details Accumulate 4 circuits which have progressively 
more arithmetic gates. The final two overflow the - prescribed + * @details Accumulate 4 circuits which have progressively more arithmetic gates. The final two overflow the prescribed * arithmetic block size and make use of the overflow block which has sufficient capacity. * */ @@ -435,10 +298,11 @@ TEST_F(ClientIVCTests, StructuredTraceOverflow) { // Define trace settings with sufficient overflow capacity to accommodate each of the circuits to be accumulated - const size_t NUM_CIRCUITS = 4; + size_t NUM_APP_CIRCUITS = 1; + CircuitProducer circuit_producer(NUM_APP_CIRCUITS); + size_t NUM_CIRCUITS = circuit_producer.total_num_circuits; ClientIVC ivc{ NUM_CIRCUITS, { SMALL_TEST_STRUCTURE, /*overflow_capacity=*/1 << 17 } }; TestSettings settings; - PrivateFunctionExecutionMockCircuitProducer circuit_producer; // Construct and accumulate some circuits of varying size size_t log2_num_gates = 14; @@ -448,7 +312,8 @@ TEST_F(ClientIVCTests, StructuredTraceOverflow) log2_num_gates += 1; } - EXPECT_TRUE(ivc.prove_and_verify()); + auto proof = ivc.prove(); + EXPECT_TRUE(ClientIVC::verify(proof, ivc.get_vk())); }; /** @@ -466,20 +331,16 @@ TEST_F(ClientIVCTests, DynamicTraceOverflow) // accumulation. We distinguish between a simple overflow that exceeds one or more structured trace capacities but // does not bump the dyadic circuit size and an overflow that does increase the dyadic circuit size. 
std::vector test_cases = { - { "Case 1", { 18, 14 } }, /* first circuit overflows with dyadic size increase */ - { "Case 2", { 14, 16 } }, /* simple overlow (no dyadic size increase)*/ - { "Case 3", { 14, 18 } }, /* overflow with dyadic size increase*/ - { "Case 4", { 14, 18, 14, 16 } }, /* dyadic size overflow then simple overflow */ - { "Case 5", { 14, 16, 14, 18 } }, /* simple overflow then dyadic size overflow */ + { "Case 1", { 14, 18, 14, 16, 14 } }, /* dyadic size overflow then simple overflow */ + { "Case 2", { 14, 16, 14, 18, 14 } }, /* simple overflow then dyadic size overflow */ }; for (const auto& test : test_cases) { SCOPED_TRACE(test.name); // improves test output readability - uint32_t overflow_capacity = 0; - const size_t NUM_CIRCUITS = test.log2_num_arith_gates.size(); - ClientIVC ivc{ NUM_CIRCUITS, { SMALL_TEST_STRUCTURE_FOR_OVERFLOWS, overflow_capacity } }; - PrivateFunctionExecutionMockCircuitProducer circuit_producer; + CircuitProducer circuit_producer(/*num_app_circuits=*/1); + const size_t NUM_CIRCUITS = circuit_producer.total_num_circuits; + ClientIVC ivc{ NUM_CIRCUITS, { SMALL_TEST_STRUCTURE_FOR_OVERFLOWS } }; // Accumulate for (size_t idx = 0; idx < NUM_CIRCUITS; ++idx) { @@ -487,94 +348,82 @@ TEST_F(ClientIVCTests, DynamicTraceOverflow) ivc, { .log2_num_gates = test.log2_num_arith_gates[idx] }); } - EXPECT_EQ(check_accumulator_target_sum_manual(ivc.fold_output.accumulator), true); - EXPECT_TRUE(ivc.prove_and_verify()); + EXPECT_EQ(check_accumulator_target_sum_manual(ivc.prover_accumulator), true); + auto proof = ivc.prove(); + EXPECT_TRUE(ClientIVC::verify(proof, ivc.get_vk())); } } /** - * @brief Test methods for serializing and deserializing a proof to/from a file in msgpack format + * @brief Test methods for serializing and deserializing a proof to/from a file/buffer in msgpack format * */ -TEST_F(ClientIVCTests, MsgpackProofFromFile) +TEST_F(ClientIVCTests, MsgpackProofFromFileOrBuffer) { - ClientIVC ivc{ /*num_circuits=*/2 }; - - 
PrivateFunctionExecutionMockCircuitProducer circuit_producer; - - // Initialize the IVC with an arbitrary circuit - circuit_producer.construct_and_accumulate_next_circuit(ivc); - - // Create another circuit and accumulate - circuit_producer.construct_and_accumulate_next_circuit(ivc); - - const auto proof = ivc.prove(); - - // Serialize/deserialize the proof to/from a file as proof-of-concept - const std::string filename = "proof.msgpack"; - proof.to_file_msgpack(filename); - auto proof_deserialized = ClientIVC::Proof::from_file_msgpack(filename); + // Generate an arbitrary valid CICV proof + TestSettings settings{ .log2_num_gates = SMALL_LOG_2_NUM_GATES }; + auto [proof, vk] = accumulate_and_prove_ivc(/*num_app_circuits=*/1, settings); - EXPECT_TRUE(ivc.verify(proof_deserialized)); -}; + { // Serialize/deserialize the proof to/from a file, check that it verifies + const std::string filename = "proof.msgpack"; + proof.to_file_msgpack(filename); + auto proof_deserialized = ClientIVC::Proof::from_file_msgpack(filename); -/** - * @brief Test methods for serializing and deserializing a proof to/from a "heap" buffer in msgpack format - * - */ -TEST_F(ClientIVCTests, MsgpackProofFromBuffer) -{ - ClientIVC ivc{ /*num_circuits=*/2 }; + EXPECT_TRUE(ClientIVC::verify(proof_deserialized, vk)); + } - PrivateFunctionExecutionMockCircuitProducer circuit_producer; + { // Serialize/deserialize proof to/from a heap buffer, check that it verifies + uint8_t* buffer = proof.to_msgpack_heap_buffer(); + auto uint8_buffer = from_buffer>(buffer); + uint8_t const* uint8_ptr = uint8_buffer.data(); + auto proof_deserialized = ClientIVC::Proof::from_msgpack_buffer(uint8_ptr); - // Initialize the IVC with an arbitrary circuit - circuit_producer.construct_and_accumulate_next_circuit(ivc); + EXPECT_TRUE(ClientIVC::verify(proof_deserialized, vk)); + } - // Create another circuit and accumulate - circuit_producer.construct_and_accumulate_next_circuit(ivc); + { // Check that attempting to 
deserialize a proof from a buffer with random bytes fails gracefully + msgpack::sbuffer buffer = proof.to_msgpack_buffer(); + auto proof_deserialized = ClientIVC::Proof::from_msgpack_buffer(buffer); + EXPECT_TRUE(ClientIVC::verify(proof_deserialized, vk)); - const auto proof = ivc.prove(); + std::vector random_bytes(buffer.size()); + std::generate(random_bytes.begin(), random_bytes.end(), []() { return static_cast(rand() % 256); }); + std::copy(random_bytes.begin(), random_bytes.end(), buffer.data()); - // Serialize/deserialize proof to/from a heap buffer, check that it verifies - uint8_t* buffer = proof.to_msgpack_heap_buffer(); - auto uint8_buffer = from_buffer>(buffer); - uint8_t const* uint8_ptr = uint8_buffer.data(); - auto proof_deserialized = ClientIVC::Proof::from_msgpack_buffer(uint8_ptr); - EXPECT_TRUE(ivc.verify(proof_deserialized)); - aligned_free(buffer); + // Expect deserialization to fail with error msgpack::v1::type_error with description "std::bad_cast" + EXPECT_THROW(ClientIVC::Proof::from_msgpack_buffer(buffer), msgpack::v1::type_error); + } }; /** - * @brief Check that a CIVC proof can be serialized and deserialized via msgpack and that attempting to deserialize - * a random buffer of bytes fails gracefully with a type error + * @brief Demonstrate that a databus inconsistency leads to verification failure for the IVC + * @details Kernel circuits contain databus consistency checks that establish that data was passed faithfully between + * circuits, e.g. the output (return_data) of an app was the input (secondary_calldata) of a kernel. This test tampers + * with the databus in such a way that one of the kernels receives secondary_calldata based on tampered app return data. + * This leads to an invalid witness in the check that ensures that the two corresponding commitments are equal and thus + * causes failure of the IVC to verify. 
+ * */ -TEST_F(ClientIVCTests, RandomProofBytes) +TEST_F(ClientIVCTests, DatabusFailure) { - ClientIVC ivc{ /*num_circuits=*/2 }; - - PrivateFunctionExecutionMockCircuitProducer circuit_producer; - - // Initialize the IVC with an arbitrary circuit - circuit_producer.construct_and_accumulate_next_circuit(ivc); + BB_DISABLE_ASSERTS(); // Disable assert in PG prover - // Create another circuit and accumulate - circuit_producer.construct_and_accumulate_next_circuit(ivc); + PrivateFunctionExecutionMockCircuitProducer circuit_producer{ /*num_app_circuits=*/1 }; + const size_t NUM_CIRCUITS = circuit_producer.total_num_circuits; + ClientIVC ivc{ NUM_CIRCUITS, { AZTEC_TRACE_STRUCTURE } }; - const auto proof = ivc.prove(); + // Construct and accumulate a series of mocked private function execution circuits + for (size_t idx = 0; idx < NUM_CIRCUITS; ++idx) { + auto [circuit, vk] = circuit_producer.create_next_circuit_and_vk(ivc); - // Serialize/deserialize proof to msgpack buffer, check that it verifies - msgpack::sbuffer buffer = proof.to_msgpack_buffer(); - auto proof_deserialized = ClientIVC::Proof::from_msgpack_buffer(buffer); - EXPECT_TRUE(ivc.verify(proof_deserialized)); + // Tamper with the return data of the app circuit before it is processed as input to the next kernel + if (idx == 0) { + circuit_producer.tamper_with_databus(); + } - // Overwrite the buffer with random bytes for testing failure case - { - std::vector random_bytes(buffer.size()); - std::generate(random_bytes.begin(), random_bytes.end(), []() { return static_cast(rand() % 256); }); - std::copy(random_bytes.begin(), random_bytes.end(), buffer.data()); + ivc.accumulate(circuit, vk); } - // Expect deserialization to fail with error msgpack::v1::type_error with description "std::bad_cast" - EXPECT_THROW(ClientIVC::Proof::from_msgpack_buffer(buffer), msgpack::v1::type_error); + auto proof = ivc.prove(); + EXPECT_FALSE(ClientIVC::verify(proof, ivc.get_vk())); }; diff --git 
a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc_integration.test.cpp b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc_integration.test.cpp deleted file mode 100644 index 092ba72f12ce..000000000000 --- a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc_integration.test.cpp +++ /dev/null @@ -1,108 +0,0 @@ -#include "barretenberg/client_ivc/client_ivc.hpp" -#include "barretenberg/client_ivc/mock_circuit_producer.hpp" -#include "barretenberg/goblin/goblin.hpp" -#include "barretenberg/goblin/mock_circuits.hpp" -#include "barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp" -#include "barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp" - -#include - -using namespace bb; - -/** - * @brief A test suite that mirrors the logic in the nominal IVC benchmark case - * - */ -class ClientIVCIntegrationTests : public ::testing::Test { - protected: - static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } - - using Flavor = ClientIVC::Flavor; - using FF = typename Flavor::FF; - using VerificationKey = Flavor::VerificationKey; - using Builder = ClientIVC::ClientCircuit; - using MockCircuitProducer = PrivateFunctionExecutionMockCircuitProducer; -}; - -/** - * @brief Prove and verify accumulation of a set of mocked private function execution circuits - * @details This case is meant to mirror the medium complexity benchmark configuration case but processes only 6 - * circuits total (3 app, 3 kernel) to save time. 
- * - */ -TEST_F(ClientIVCIntegrationTests, BenchmarkCaseSimple) -{ - const size_t NUM_CIRCUITS = 6; - ClientIVC ivc{ NUM_CIRCUITS, { AZTEC_TRACE_STRUCTURE } }; - - MockCircuitProducer circuit_producer; - - // Construct and accumulate a series of mocked private function execution circuits - for (size_t idx = 0; idx < NUM_CIRCUITS; ++idx) { - circuit_producer.construct_and_accumulate_next_circuit(ivc); - } - - EXPECT_TRUE(ivc.prove_and_verify()); -}; - -/** - * @brief Accumulate a set of circuits that includes consecutive kernels - * @details In practice its common to have multiple consecutive kernels without intermittent apps e.g. an inner followed - * immediately by a reset, or an inner-reset-tail sequence. This test ensures that such cases are handled correctly. - * - */ -TEST_F(ClientIVCIntegrationTests, ConsecutiveKernels) -{ - const size_t NUM_CIRCUITS = 6; - ClientIVC ivc{ NUM_CIRCUITS, { AZTEC_TRACE_STRUCTURE } }; - - MockCircuitProducer circuit_producer; - - // Accumulate a series of mocked circuits (app, kernel, app, kernel) - for (size_t idx = 0; idx < NUM_CIRCUITS - 2; ++idx) { - circuit_producer.construct_and_accumulate_next_circuit(ivc); - } - - // Cap the IVC with two more kernels (say, a 'reset' and a 'tail') without intermittent apps - circuit_producer.construct_and_accumulate_next_circuit(ivc, { .force_is_kernel = true }); - circuit_producer.construct_and_accumulate_next_circuit(ivc, { .force_is_kernel = true }); - - EXPECT_TRUE(ivc.prove_and_verify()); -}; - -/** - * @brief Demonstrate that a databus inconsistency leads to verification failure for the IVC - * @details Kernel circuits contain databus consistency checks that establish that data was passed faithfully between - * circuits, e.g. the output (return_data) of an app was the input (secondary_calldata) of a kernel. This test - tampers - * with the databus in such a way that one of the kernels receives secondary_calldata based on tampered app return - data. 
- * This leads to an invalid witness in the check that ensures that the two corresponding commitments are equal and - thus - * causes failure of the IVC to verify. - * - */ -TEST_F(ClientIVCIntegrationTests, DatabusFailure) -{ - size_t NUM_CIRCUITS = 6; - ClientIVC ivc{ NUM_CIRCUITS, { AZTEC_TRACE_STRUCTURE } }; - - MockCircuitProducer circuit_producer; - - // Construct and accumulate a series of mocked private function execution circuits - for (size_t idx = 0; idx < NUM_CIRCUITS; ++idx) { - auto [circuit, vk] = circuit_producer.create_next_circuit_and_vk(ivc); - - // Tamper with the return data of the second app circuit before it is processed as input to the next kernel - if (idx == 2) { - circuit_producer.tamper_with_databus(); - } - - ivc.accumulate(circuit, vk); - if (ivc.num_circuits_accumulated == NUM_CIRCUITS) { - circuit_producer.construct_hiding_kernel(ivc); - } - } - - EXPECT_FALSE(ivc.prove_and_verify()); -}; diff --git a/barretenberg/cpp/src/barretenberg/client_ivc/mock_circuit_producer.hpp b/barretenberg/cpp/src/barretenberg/client_ivc/mock_circuit_producer.hpp index 3cdcc93004fa..2302b4098a51 100644 --- a/barretenberg/cpp/src/barretenberg/client_ivc/mock_circuit_producer.hpp +++ b/barretenberg/cpp/src/barretenberg/client_ivc/mock_circuit_producer.hpp @@ -7,7 +7,7 @@ #pragma once #include "barretenberg/client_ivc/client_ivc.hpp" -#include "barretenberg/common/op_count.hpp" +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/goblin/mock_circuits.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp" #include "barretenberg/ultra_honk/ultra_verifier.hpp" @@ -95,9 +95,6 @@ struct TestSettings { // number of public inputs to manually add to circuits, by default this would be 0 because we use the // MockDatabusProducer to test public inputs handling size_t num_public_inputs = 0; - // force the next circuit to be a kernel in order to test the occurence of consecutive kernels (expected behaviour - // in real flows) - 
bool force_is_kernel = false; // by default we will create more complex apps and kernel with various types of gates but in case we want to // specifically test overflow behaviour or unstructured circuits we can manually construct simple circuits with a // specified number of gates @@ -118,15 +115,26 @@ class PrivateFunctionExecutionMockCircuitProducer { using VerificationKey = Flavor::VerificationKey; size_t circuit_counter = 0; + std::vector is_kernel_flags; MockDatabusProducer mock_databus; bool large_first_app = true; - bool is_kernel = false; // whether the next circuit is a kernel or not + constexpr static size_t NUM_TRAILING_KERNELS = 3; // reset, tail, hiding public: - PrivateFunctionExecutionMockCircuitProducer(bool large_first_app = true) + size_t total_num_circuits = 0; + + PrivateFunctionExecutionMockCircuitProducer(size_t num_app_circuits, bool large_first_app = true) : large_first_app(large_first_app) - {} + , total_num_circuits(num_app_circuits * 2 + + NUM_TRAILING_KERNELS) /*One kernel per app, plus a fixed number of final kernels*/ + { + // Set flags indicating which circuits are kernels vs apps + is_kernel_flags.resize(total_num_circuits, true); + for (size_t i = 0; i < num_app_circuits; ++i) { + is_kernel_flags[2 * i] = false; // every other circuit is an app + } + } /** * @brief Precompute the verification key for the given circuit. 
@@ -142,53 +150,48 @@ class PrivateFunctionExecutionMockCircuitProducer { // Deepcopy the opqueue to avoid modifying the original one when finalising the circuit builder.op_queue = std::make_shared(*builder.op_queue); - std::shared_ptr proving_key = - std::make_shared(builder, trace_settings); - std::shared_ptr vk = std::make_shared(proving_key->get_precomputed()); + std::shared_ptr prover_instance = + std::make_shared(builder, trace_settings); + std::shared_ptr vk = std::make_shared(prover_instance->get_precomputed()); return vk; } - ClientCircuit create_simple_circuit(ClientIVC& ivc, size_t log2_num_gates, size_t num_public_inputs) + /** + * @brief Create either a circuit with certain number of gates or a more realistic circuit (withv various custom + * gates and databus usage) in case number of gates is not specified, that is also filled up to 2^17 or 2^19 if + * large. + * + */ + ClientCircuit create_next_circuit(ClientIVC& ivc, size_t log2_num_gates = 0, size_t num_public_inputs = 0) { + const bool is_kernel = is_kernel_flags[circuit_counter]; + circuit_counter++; - is_kernel = (circuit_counter % 2 == 0); + ClientCircuit circuit{ ivc.goblin.op_queue }; - MockCircuits::construct_arithmetic_circuit(circuit, log2_num_gates, /* include_public_inputs= */ false); - if (num_public_inputs > 0) { - // Add some public inputs to the circuit + // if the number of gates is specified we just add a number of arithmetic gates + if (log2_num_gates != 0) { + MockCircuits::construct_arithmetic_circuit(circuit, log2_num_gates, /* include_public_inputs= */ false); + // Add some public inputs for (size_t i = 0; i < num_public_inputs; ++i) { circuit.add_public_variable(13634816 + i); // arbitrary number } - } - if (is_kernel) { - ivc.complete_kernel_circuit_logic(circuit); } else { - stdlib::recursion::PairingPoints::add_default_to_public_inputs(circuit); + // If the number of gates is not specified we create a structured mock circuit + if (is_kernel) { + 
GoblinMockCircuits::construct_mock_folding_kernel(circuit); // construct mock base logic + mock_databus.populate_kernel_databus(circuit); // populate databus inputs/outputs + } else { + bool use_large_circuit = large_first_app && (circuit_counter == 1); // first circuit is size 2^19 + GoblinMockCircuits::construct_mock_app_circuit(circuit, use_large_circuit); // construct mock app + mock_databus.populate_app_databus(circuit); // populate databus outputs + } } - return circuit; - } - /** - * @brief Create a more realistic circuit (withv various custom gates and databus usage) that is also filled up to - * 2^17 or 2^19 if large. - * - */ - ClientCircuit create_next_circuit(ClientIVC& ivc, bool force_is_kernel = false) - { - circuit_counter++; - - // Assume only every second circuit is a kernel, unless force_is_kernel == true - bool is_kernel = (circuit_counter % 2 == 0) || force_is_kernel; - - ClientCircuit circuit{ ivc.goblin.op_queue }; if (is_kernel) { - GoblinMockCircuits::construct_mock_folding_kernel(circuit); // construct mock base logic - mock_databus.populate_kernel_databus(circuit); // populate databus inputs/outputs - ivc.complete_kernel_circuit_logic(circuit); // complete with recursive verifiers etc + ivc.complete_kernel_circuit_logic(circuit); } else { - bool use_large_circuit = large_first_app && (circuit_counter == 1); // first circuit is size 2^19 - GoblinMockCircuits::construct_mock_app_circuit(circuit, use_large_circuit); // construct mock app - mock_databus.populate_app_databus(circuit); // populate databus outputs + stdlib::recursion::PairingPoints::add_default_to_public_inputs(circuit); } return circuit; } @@ -199,16 +202,12 @@ class PrivateFunctionExecutionMockCircuitProducer { std::pair> create_next_circuit_and_vk(ClientIVC& ivc, TestSettings settings = {}) { - - // If a specific number of gates is specified we create a simple circuit with only arithmetic gates to easily - // control the total number of gates. 
- if (settings.log2_num_gates != 0) { - ClientCircuit circuit = create_simple_circuit(ivc, settings.log2_num_gates, settings.num_public_inputs); - return { circuit, get_verification_key(circuit, ivc.trace_settings) }; + // If this is a mock hiding kernel, remove the settings and use a default (non-structured) trace + if (ivc.num_circuits_accumulated == ivc.get_num_circuits() - 1) { + settings = TestSettings{}; + ivc.trace_settings = TraceSettings{}; } - - ClientCircuit circuit = create_next_circuit(ivc, settings.force_is_kernel); // construct the circuit - + auto circuit = create_next_circuit(ivc, settings.log2_num_gates, settings.num_public_inputs); return { circuit, get_verification_key(circuit, ivc.trace_settings) }; } @@ -216,26 +215,12 @@ class PrivateFunctionExecutionMockCircuitProducer { { auto [circuit, vk] = create_next_circuit_and_vk(ivc, settings); ivc.accumulate(circuit, vk); - - if (circuit_counter == ivc.get_num_circuits()) { - construct_hiding_kernel(ivc); - } } /** * @brief Tamper with databus data to facilitate failure testing */ void tamper_with_databus() { mock_databus.tamper_with_app_return_data(); } - /** - * @brief Creates the hiding circuit to complete IVC accumulation - */ - static void construct_hiding_kernel(ClientIVC& ivc) - { - // create a builder from the goblin op_queue - ClientIVC::ClientCircuit circuit{ ivc.goblin.op_queue }; - // complete the hiding kernel logic - ivc.complete_kernel_circuit_logic(circuit); - } }; } // namespace diff --git a/barretenberg/cpp/src/barretenberg/client_ivc/mock_kernel_pinning.test.cpp b/barretenberg/cpp/src/barretenberg/client_ivc/mock_kernel_pinning.test.cpp index f093a0a02996..cb6c7ecc3ae7 100644 --- a/barretenberg/cpp/src/barretenberg/client_ivc/mock_kernel_pinning.test.cpp +++ b/barretenberg/cpp/src/barretenberg/client_ivc/mock_kernel_pinning.test.cpp @@ -23,18 +23,21 @@ class MockKernelTest : public ::testing::Test { TEST_F(MockKernelTest, PinFoldingKernelSizes) { - const size_t NUM_CIRCUITS = 4; + 
MockCircuitProducer circuit_producer{ /*num_app_circuits=*/1 }; + const size_t NUM_CIRCUITS = circuit_producer.total_num_circuits; ClientIVC ivc{ NUM_CIRCUITS, { AZTEC_TRACE_STRUCTURE } }; - MockCircuitProducer circuit_producer; - // Construct and accumulate a series of mocked private function execution circuits for (size_t idx = 0; idx < NUM_CIRCUITS; ++idx) { auto [circuit, vk] = circuit_producer.create_next_circuit_and_vk(ivc); ivc.accumulate(circuit, vk); - EXPECT_TRUE(circuit.blocks.has_overflow); // trace overflow mechanism should be triggered + // Expect trace overflow for all but the hiding kernel (final circuit) + if (idx < NUM_CIRCUITS - 1) { + EXPECT_TRUE(circuit.blocks.has_overflow); + EXPECT_EQ(ivc.prover_accumulator->log_dyadic_size(), 19); + } else { + EXPECT_FALSE(circuit.blocks.has_overflow); + } } - - EXPECT_EQ(ivc.fold_output.accumulator->log_dyadic_size(), 19); } diff --git a/barretenberg/cpp/src/barretenberg/client_ivc/private_execution_steps.cpp b/barretenberg/cpp/src/barretenberg/client_ivc/private_execution_steps.cpp index 1d0ef3fdfe2c..3e090973569e 100644 --- a/barretenberg/cpp/src/barretenberg/client_ivc/private_execution_steps.cpp +++ b/barretenberg/cpp/src/barretenberg/client_ivc/private_execution_steps.cpp @@ -1,4 +1,5 @@ #include "private_execution_steps.hpp" +#include "barretenberg/client_ivc/client_ivc.hpp" #include "barretenberg/common/serialize.hpp" #include "barretenberg/dsl/acir_format/acir_to_constraint_buf.hpp" #include @@ -76,7 +77,7 @@ template T unpack_from_file(const std::filesystem::path& filename) // TODO(#7371) we should not have so many levels of serialization here. 
std::vector PrivateExecutionStepRaw::load(const std::filesystem::path& input_path) { - PROFILE_THIS(); + BB_BENCH(); return unpack_from_file>(input_path); } @@ -91,7 +92,7 @@ void PrivateExecutionStepRaw::self_decompress() std::vector PrivateExecutionStepRaw::load_and_decompress( const std::filesystem::path& input_path) { - PROFILE_THIS(); + BB_BENCH(); auto raw_steps = load(input_path); for (PrivateExecutionStepRaw& step : raw_steps) { step.bytecode = decompress(step.bytecode.data(), step.bytecode.size()); @@ -111,7 +112,7 @@ std::vector PrivateExecutionStepRaw::parse_uncompressed void PrivateExecutionSteps::parse(std::vector&& steps) { - PROFILE_THIS(); + BB_BENCH(); // Preallocate space to write into diretly as push_back would not be thread safe folding_stack.resize(steps.size()); @@ -132,8 +133,7 @@ void PrivateExecutionSteps::parse(std::vector&& steps) // For backwards compatibility, but it affects performance and correctness. precomputed_vks[i] = nullptr; } else { - auto vk = from_buffer>(step.vk); - precomputed_vks[i] = vk; + precomputed_vks[i] = from_buffer>(step.vk); } function_names[i] = step.function_name; } @@ -173,8 +173,6 @@ void PrivateExecutionStepRaw::compress_and_save(std::vector accumulate_and_prove_ivc_with_precomputed_vks( + size_t num_app_circuits, auto& precomputed_vks, const bool large_first_app = true) { - BB_ASSERT_EQ(precomputed_vks.size(), NUM_CIRCUITS, "There should be a precomputed VK for each circuit"); + PrivateFunctionExecutionMockCircuitProducer circuit_producer(num_app_circuits, large_first_app); + const size_t NUM_CIRCUITS = circuit_producer.total_num_circuits; + TraceSettings trace_settings{ AZTEC_TRACE_STRUCTURE }; + ClientIVC ivc{ NUM_CIRCUITS, trace_settings }; - PrivateFunctionExecutionMockCircuitProducer circuit_producer(large_first_app); + BB_ASSERT_EQ(precomputed_vks.size(), NUM_CIRCUITS, "There should be a precomputed VK for each circuit"); for (size_t circuit_idx = 0; circuit_idx < NUM_CIRCUITS; ++circuit_idx) { 
MegaCircuitBuilder circuit; { - PROFILE_THIS_NAME("construct_circuits"); + BB_BENCH_NAME("construct_circuits"); circuit = circuit_producer.create_next_circuit(ivc); } ivc.accumulate(circuit, precomputed_vks[circuit_idx]); } - circuit_producer.construct_hiding_kernel(ivc); + return { ivc.prove(), ivc.get_vk() }; } -std::vector> mock_vks(const size_t num_circuits, - const bool large_first_app = true) +std::vector> precompute_vks(const size_t num_app_circuits, + const bool large_first_app = true) { - // Create an app and kernel vk with metadata set - PrivateFunctionExecutionMockCircuitProducer circuit_producer{ large_first_app }; - ClientIVC ivc{ 2, { AZTEC_TRACE_STRUCTURE } }; - auto [app_circuit, app_vk] = circuit_producer.create_next_circuit_and_vk(ivc); - ivc.accumulate(app_circuit, app_vk); - auto [kernel_circuit, kernel_vk] = circuit_producer.create_next_circuit_and_vk(ivc); + using CircuitProducer = PrivateFunctionExecutionMockCircuitProducer; + CircuitProducer circuit_producer(num_app_circuits, large_first_app); + const size_t NUM_CIRCUITS = circuit_producer.total_num_circuits; + TraceSettings trace_settings{ AZTEC_TRACE_STRUCTURE }; + ClientIVC ivc{ NUM_CIRCUITS, trace_settings }; std::vector> vkeys; + for (size_t j = 0; j < NUM_CIRCUITS; ++j) { + + auto circuit = circuit_producer.create_next_circuit(ivc); - for (size_t idx = 0; idx < num_circuits; ++idx) { - auto key = idx % 2 == 0 ? 
app_vk : kernel_vk; // alternate between app and kernel vks - vkeys.push_back(key); + // Hiding kernel does not use structured trace + if (j == NUM_CIRCUITS - 1) { + trace_settings = TraceSettings{}; + } + auto vk = CircuitProducer::get_verification_key(circuit, trace_settings); + vkeys.push_back(vk); + ivc.accumulate(circuit, vk); } return vkeys; diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/commitment_key.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/commitment_key.hpp index d33f7dfe71b4..b532164fc3ca 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/commitment_key.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/commitment_key.hpp @@ -13,7 +13,8 @@ * simplify the codebase. */ -#include "barretenberg/common/op_count.hpp" +#include "barretenberg/common/bb_bench.hpp" +#include "barretenberg/common/ref_span.hpp" #include "barretenberg/constants.hpp" #include "barretenberg/ecc/batched_affine_addition/batched_affine_addition.hpp" #include "barretenberg/ecc/scalar_multiplication/scalar_multiplication.hpp" @@ -25,6 +26,8 @@ #include "barretenberg/srs/global_crs.hpp" #include +#include +#include #include #include @@ -89,7 +92,7 @@ template class CommitmentKey { // Note: this fn used to expand polynomials to the dyadic size, // due to a quirk in how our pippenger algo used to function. 
// The pippenger algo has been refactored and this is no longer an issue - PROFILE_THIS_NAME("commit"); + BB_BENCH_NAME("CommitmentKey::commit"); std::span point_table = srs->get_monomial_points(); size_t consumed_srs = polynomial.start_index + polynomial.size(); if (consumed_srs > srs->get_monomial_size()) { @@ -103,6 +106,79 @@ template class CommitmentKey { Commitment point(r); return point; }; + /** + * @brief Batch commitment to multiple polynomials + * @details Uses batch_multi_scalar_mul for more efficient processing when committing to multiple polynomials + * + * @param polynomials vector of polynomial spans to commit to + * @return std::vector vector of commitments, one for each polynomial + */ + std::vector batch_commit(RefSpan> polynomials, + size_t max_batch_size = std::numeric_limits::max()) const + { + BB_BENCH_NAME("CommitmentKey::batch_commit"); + + // We can only commit max_batch_size at a time + // This is to prevent excessive memory usage in the pippenger algorithm + // First batch, create the commitments vector + std::vector commitments; + + for (size_t i = 0; i < polynomials.size();) { + // Note: have to be careful how we compute this to not overlow e.g. 
max_batch_size + 1 would + size_t batch_size = std::min(max_batch_size, polynomials.size() - i); + size_t batch_end = i + batch_size; + + // Prepare spans for batch MSM + std::vector> points_spans; + std::vector> scalar_spans; + + for (auto& polynomial : polynomials.subspan(i, batch_end - i)) { + std::span point_table = srs->get_monomial_points().subspan(polynomial.start_index()); + size_t consumed_srs = polynomial.start_index() + polynomial.size(); + if (consumed_srs > srs->get_monomial_size()) { + throw_or_abort(format("Attempting to commit to a polynomial that needs ", + consumed_srs, + " points with an SRS of size ", + srs->get_monomial_size())); + } + scalar_spans.emplace_back(polynomial.coeffs()); + points_spans.emplace_back(point_table); + } + + // Perform batch MSM + auto results = scalar_multiplication::MSM::batch_multi_scalar_mul(points_spans, scalar_spans, false); + for (const auto& result : results) { + commitments.emplace_back(result); + } + i += batch_size; + } + return commitments; + }; + + // helper builder struct for constructing a batch to commit at once + struct CommitBatch { + CommitmentKey* key; + RefVector> wires; + std::vector labels; + void commit_and_send_to_verifier(auto transcript, size_t max_batch_size = std::numeric_limits::max()) + { + std::vector commitments = key->batch_commit(wires, max_batch_size); + for (size_t i = 0; i < commitments.size(); ++i) { + transcript->send_to_verifier(labels[i], commitments[i]); + } + } + + void add_to_batch(Polynomial& poly, const std::string& label, bool mask) + { + if (mask) { + poly.mask(); + } + wires.push_back(poly); + labels.push_back(label); + } + }; + + CommitBatch start_batch() { return CommitBatch{ this, {}, {} }; } /** * @brief Efficiently commit to a polynomial whose nonzero elements are arranged in discrete blocks @@ -122,7 +198,7 @@ template class CommitmentKey { const std::vector>& active_ranges, size_t final_active_wire_idx = 0) { - PROFILE_THIS_NAME("commit"); + 
BB_BENCH_NAME("CommitmentKey::commit_structured"); BB_ASSERT_LTE(polynomial.end_index(), srs->get_monomial_size(), "Polynomial size exceeds commitment key size."); BB_ASSERT_LTE(polynomial.end_index(), dyadic_size, "Polynomial size exceeds commitment key size."); @@ -184,7 +260,7 @@ template class CommitmentKey { const std::vector>& active_ranges, size_t final_active_wire_idx = 0) { - PROFILE_THIS_NAME("commit"); + BB_BENCH_NAME("CommitmentKey::commit_structured_with_nonzero_complement"); BB_ASSERT_LTE(polynomial.end_index(), srs->get_monomial_size(), "Polynomial size exceeds commitment key size."); using BatchedAddition = BatchedAffineAddition; @@ -253,7 +329,7 @@ template class CommitmentKey { return result; } - enum class CommitType { Default, Structured, Sparse, StructuredNonZeroComplement }; + enum class CommitType { Default, StructuredNonZeroComplement }; Commitment commit_with_type(PolynomialSpan poly, CommitType type, @@ -261,9 +337,6 @@ template class CommitmentKey { size_t final_active_wire_idx = 0) { switch (type) { - case CommitType::Structured: - case CommitType::Sparse: - return commit(poly); case CommitType::StructuredNonZeroComplement: return commit_structured_with_nonzero_complement(poly, active_ranges, final_active_wire_idx); case CommitType::Default: diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini.hpp index c98899035b1c..80e24236d7a6 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini.hpp @@ -8,6 +8,8 @@ #include "barretenberg/commitment_schemes/claim.hpp" #include "barretenberg/commitment_schemes/claim_batcher.hpp" +#include "barretenberg/common/bb_bench.hpp" +#include "barretenberg/common/thread.hpp" #include "barretenberg/polynomials/polynomial.hpp" #include "barretenberg/transcript/transcript.hpp" @@ -51,8 +53,8 @@ namespace bb { /** * 
@brief Prover output (evalutation pair, witness) that can be passed on to Shplonk batch opening. - * @details Evaluation pairs {r, A₀₊(r)}, {-r, A₀₋(-r)}, {r^{2^j}, Aⱼ(r^{2^j)}, {-r^{2^j}, Aⱼ(-r^{2^j)}, j = [1, ..., - * m-1] and witness (Fold) polynomials + * @details Evaluation pairs {r, A₀₊(r)}, {-r, A₀₋(-r)}, {r^{2^j}, Aⱼ(r^{2^j)}, {-r^{2^j}, Aⱼ(-r^{2^j)}, j = [1, + * ..., m-1] and witness (Fold) polynomials * [ * A₀₊(X) = F(X) + r⁻¹⋅G(X) * A₀₋(X) = F(X) - r⁻¹⋅G(X) @@ -108,17 +110,21 @@ template class GeminiProver_ { public: /** - * @brief Class responsible for computation of the batched multilinear polynomials required by the Gemini protocol - * @details Opening multivariate polynomials using Gemini requires the computation of three batched polynomials. The - * first, here denoted A₀, is a linear combination of all polynomials to be opened. If we denote the linear + * @brief Class responsible for computation of the batched multilinear polynomials required by the Gemini + * protocol + * @details Opening multivariate polynomials using Gemini requires the computation of three batched polynomials. + * The first, here denoted A₀, is a linear combination of all polynomials to be opened. If we denote the linear * combinations (based on challenge rho) of the unshifted, to-be-shifted-by-1, and to-be-right-shifted-by-k - * polynomials by F, G, and H respectively, then A₀ = F + G/X + X^k*H. (Note: 'k' is assumed even and thus a factor - * (-1)^k in not needed for the evaluation at -r). This polynomial is "folded" in Gemini to produce d-1 univariate - * polynomials Fold_i, i = 1, ..., d-1. The second and third are the partially evaluated batched polynomials A₀₊ = F - * + G/r + r^K*H, and A₀₋ = F - G/r + r^K*H. These are required in order to prove the opening of shifted polynomials - * G_i/X, X^k*H_i and from the commitments to their unshifted counterparts G_i and H_i. 
- * @note TODO(https://github.com/AztecProtocol/barretenberg/issues/1223): There are certain operations herein that - * could be made more efficient by e.g. reusing already initialized polynomials, possibly at the expense of clarity. + * polynomials by F, G, and H respectively, then A₀ = F + G/X + X^k*H. (Note: 'k' is assumed even and thus a + * factor + * (-1)^k in not needed for the evaluation at -r). This polynomial is "folded" in Gemini to produce d-1 + * univariate polynomials Fold_i, i = 1, ..., d-1. The second and third are the partially evaluated batched + * polynomials A₀₊ = F + * + G/r + r^K*H, and A₀₋ = F - G/r + r^K*H. These are required in order to prove the opening of shifted + * polynomials G_i/X, X^k*H_i and from the commitments to their unshifted counterparts G_i and H_i. + * @note TODO(https://github.com/AztecProtocol/barretenberg/issues/1223): There are certain operations herein + * that could be made more efficient by e.g. reusing already initialized polynomials, possibly at the expense of + * clarity. 
*/ class PolynomialBatcher { @@ -194,6 +200,7 @@ template class GeminiProver_ { */ Polynomial compute_batched(const Fr& challenge, Fr& running_scalar) { + BB_BENCH_NAME("compute_batched"); // lambda for batching polynomials; updates the running scalar in place auto batch = [&](Polynomial& batched, const RefVector& polynomials_to_batch) { for (auto& poly : polynomials_to_batch) { @@ -234,13 +241,18 @@ template class GeminiProver_ { for (size_t i = 0; i < groups_to_be_interleaved[0].size(); ++i) { batched_group.push_back(Polynomial(full_batched_size)); } + for (size_t i = 0; i < groups_to_be_interleaved.size(); ++i) { batched_interleaved.add_scaled(interleaved[i], running_scalar); - for (size_t j = 0; j < groups_to_be_interleaved[0].size(); ++j) { - batched_group[j].add_scaled(groups_to_be_interleaved[i][j], running_scalar); - } + // Use parallel chunking for the batching operations + parallel_for([this, running_scalar, i](const ThreadChunk& chunk) { + for (size_t j = 0; j < groups_to_be_interleaved[0].size(); ++j) { + batched_group[j].add_scaled_chunk(chunk, groups_to_be_interleaved[i][j], running_scalar); + } + }); running_scalar *= challenge; } + full_batched += batched_interleaved; } @@ -248,7 +260,8 @@ template class GeminiProver_ { } /** - * @brief Compute partially evaluated batched polynomials A₀(X, r) = A₀₊ = F + G/r, A₀(X, -r) = A₀₋ = F - G/r + * @brief Compute partially evaluated batched polynomials A₀(X, r) = A₀₊ = F + G/r, A₀(X, -r) = A₀₋ = F - + * G/r * @details If the random polynomial is set, it is added to each batched polynomial for ZK * * @param r_challenge partial evaluation challenge @@ -288,9 +301,9 @@ template class GeminiProver_ { * @brief Compute the partially evaluated polynomials P₊(X, r) and P₋(X, -r) * * @details If the interleaved polynomials are set, the full partially evaluated identites A₀(r) and A₀(-r) - * contain the contributions of P₊(r^s) and P₋(r^s) respectively where s is the size of the interleaved group - * assumed even. 
This function computes P₊(X) = ∑ r^i Pᵢ(X) and P₋(X) = ∑ (-r)^i Pᵢ(X) where Pᵢ(X) is the i-th - * polynomial in the batched group. + * contain the contributions of P₊(r^s) and P₋(r^s) respectively where s is the size of the interleaved + * group assumed even. This function computes P₊(X) = ∑ r^i Pᵢ(X) and P₋(X) = ∑ (-r)^i Pᵢ(X) where Pᵢ(X) is + * the i-th polynomial in the batched group. * @param r_challenge partial evaluation challenge * @return std::pair {P₊, P₋} */ @@ -317,7 +330,8 @@ template class GeminiProver_ { static std::vector compute_fold_polynomials(const size_t log_n, std::span multilinear_challenge, - const Polynomial& A_0); + const Polynomial& A_0, + const bool& has_zk = false); static std::pair compute_partially_evaluated_batch_polynomials( const size_t log_n, @@ -430,10 +444,8 @@ template class GeminiVerifier_ { for (auto [group_commitments, interleaved_evaluation] : zip_view( claim_batcher.get_interleaved().commitments_groups, claim_batcher.get_interleaved().evaluations)) { - // Compute the contribution from each group j of commitments Gⱼ = {C₀, C₁, C₂, C₃, ..., Cₛ₋₁} where s is - // assumed even - // C_P_pos += ∑ᵢ ρᵏ⁺ᵐ⁺ʲ⋅ rⁱ ⋅ Cᵢ - // C_P_neg += ∑ᵢ ρᵏ⁺ᵐ⁺ʲ⋅ (-r)ⁱ ⋅ Cᵢ + // Compute the contribution from each group j of commitments Gⱼ = {C₀, C₁, C₂, C₃, ..., Cₛ₋₁} where + // s is assumed even C_P_pos += ∑ᵢ ρᵏ⁺ᵐ⁺ʲ⋅ rⁱ ⋅ Cᵢ C_P_neg += ∑ᵢ ρᵏ⁺ᵐ⁺ʲ⋅ (-r)ⁱ ⋅ Cᵢ for (size_t i = 0; i < interleaved_group_size; ++i) { C_P_pos += group_commitments[i] * batching_scalar * r_shifts_pos[i]; C_P_neg += group_commitments[i] * batching_scalar * r_shifts_neg[i]; @@ -528,10 +540,11 @@ template class GeminiVerifier_ { * Recall that \f$ A_0(r) = \sum \rho^i \cdot f_i + \frac{1}{r} \cdot \sum \rho^{i+k} g_i \f$, where \f$ * k \f$ is the number of "unshifted" commitments. 
* - * @details Initialize `a_pos` = \f$ A_{d}(r) \f$ with the batched evaluation \f$ \sum \rho^i f_i(\vec{u}) + \sum + * @details Initialize `a_pos` = \f$ A_{d}(r) \f$ with the batched evaluation \f$ \sum \rho^i f_i(\vec{u}) + + * \sum * \rho^{i+k} g_i(\vec{u}) \f$. The verifier recovers \f$ A_{l-1}(r^{2^{l-1}}) \f$ from the "negative" value \f$ - * A_{l-1}\left(-r^{2^{l-1}}\right) \f$ received from the prover and the value \f$ A_{l}\left(r^{2^{l}}\right) \f$ - * computed at the previous step. Namely, the verifier computes + * A_{l-1}\left(-r^{2^{l-1}}\right) \f$ received from the prover and the value \f$ A_{l}\left(r^{2^{l}}\right) + * \f$ computed at the previous step. Namely, the verifier computes * \f{align}{ A_{l-1}\left(r^{2^{l-1}}\right) = * \frac{2 \cdot r^{2^{l-1}} \cdot A_{l}\left(r^{2^l}\right) - A_{l-1}\left( -r^{2^{l-1}} \right)\cdot * \left(r^{2^{l-1}} (1-u_{l-1}) - u_{l-1}\right)} {r^{2^{l-1}} (1- u_{l-1}) + u_{l-1}}. \f} @@ -540,16 +553,17 @@ template class GeminiVerifier_ { * P_{-}(-r^s)\f$, where \f$ s \f$ is the size of the group to be interleaved. * * This method uses `padding_indicator_array`, whose i-th entry is FF{1} if i < log_n and 0 otherwise. - * We use these entries to either assign `eval_pos_prev` the value `eval_pos` computed in the current iteration of - * the loop, or to propagate the batched evaluation of the multilinear polynomials to the next iteration. This - * ensures the correctnes of the computation of the required positive evaluations. + * We use these entries to either assign `eval_pos_prev` the value `eval_pos` computed in the current iteration + * of the loop, or to propagate the batched evaluation of the multilinear polynomials to the next iteration. + * This ensures the correctnes of the computation of the required positive evaluations. * * To ensure that dummy evaluations cannot be used to tamper with the final batch_mul result, we multiply dummy * positive evaluations by the entries of `padding_indicator_array`. 
* * @param padding_indicator_array An array with first log_n entries equal to 1, and the remaining entries are 0. * @param batched_evaluation The evaluation of the batched polynomial at \f$ (u_0, \ldots, u_{d-1})\f$. - * @param evaluation_point Evaluation point \f$ (u_0, \ldots, u_{d-1}) \f$ padded to CONST_PROOF_SIZE_LOG_N. + * @param evaluation_point Evaluation point \f$ (u_0, \ldots, u_{d-1}) \f$. Depending on the context, might be + * padded to `virtual_log_n` size. * @param challenge_powers Powers of \f$ r \f$, \f$ r^2 \), ..., \( r^{2^{d-1}} \f$. * @param fold_neg_evals Evaluations \f$ A_{i-1}(-r^{2^{i-1}}) \f$. * @return \f A_{i}}(r^{2^{i}})\f$ \f$ i = 0, \ldots, \text{virtual_log_n} - 1 \f$. diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini.test.cpp index c92a8bfde139..9e31a93b6f06 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini.test.cpp @@ -10,14 +10,28 @@ template class GeminiTest : public CommitmentTest { using GeminiVerifier = GeminiVerifier_; using Fr = typename Curve::ScalarField; using Commitment = typename Curve::AffineElement; + using ClaimBatcher = ClaimBatcher_; + using ClaimBatch = ClaimBatcher::Batch; public: static constexpr size_t log_n = 4; static constexpr size_t n = 1UL << log_n; + static constexpr size_t virtual_log_n = 6; + using CK = CommitmentKey; using VK = VerifierCommitmentKey; + // is_big_ck is set to true in the high degree attack test. It uses a larger SRS size (big_ck_size=2^14) and allows + // the prover + // to commit to high degree polynomials (big_n=2^12). 
+ bool is_big_ck = false; + static constexpr size_t big_n = 1UL << 12; + static constexpr size_t small_log_n = 3; + static constexpr size_t big_ck_size = 1 << 14; + inline static CK big_ck = create_commitment_key(big_ck_size); + bool is_reject_case = false; + static CK ck; static VK vk; @@ -30,13 +44,17 @@ template class GeminiTest : public CommitmentTest { void execute_gemini_and_verify_claims(std::vector& multilinear_evaluation_point, MockClaimGenerator mock_claims) { + const size_t poly_size = is_big_ck ? big_n : n; + const CK& comkey = is_big_ck ? big_ck : ck; + const size_t multilinear_challenge_size = is_big_ck ? small_log_n : log_n; + auto prover_transcript = NativeTranscript::prover_init_empty(); // Compute: // - (d+1) opening pairs: {r, \hat{a}_0}, {-r^{2^i}, a_i}, i = 0, ..., d-1 // - (d+1) Fold polynomials Fold_{r}^(0), Fold_{-r}^(0), and Fold^(i), i = 0, ..., d-1 auto prover_output = GeminiProver::prove( - this->n, mock_claims.polynomial_batcher, multilinear_evaluation_point, ck, prover_transcript); + poly_size, mock_claims.polynomial_batcher, multilinear_evaluation_point, comkey, prover_transcript); // The prover output needs to be completed by adding the "positive" Fold claims, i.e. evaluations of Fold^(i) at // r^{2^i} for i=1, ..., d-1. 
Although here we are copying polynomials, it is not the case when GeminiProver is @@ -44,7 +62,7 @@ template class GeminiTest : public CommitmentTest { std::vector> prover_claims_with_pos_evals; // `prover_output` consists of d+1 opening claims, we add another d-1 claims for each positive evaluation // Fold^i(r^{2^i}) for i = 1, ..., d-1 - const size_t total_num_claims = 2 * log_n; + const size_t total_num_claims = 2 * multilinear_challenge_size; prover_claims_with_pos_evals.reserve(total_num_claims); for (auto& claim : prover_output) { @@ -76,13 +94,97 @@ template class GeminiTest : public CommitmentTest { auto verifier_claims = GeminiVerifier::reduce_verification( multilinear_evaluation_point, mock_claims.claim_batcher, verifier_transcript); + // Check equality of the opening pairs computed by prover and verifier + if (this->is_reject_case) { + bool mismatch = false; + for (auto [prover_claim, verifier_claim] : zip_view(prover_claims_with_pos_evals, verifier_claims)) { + if (prover_claim.opening_pair != verifier_claim.opening_pair) { + mismatch = true; + break; + } + } + EXPECT_TRUE(mismatch) << "Expected a mismatch in opening pairs, but all matched."; + } else { + for (auto [prover_claim, verifier_claim] : zip_view(prover_claims_with_pos_evals, verifier_claims)) { + this->verify_opening_claim(verifier_claim, prover_claim.polynomial, comkey); + ASSERT_EQ(prover_claim.opening_pair, verifier_claim.opening_pair); + } + } + } + + void open_extension_by_zero() + { + auto prover_transcript = NativeTranscript::prover_init_empty(); + + auto u = this->random_evaluation_point(virtual_log_n); + + Polynomial poly((1UL << log_n)); + + poly.at(0) = 1; + poly.at(1) = 2; + poly.at(2) = 3; + + typename GeminiProver::PolynomialBatcher poly_batcher(1UL << log_n); + poly_batcher.set_unshifted(RefVector(poly)); + + // As we are opening `poly` extended by zero from `log_n` dimensions to `virtual_log_n` dimensions, it needs to + // be multiplied by appropriate scalars. 
+ Fr eval = poly.evaluate_mle(std::span(u).subspan(0, log_n)) * (Fr(1) - u[virtual_log_n - 1]) * + (Fr(1) - u[virtual_log_n - 2]); + auto comm = ck.commit(poly); + auto claim_batcher = ClaimBatcher{ .unshifted = ClaimBatch{ RefVector(comm), RefVector(eval) } }; + + // Compute: + // - (d+1) opening pairs: {r, \hat{a}_0}, {-r^{2^i}, a_i}, i = 0, ..., d-1 + // - (d+1) Fold polynomials Fold_{r}^(0), Fold_{-r}^(0), and Fold^(i), i = 0, ..., d-1 + auto prover_output = GeminiProver::prove(1UL << log_n, poly_batcher, u, ck, prover_transcript); + + // The prover output needs to be completed by adding the "positive" Fold claims, i.e. evaluations of + // Fold^(i) at r^{2^i} for i=1, ..., d-1. Although here we are copying polynomials, it is not the case when + // GeminiProver is combined with ShplonkProver. + std::vector> prover_claims_with_pos_evals; + // `prover_output` consists of d+1 opening claims, we add another d-1 claims for each positive evaluation + // Fold^i(r^{2^i}) for i = 1, ..., d-1 + const size_t total_num_claims = 2 * virtual_log_n; + prover_claims_with_pos_evals.reserve(total_num_claims); + + for (auto& claim : prover_output) { + if (claim.gemini_fold) { + if (claim.gemini_fold) { + // "positive" evaluation challenge r^{2^i} for i = 1, ..., d-1 + const Fr evaluation_challenge = -claim.opening_pair.challenge; + // Fold^(i) at r^{2^i} for i=1, ..., d-1 + const Fr pos_evaluation = claim.polynomial.evaluate(evaluation_challenge); + + // Add the positive Fold claims to the vector of claims + ProverOpeningClaim pos_fold_claim = { .polynomial = claim.polynomial, + .opening_pair = { .challenge = evaluation_challenge, + .evaluation = pos_evaluation } }; + prover_claims_with_pos_evals.emplace_back(pos_fold_claim); + } + } + prover_claims_with_pos_evals.emplace_back(claim); + } + + // Check that the Fold polynomials have been evaluated correctly in the prover + this->verify_batch_opening_pair(prover_claims_with_pos_evals); + + auto verifier_transcript = 
NativeTranscript::verifier_init_empty(prover_transcript); + + // Compute: + // - d opening pairs: {r^{2^i}, \hat{a}_i} for i = 0, ..., d-1 + // - 2 partially evaluated Fold polynomial commitments [Fold_{r}^(0)] and [Fold_{-r}^(0)] + // Aggregate: 2d opening pairs and 2d Fold poly commitments into verifier claim + auto verifier_claims = GeminiVerifier::reduce_verification(u, claim_batcher, verifier_transcript); // Check equality of the opening pairs computed by prover and verifier for (auto [prover_claim, verifier_claim] : zip_view(prover_claims_with_pos_evals, verifier_claims)) { this->verify_opening_claim(verifier_claim, prover_claim.polynomial, ck); ASSERT_EQ(prover_claim.opening_pair, verifier_claim.opening_pair); } } -}; +} + +; using ParamsTypes = ::testing::Types; TYPED_TEST_SUITE(GeminiTest, ParamsTypes); @@ -144,6 +246,10 @@ TYPED_TEST(GeminiTest, DoubleWithShiftAndInterleaving) this->execute_gemini_and_verify_claims(u, mock_claims); } +TYPED_TEST(GeminiTest, OpenExtensionByZero) +{ + TestFixture::open_extension_by_zero(); +} /** * @brief Implementation of the [attack described by Ariel](https://hackmd.io/zm5SDfBqTKKXGpI-zQHtpA?view). 
* @@ -192,8 +298,8 @@ TYPED_TEST(GeminiTest, SoundnessRegression) fold_1.at(2) = -(Fr(1) - u[1]) * fold_1.at(1) * u[1].invert(); // fold₁[2] = -(1 - u₁) ⋅ fold₁[1] / u₁ fold_1.at(3) = Fr(0); - prover_transcript->template send_to_verifier("Gemini:FOLD_1", this->ck.commit(fold_1)); - prover_transcript->template send_to_verifier("Gemini:FOLD_2", this->ck.commit(fold_2)); + prover_transcript->send_to_verifier("Gemini:FOLD_1", this->ck.commit(fold_1)); + prover_transcript->send_to_verifier("Gemini:FOLD_2", this->ck.commit(fold_2)); // Get Gemini evaluation challenge const Fr gemini_r = prover_transcript->template get_challenge("Gemini:r"); @@ -207,9 +313,9 @@ TYPED_TEST(GeminiTest, SoundnessRegression) // Compute honest evaluations fold₁(-r²) and fold₂(-r⁴) fold_evals.emplace_back(fold_1.evaluate(-r_squares[1])); fold_evals.emplace_back(fold_2.evaluate(-r_squares[2])); - prover_transcript->template send_to_verifier("Gemini:a_1", fold_evals[0]); - prover_transcript->template send_to_verifier("Gemini:a_2", fold_evals[1]); - prover_transcript->template send_to_verifier("Gemini:a_3", fold_evals[2]); + prover_transcript->send_to_verifier("Gemini:a_1", fold_evals[0]); + prover_transcript->send_to_verifier("Gemini:a_2", fold_evals[1]); + prover_transcript->send_to_verifier("Gemini:a_3", fold_evals[2]); // Compute the powers of r used by the verifier. It is an artifact of the const proof size logic. const std::vector gemini_eval_challenge_powers = gemini::powers_of_evaluation_challenge(gemini_r, log_n); @@ -247,12 +353,69 @@ TYPED_TEST(GeminiTest, SoundnessRegression) EXPECT_TRUE(prover_opening_claims[idx].opening_pair == verifier_claims[idx].opening_pair); } - // The mismatch in claims below leads to Gemini and Shplemini Verifier rejecting the tampered proof and confirms the - // necessity of opening `fold_i` at r^{2^i} for i = 1, ..., log_n - 1. 
+ // The mismatch in claims below leads to Gemini and Shplemini Verifier rejecting the tampered proof and confirms + // the necessity of opening `fold_i` at r^{2^i} for i = 1, ..., log_n - 1. for (auto idx : mismatching_claim_indices) { EXPECT_FALSE(prover_opening_claims[idx].opening_pair == verifier_claims[idx].opening_pair); } } +// The prover commits to a higher degree polynomial than what is expected. The test considers the case where +// this polynomial folds down to a constant (equal to the claimed evaluation) after the expected number of rounds +// (due to the choice of the evaluation point). In this case, the verifier accepts. +TYPED_TEST(GeminiTest, HighDegreeAttackAccept) +{ + using Fr = typename TypeParam::ScalarField; + + this->is_big_ck = true; + + // Sample public opening point (u_0, u_1, u_2) + auto u = this->random_evaluation_point(this->small_log_n); + + // Choose a claimed eval at `u` + Fr claimed_multilinear_eval = Fr::random_element(); + + // poly is of high degrees, as the SRS allows for it + Polynomial poly(this->big_n); + + // Define poly to be of a specific form such that after small_log_n folds with u, it becomes a constant equal to + // claimed_multilinear_eval. + const Fr tail = ((Fr(1) - u[0]) * (Fr(1) - u[1])).invert(); + poly.at(4) = claimed_multilinear_eval * tail / u[2]; + poly.at(4088) = tail; + poly.at(4092) = -tail * (Fr(1) - u[2]) / u[2]; + + MockClaimGenerator mock_claims( + this->big_n, std::vector{ std::move(poly) }, std::vector{ claimed_multilinear_eval }, this->big_ck); + + this->execute_gemini_and_verify_claims(u, mock_claims); +} + +// The prover commits to a higher degree polynomial than what is expected. The test considers the case where +// this polynomial does not fold down to a constant after the expected number of rounds. In this case, the verifier +// rejects with high probabililty. 
+TYPED_TEST(GeminiTest, HighDegreeAttackReject) +{ + using Fr = typename TypeParam::ScalarField; + using Polynomial = bb::Polynomial; + + this->is_big_ck = true; + this->is_reject_case = true; + + // poly of high degree, as SRS allows for it + Polynomial poly = Polynomial::random(this->big_n); + + // Sample public opening point (u_0, u_1, u_2) + auto u = this->random_evaluation_point(this->small_log_n); + + // Choose a claimed eval at `u` + Fr claimed_multilinear_eval = Fr::random_element(); + + MockClaimGenerator mock_claims( + this->big_n, std::vector{ std::move(poly) }, std::vector{ claimed_multilinear_eval }, this->big_ck); + + this->execute_gemini_and_verify_claims(u, mock_claims); +} + template typename GeminiTest::CK GeminiTest::ck; template typename GeminiTest::VK GeminiTest::vk; diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini_impl.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini_impl.hpp index 50efcb38d808..7a58b086291c 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini_impl.hpp @@ -80,16 +80,13 @@ std::vector::Claim> GeminiProver_::prove( Polynomial A_0 = polynomial_batcher.compute_batched(rho, running_scalar); // Construct the d-1 Gemini foldings of A₀(X) - std::vector fold_polynomials = compute_fold_polynomials(log_n, multilinear_challenge, A_0); + std::vector fold_polynomials = compute_fold_polynomials(log_n, multilinear_challenge, A_0, has_zk); // If virtual_log_n >= log_n, pad the fold commitments with dummy group elements [1]_1. for (size_t l = 0; l < virtual_log_n - 1; l++) { std::string label = "Gemini:FOLD_" + std::to_string(l + 1); - if (l < log_n - 1) { - transcript->send_to_verifier(label, commitment_key.commit(fold_polynomials[l])); - } else { - transcript->send_to_verifier(label, Commitment::one()); - } + // When has_zk is true, we are sending commitments to 0. 
Seems to work, but maybe brittle. + transcript->send_to_verifier(label, commitment_key.commit(fold_polynomials[l])); } const Fr r_challenge = transcript->template get_challenge("Gemini:r"); @@ -106,16 +103,11 @@ std::vector::Claim> GeminiProver_::prove( auto [A_0_pos, A_0_neg] = polynomial_batcher.compute_partially_evaluated_batch_polynomials(r_challenge); // Construct claims for the d + 1 univariate evaluations A₀₊(r), A₀₋(-r), and Foldₗ(−r^{2ˡ}), l = 1, ..., d-1 std::vector claims = construct_univariate_opening_claims( - log_n, std::move(A_0_pos), std::move(A_0_neg), std::move(fold_polynomials), r_challenge); + virtual_log_n, std::move(A_0_pos), std::move(A_0_neg), std::move(fold_polynomials), r_challenge); - // If virtual_log_n >= log_n, pad the negative fold evaluations with zeroes. for (size_t l = 1; l <= virtual_log_n; l++) { std::string label = "Gemini:a_" + std::to_string(l); - if (l <= log_n) { - transcript->send_to_verifier(label, claims[l].opening_pair.evaluation); - } else { - transcript->send_to_verifier(label, Fr::zero()); - } + transcript->send_to_verifier(label, claims[l].opening_pair.evaluation); } // If running Gemini for the Translator VM polynomials, A₀(r) = A₀₊(r) + P₊(rˢ) and A₀(-r) = A₀₋(-r) + P₋(rˢ) @@ -145,15 +137,18 @@ std::vector::Claim> GeminiProver_::prove( */ template std::vector::Polynomial> GeminiProver_::compute_fold_polynomials( - const size_t log_n, std::span multilinear_challenge, const Polynomial& A_0) + const size_t log_n, std::span multilinear_challenge, const Polynomial& A_0, const bool& has_zk) { const size_t num_threads = get_num_cpus_pow2(); + + const size_t virtual_log_n = multilinear_challenge.size(); + constexpr size_t efficient_operations_per_thread = 64; // A guess of the number of operation for which there // would be a point in sending them to a separate thread // Reserve and allocate space for m-1 Fold polynomials, the foldings of the full batched polynomial A₀ std::vector fold_polynomials; - 
fold_polynomials.reserve(log_n - 1); + fold_polynomials.reserve(virtual_log_n - 1); for (size_t l = 0; l < log_n - 1; ++l) { // size of the previous polynomial/2 const size_t n_l = 1 << (log_n - l - 1); @@ -198,6 +193,28 @@ std::vector::Polynomial> GeminiProver_::com A_l = A_l_fold; } + // Perform virtual rounds. + // After the first `log_n - 1` rounds, the prover's `fold` univariates stabilize. With ZK, the verifier multiplies + // the evaluations by 0, otherwise, when `virtual_log_n > log_n`, the prover honestly computes and sends the + // constant folds. + const auto& last = fold_polynomials.back(); + const Fr u_last = multilinear_challenge[log_n - 1]; + const Fr final_eval = last.at(0) + u_last * (last.at(1) - last.at(0)); + Polynomial const_fold(1); + // Temporary fix: when we're running a zk proof, the verifier uses a `padding_indicator_array`. So the evals in + // rounds past `log_n - 1` will be ignored. Hence the prover also needs to ignore them, otherwise Shplonk will fail. + const_fold.at(0) = final_eval * Fr(static_cast(!has_zk)); + fold_polynomials.emplace_back(const_fold); + + // FOLD_{log_n+1}, ..., FOLD_{d_v-1} + Fr tail = Fr(1); + for (size_t k = log_n; k < virtual_log_n - 1; ++k) { + tail *= (Fr(1) - multilinear_challenge[k]); // multiply by (1 - u_k) + Polynomial next_const(1); + next_const.at(0) = final_eval * tail * Fr(static_cast(!has_zk)); + fold_polynomials.emplace_back(next_const); + } + return fold_polynomials; }; diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.fuzzer.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.fuzzer.cpp index d5bd3da29531..45c9bba72009 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.fuzzer.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.fuzzer.cpp @@ -20,35 +20,35 @@ using Curve = curve::Grumpkin; CommitmentKey ck; VerifierCommitmentKey vk; /** - * @brief Class that allows us to call internal IPA methods, because it's 
friendly + * @brief Wrapper class that allows us to call IPA methods. * */ class ProxyCaller { public: template - static void compute_opening_proof_internal(const CommitmentKey& ck, - const ProverOpeningClaim& opening_claim, - const std::shared_ptr& transcript, - size_t poly_log_size) + static void compute_opening_proof(const CommitmentKey& ck, + const ProverOpeningClaim& opening_claim, + const std::shared_ptr& transcript, + size_t poly_log_size) { if (poly_log_size == 1) { - IPA::compute_opening_proof_internal(ck, opening_claim, transcript); + IPA::compute_opening_proof(ck, opening_claim, transcript); } if (poly_log_size == 2) { - IPA::compute_opening_proof_internal(ck, opening_claim, transcript); + IPA::compute_opening_proof(ck, opening_claim, transcript); } } template - static bool verify_internal(const VerifierCommitmentKey& vk, - const OpeningClaim& opening_claim, - const std::shared_ptr& transcript, - size_t poly_log_size) + static bool verify(const VerifierCommitmentKey& vk, + const OpeningClaim& opening_claim, + const std::shared_ptr& transcript, + size_t poly_log_size) { if (poly_log_size == 1) { - return IPA::reduce_verify_internal_native(vk, opening_claim, transcript); + return IPA::reduce_verify(vk, opening_claim, transcript); } if (poly_log_size == 2) { - return IPA::reduce_verify_internal_native(vk, opening_claim, transcript); + return IPA::reduce_verify(vk, opening_claim, transcript); } return false; } @@ -180,13 +180,13 @@ extern "C" int LLVMFuzzerTestOneInput(const unsigned char* data, size_t size) } auto const opening_pair = OpeningPair{ x, poly.evaluate(x) }; auto const opening_claim = OpeningClaim{ opening_pair, ck.commit(poly) }; - ProxyCaller::compute_opening_proof_internal(ck, { poly, opening_pair }, transcript, log_size); + ProxyCaller::compute_opening_proof(ck, { poly, opening_pair }, transcript, log_size); // Reset challenge indices transcript->reset_indices(); // Should verify - if (!ProxyCaller::verify_internal(vk, opening_claim, 
transcript, log_size)) { + if (!ProxyCaller::verify(vk, opening_claim, transcript, log_size)) { return 1; } return 0; diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp index 2ee73e1ac4ee..77182ca17b63 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp @@ -26,12 +26,12 @@ #include namespace bb { -// clang-format off -// Note that an update of this constant requires updating the inputs to noir protocol circuit (rollup-base-private, rollup-base-public, -// rollup-block-merge, rollup-block-root, rollup-merge, rollup-root), as well as updating IPA_PROOF_LENGTH in other places. -static constexpr size_t IPA_PROOF_LENGTH = /* comms IPA_L and IPA_R */ 4 * CONST_ECCVM_LOG_N + - /* comm G_0 */ 2 + - /* eval a_0 */ 2; +// Note that an update of this constant requires updating the inputs to noir protocol circuit (rollup-base-private, +// rollup-base-public, rollup-block-merge, rollup-block-root, rollup-merge, rollup-root), as well as updating +// IPA_PROOF_LENGTH in other places. +static constexpr size_t IPA_PROOF_LENGTH = /* comms IPA_L and IPA_R */ 4 * CONST_ECCVM_LOG_N + + /* comm G_0 */ 2 + + /* eval a_0 */ 2; /** * @brief IPA (inner product argument) commitment scheme class. @@ -39,11 +39,10 @@ static constexpr size_t IPA_PROOF_LENGTH = /* comms IPA_L and IPA_R */ 4 * CONS *@details This implementation of IPA uses the optimized version that only multiplies half of the elements of each *vector in each prover round. The implementation uses: * -*1. An SRS (Structured Reference String) \f$\vec{G}=(G_0,G_1...,G_{d-1})\f$ with \f$G_i ∈ E(\mathbb{F}_p)\f$ and +*1. 
An CRS (Common Reference String) \f$\vec{G}=(G_0,G_1...,G_{d-1})\f$ with \f$G_i ∈ E(\mathbb{F}_p)\f$ and *\f$\mathbb{F}_r\f$ - the scalar field of the elliptic curve as well as \f$G\f$ which is an independent generator on -*the same curve. -*2. A polynomial \f$f(x)=\sum_{i=0}^{d-1}f_ix^i\f$ over field \f$F_r\f$, where the polynomial degree \f$d-1\f$ is such -*that \f$d=2^k\f$ +*the same curve. (Note: we occasionally might use the language of SRS instead of CRS; this is a mild abuse of language.) +*2. A polynomial \f$f(x)=\sum_{i=0}^{d-1}f_ix^i\f$ over field \f$\mathbb{F}_r\f$, where \f$d=2^k\f$ * *The opening and verification procedures expect that there already exists a commitment to \f$f(x)\f$ which is the *scalar product \f$[f(x)]=\langle\vec{f},\vec{G}\rangle\f$, where \f$\vec{f}=(f_0, f_1,..., f_{d-1})\f$​ @@ -93,62 +92,68 @@ can reduce initial commitment to the result \f$\langle \vec{a},\vec{b}\rangle U\ documentation */ template class IPA { - public: - using Curve = Curve_; - using Fr = typename Curve::ScalarField; - using GroupElement = typename Curve::Element; - using Commitment = typename Curve::AffineElement; - using CK = CommitmentKey; - using VK = VerifierCommitmentKey; - using VerifierAccumulator = stdlib::recursion::honk::IpaAccumulator; - - // Compute the length of the vector of coefficients of a polynomial being opened. - static constexpr size_t poly_length = 1UL<; + using VK = VerifierCommitmentKey; + + // records the `u_challenges_inv`, the Pederson commitment to the `h` -polynomial, a.k.a. the challenge + // polynomial, given as ∏_{i ∈ [k]} (1 + u_{len-i}^{-1}.X^{2^{i-1}}), and the running truth value of the IPA + // accumulation claim. + using VerifierAccumulator = stdlib::recursion::honk::IpaAccumulator; + + // Compute the length of the vector of coefficients of a polynomial being opened. 
+ static constexpr size_t poly_length = 1UL << log_poly_length; + +// These allow access to internal functions so that we can never use a mock transcript unless it's fuzzing or testing of +// IPA specifically #ifdef IPA_TEST - FRIEND_TEST(IPATest, ChallengesAreZero); - FRIEND_TEST(IPATest, AIsZeroAfterOneRound); + FRIEND_TEST(IPATest, ChallengesAreZero); + FRIEND_TEST(IPATest, AIsZeroAfterOneRound); #endif #ifdef IPA_FUZZ_TEST - friend class ProxyCaller; + friend class ProxyCaller; #endif - /** - * @brief Compute an inner product argument proof for opening a single polynomial at a single evaluation point. - * - * @tparam Transcript Transcript type. Useful for testing - * @param ck The commitment key containing srs - * @param opening_pair (challenge, evaluation) - * @param polynomial The witness polynomial whose opening proof needs to be computed - * @param transcript Prover transcript - * https://github.com/AztecProtocol/aztec-packages/pull/3434 - * - *@details For a vector \f$\vec{v}=(v_0,v_1,...,v_{2n-1})\f$ of length \f$2n\f$ we'll denote - *\f$\vec{v}_{low}=(v_0,v_1,...,v_{n-1})\f$ and \f$\vec{v}_{high}=(v_{n},v_{n+1},...v_{2n-1})\f$. The procedure runs - *as follows: - * - *1. Send the degree of \f$f(x)\f$ plus one, equal to \f$d\f$ to the verifier - *2. Receive the generator challenge \f$u\f$ from the verifier. If it is zero, abort - *3. Compute the auxiliary generator \f$U=u\cdot G\f$, where \f$G\f$ is a generator of \f$E(\mathbb{F}_p)\f$​ - *4. Set \f$\vec{G}_{k}=\vec{G}\f$, \f$\vec{a}_{k}=\vec{p}\f$ where \f$vec{p}\f$ represent the polynomial's - *coefficients -. *5. Compute the vector \f$\vec{b}_{k}=(1,\beta,\beta^2,...,\beta^{d-1})\f$ where \f$p(\beta)$\f is the - evaluation we wish to prove. - *6. Perform \f$k\f$ rounds (for \f$i \in \{k,...,1\}\f$) of: - * 1. Compute - \f$L_{i-1}=\langle\vec{a}_{i\_low},\vec{G}_{i\_high}\rangle+\langle\vec{a}_{i\_low},\vec{b}_{i\_high}\rangle\cdot - U\f$​ - * 2. 
Compute - *\f$R_{i-1}=\langle\vec{a}_{i\_high},\vec{G}_{i\_low}\rangle+\langle\vec{a}_{i\_high},\vec{b}_{i\_low}\rangle\cdot - U\f$ - * 3. Send \f$L_{i-1}\f$ and \f$R_{i-1}\f$ to the verifier - * 4. Receive round challenge \f$u_{i-1}\f$ from the verifier​, if it is zero, abort - * 5. Compute \f$\vec{G}_{i-1}=\vec{G}_{i\_low}+u_{i-1}^{-1}\cdot \vec{G}_{i\_high}\f$ - * 6. Compute \f$\vec{a}_{i-1}=\vec{a}_{i\_low}+u_{i-1}\cdot \vec{a}_{i\_high}\f$ - * 7. Compute \f$\vec{b}_{i-1}=\vec{b}_{i\_low}+u_{i-1}^{-1}\cdot \vec{b}_{i\_high}\f$​ - * - *7. Send the final \f$\vec{a}_{0} = (a_0)\f$ to the verifier - */ + + /** + * @brief Compute an inner product argument proof for opening a single polynomial at a single evaluation point. + * + * @tparam Transcript Transcript type. Useful for testing + * @param ck The commitment key containing srs + * @param opening_pair (challenge, evaluation) + * @param polynomial The witness polynomial whose opening proof needs to be computed + * @param transcript Prover transcript + * https://github.com/AztecProtocol/aztec-packages/pull/3434 + * + *@details For a vector \f$\vec{v}=(v_0,v_1,...,v_{2n-1})\f$ of length \f$2n\f$ we'll denote + *\f$\vec{v}_{low}=(v_0,v_1,...,v_{n-1})\f$ and \f$\vec{v}_{high}=(v_{n},v_{n+1},...v_{2n-1})\f$. The procedure runs + *as follows: + * + *1. We assume that the hash buffer has been populated by the opening claim. (This is done in a different method.) + *2. Receive the generator challenge \f$u\f$ from the verifier. If it is zero, abort + *3. Compute the auxiliary generator \f$U=u\cdot G\f$, where \f$G\f$ is a generator of \f$E(\mathbb{F}_p)\f$​ + *4. Set \f$\vec{G}_{k}=\vec{G}\f$, \f$\vec{a}_{k}=\vec{p}\f$ where \f$vec{p}\f$ represent the polynomial's + *coefficients + . *5. Compute the vector \f$\vec{b}_{k}=(1,\beta,\beta^2,...,\beta^{d-1})\f$ where \f$p(\beta)$\f is the + evaluation we wish to prove. + *6. Perform \f$k\f$ rounds (for \f$i \in \{k,...,1\}\f$) of: + * 1. 
Compute + \f$L_{i-1}=\langle\vec{a}_{i\_low},\vec{G}_{i\_high}\rangle+\langle\vec{a}_{i\_low},\vec{b}_{i\_high}\rangle\cdot + U\f$​ + * 2. Compute + *\f$R_{i-1}=\langle\vec{a}_{i\_high},\vec{G}_{i\_low}\rangle+\langle\vec{a}_{i\_high},\vec{b}_{i\_low}\rangle\cdot + U\f$ + * 3. Send \f$L_{i-1}\f$ and \f$R_{i-1}\f$ to the verifier + * 4. Receive round challenge \f$u_{i-1}\f$ from the verifier​, if it is zero, abort + * 5. Compute \f$\vec{G}_{i-1}=\vec{G}_{i\_low}+u_{i-1}^{-1}\cdot \vec{G}_{i\_high}\f$ + * 6. Compute \f$\vec{a}_{i-1}=\vec{a}_{i\_low}+u_{i-1}\cdot \vec{a}_{i\_high}\f$ + * 7. Compute \f$\vec{b}_{i-1}=\vec{b}_{i\_low}+u_{i-1}^{-1}\cdot \vec{b}_{i\_high}\f$​ + * + *7. Send the final \f$\vec{a}_{0} = (a_0)\f$ to the verifier + */ template static void compute_opening_proof_internal(const CK& ck, const ProverOpeningClaim& opening_claim, @@ -156,22 +161,8 @@ template class IPA { const bb::Polynomial& polynomial = opening_claim.polynomial; - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1150): Hash more things here. - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1408): Make IPA fuzzer compatible with `add_to_hash_buffer`. - // // Step 1. - // Add the commitment, challenge, and evaluation to the hash buffer. - // NOTE: - // a. This is a bit inefficient, as the prover otherwise doesn't need this commitment. - // However, the effect to performance of this MSM (in practice of size 2^16) is tiny. - // b. Note that we add these three pieces of information to the hash buffer, as opposed to - // calling the `send_to_verifier` method, as the verifier knows them. - - const auto commitment = ck.commit(polynomial); - transcript->template add_to_hash_buffer("IPA:commitment", commitment); - transcript->template add_to_hash_buffer("IPA:challenge", opening_claim.opening_pair.challenge); - transcript->template add_to_hash_buffer("IPA:evaluation", opening_claim.opening_pair.evaluation); - + // Done in `add_claim_to_hash_buffer`. // Step 2. 
// Receive challenge for the auxiliary generator @@ -182,7 +173,12 @@ template class IPA } // Step 3. - // Compute auxiliary generator U + // Compute auxiliary generator U, which is used to bind together the inner product claim and the commitment. + // This yields the binding property because we assume it is computationally difficult to find a linear relation + // between the CRS and `Commitment::one()`. + // Compute auxiliary generator U, which is used to bind together the inner product claim and the commitment. + // This yields the binding property because we assume it is computationally difficult to find a linear relation + // between the CRS and `Commitment::one()`. auto aux_generator = Commitment::one() * generator_challenge; // Checks poly_degree is greater than zero and a power of two @@ -203,10 +199,7 @@ template class IPA // Copy the SRS into a local data structure as we need to mutate this vector for every round parallel_for_heuristic( - poly_length, - [&](size_t i) { - G_vec_local[i] = srs_elements[i]; - }, thread_heuristics::FF_COPY_COST); + poly_length, [&](size_t i) { G_vec_local[i] = srs_elements[i]; }, thread_heuristics::FF_COPY_COST); // Step 5. // Compute vector b (vector of the powers of the challenge) @@ -220,7 +213,8 @@ template class IPA b_vec[i] = b_power; b_power *= opening_pair.challenge; } - }, thread_heuristics::FF_COPY_COST + thread_heuristics::FF_MULTIPLICATION_COST); + }, + thread_heuristics::FF_COPY_COST + thread_heuristics::FF_MULTIPLICATION_COST); // Iterate for log(poly_degree) rounds to compute the round commitments. 
@@ -236,29 +230,32 @@ template class IPA // Run scalar products in parallel auto inner_prods = parallel_for_heuristic( round_size, - std::pair{Fr::zero(), Fr::zero()}, + std::pair{ Fr::zero(), Fr::zero() }, [&](size_t j, std::pair& inner_prod_left_right) { // Compute inner_prod_L := < a_vec_lo, b_vec_hi > inner_prod_left_right.first += a_vec[j] * b_vec[round_size + j]; // Compute inner_prod_R := < a_vec_hi, b_vec_lo > inner_prod_left_right.second += a_vec[round_size + j] * b_vec[j]; - }, thread_heuristics::FF_ADDITION_COST * 2 + thread_heuristics::FF_MULTIPLICATION_COST * 2); + }, + thread_heuristics::FF_ADDITION_COST * 2 + thread_heuristics::FF_MULTIPLICATION_COST * 2); // Sum inner product contributions computed in parallel and unpack the std::pair auto [inner_prod_L, inner_prod_R] = sum_pairs(inner_prods); // Step 6.a (using letters, because doxygen automatically converts the sublist counters to letters :( ) // L_i = < a_vec_lo, G_vec_hi > + inner_prod_L * aux_generator - L_i = scalar_multiplication::pippenger_unsafe({0, {&a_vec.at(0), /*size*/ round_size}},{&G_vec_local[round_size], round_size}); + L_i = scalar_multiplication::pippenger_unsafe({ 0, { &a_vec.at(0), /*size*/ round_size } }, + { &G_vec_local[round_size], round_size }); L_i += aux_generator * inner_prod_L; // Step 6.b // R_i = < a_vec_hi, G_vec_lo > + inner_prod_R * aux_generator - R_i = scalar_multiplication::pippenger_unsafe({0, {&a_vec.at(round_size), /*size*/ round_size}},{&G_vec_local[0], /*size*/ round_size}); + R_i = scalar_multiplication::pippenger_unsafe({ 0, { &a_vec.at(round_size), /*size*/ round_size } }, + { &G_vec_local[0], /*size*/ round_size }); R_i += aux_generator * inner_prod_R; // Step 6.c - // Send commitments to the verifier + // Send L_i and R_i to the verifier std::string index = std::to_string(log_poly_length - i - 1); transcript->send_to_verifier("IPA:L_" + index, Commitment(L_i)); transcript->send_to_verifier("IPA:R_" + index, Commitment(R_i)); @@ -292,7 +289,8 @@ 
template class IPA [&](size_t j) { a_vec.at(j) += round_challenge * a_vec[round_size + j]; b_vec[j] += round_challenge_inv * b_vec[round_size + j]; - }, thread_heuristics::FF_ADDITION_COST * 2 + thread_heuristics::FF_MULTIPLICATION_COST * 2); + }, + thread_heuristics::FF_ADDITION_COST * 2 + thread_heuristics::FF_MULTIPLICATION_COST * 2); } // Step 7 @@ -303,6 +301,37 @@ template class IPA // Send a_0 to the verifier transcript->send_to_verifier("IPA:a_0", a_vec[0]); } + /** + * @brief Add the opening claim to the hash buffer. + * + * @details We add the commitment, challenge, and claimed evaluation to the hash buffer. + * @tparam Transcript + * @param ck + * @param opening_claim + * @param transcript + * @note This requires us to explicitly compute the commitment. + * @note We enact this separation to allow for more ergonomic failure tests. + */ + template + static void add_claim_to_hash_buffer(const CK& ck, + const ProverOpeningClaim& opening_claim, + const std::shared_ptr& transcript) + { + const bb::Polynomial& polynomial = opening_claim.polynomial; + + // Step 1. + // Add the commitment, challenge, and evaluation to the hash buffer. + // NOTE: + // a. This is a bit inefficient, as the prover otherwise doesn't need this commitment. + // However, the effect on performance of this MSM (in practice of size 2^16) is tiny. + // b. Note that we add these three pieces of information to the hash buffer, as opposed to + // calling the `send_to_verifier` method, as the verifier knows them. + + const auto commitment = ck.commit(polynomial); + transcript->add_to_hash_buffer("IPA:commitment", commitment); + transcript->add_to_hash_buffer("IPA:challenge", opening_claim.opening_pair.challenge); + transcript->add_to_hash_buffer("IPA:evaluation", opening_claim.opening_pair.evaluation); + } /** * @brief Natively verify the correctness of a Proof @@ -316,9 +345,9 @@ template class IPA * * @details The procedure runs as follows: * - *1. 
Receive \f$d\f$ (polynomial degree plus one) from the prover + *1. Receive commitment, challenge, and claimed evaluation from the prover *2. Receive the generator challenge \f$u\f$, abort if it's zero, otherwise compute \f$U=u\cdot G\f$ - *3. Compute \f$C'=C+f(\beta)\cdot U\f$ + *3. Compute \f$C'=C+f(\beta)\cdot U\f$. (Recall that \f$f(\beta)\f$ is the claimed evaluation.) *4. Receive \f$L_j, R_j\f$ and compute challenges \f$u_j\f$ for \f$j \in {k-1,..,0}\f$, abort immediately on receiving a \f$u_j=0\f$ *5. Compute \f$C_0 = C' + \sum_{j=0}^{k-1}(u_j^{-1}L_j + u_jR_j)\f$ @@ -329,16 +358,11 @@ template class IPA *10. Compute \f$C_{right}=a_{0}G_{s}+a_{0}b_{0}U\f$ *11. Check that \f$C_{right} = C_0\f$. If they match, return true. Otherwise return false. */ - static bool reduce_verify_internal_native(const VK& vk, - const OpeningClaim& opening_claim, - auto& transcript) + static bool reduce_verify_internal_native(const VK& vk, const OpeningClaim& opening_claim, auto& transcript) requires(!Curve::is_stdlib_type) { - // Step 1. - // Add the commitment, challenge, and evaluation to the hash buffer. - transcript->template add_to_hash_buffer("IPA:commitment", opening_claim.commitment); - transcript->template add_to_hash_buffer("IPA:challenge", opening_claim.opening_pair.challenge); - transcript->template add_to_hash_buffer("IPA:evaluation", opening_claim.opening_pair.evaluation); + // Step 1 + // Done by `add_claim_to_hash_buffer`. // Step 2. // Receive generator challenge u and compute auxiliary generator @@ -348,25 +372,25 @@ template class IPA throw_or_abort("The generator challenge can't be zero"); } - Commitment aux_generator = Commitment::one() * generator_challenge; + const Commitment aux_generator = Commitment::one() * generator_challenge; // Step 3. 
- // Compute C' = C + f(\beta) ⋅ U - GroupElement C_prime = opening_claim.commitment + (aux_generator * opening_claim.opening_pair.evaluation); + // Compute C' = C + f(\beta) ⋅ U, i.e., the _joint_ commitment of f and f(\beta). + const GroupElement C_prime = opening_claim.commitment + (aux_generator * opening_claim.opening_pair.evaluation); - auto pippenger_size = 2 * log_poly_length; + const auto pippenger_size = 2 * log_poly_length; std::vector round_challenges(log_poly_length); // the group elements that will participate in our MSM. - std::vector msm_elements(pippenger_size); + std::vector msm_elements(pippenger_size); // L_{k-1}, R_{k-1}, L_{k-2}, ..., L_0, R_0. // the scalars that will participate in our MSM. - std::vector msm_scalars(pippenger_size); + std::vector msm_scalars(pippenger_size); // w_{k-1}^{-1}, w_{k-1}, ..., w_{0}^{-1}, w_{0}. // Step 4. // Receive all L_i and R_i and populate msm_elements. for (size_t i = 0; i < log_poly_length; i++) { std::string index = std::to_string(log_poly_length - i - 1); - auto element_L = transcript->template receive_from_prover("IPA:L_" + index); - auto element_R = transcript->template receive_from_prover("IPA:R_" + index); + const auto element_L = transcript->template receive_from_prover("IPA:L_" + index); + const auto element_R = transcript->template receive_from_prover("IPA:R_" + index); round_challenges[i] = transcript->template get_challenge("IPA:round_challenge_" + index); if (round_challenges[i].is_zero()) { throw_or_abort("Round challenges can't be zero"); @@ -385,23 +409,19 @@ template class IPA } // Step 5. 
- // Compute C₀ = C' + ∑_{j ∈ [k]} u_j^{-1}L_j + ∑_{j ∈ [k]} u_jR_j - GroupElement LR_sums = scalar_multiplication::pippenger_unsafe({0, {&msm_scalars[0], /*size*/ pippenger_size}},{&msm_elements[0], /*size*/ pippenger_size}); + // Compute C_zero = C' + ∑_{j ∈ [k]} u_j^{-1}L_j + ∑_{j ∈ [k]} u_jR_j + GroupElement LR_sums = scalar_multiplication::pippenger_unsafe( + { 0, { &msm_scalars[0], /*size*/ pippenger_size } }, { &msm_elements[0], /*size*/ pippenger_size }); GroupElement C_zero = C_prime + LR_sums; // Step 6. - // Compute b_zero where b_zero can be computed using the polynomial: - // g(X) = ∏_{i ∈ [k]} (1 + u_{i-1}^{-1}.X^{2^{i-1}}). - // b_zero = g(evaluation) = ∏_{i ∈ [k]} (1 + u_{i-1}^{-1}. (evaluation)^{2^{i-1}}) - Fr b_zero = Fr::one(); - for (size_t i = 0; i < log_poly_length; i++) { - b_zero *= Fr::one() + (round_challenges_inv[log_poly_length - 1 - i] * - opening_claim.opening_pair.challenge.pow(1 << i)); - } + // Compute b_zero succinctly + const Fr b_zero = evaluate_challenge_poly(round_challenges_inv, opening_claim.opening_pair.challenge); // Step 7. // Construct vector s - Polynomial s_poly(construct_poly_from_u_challenges_inv(std::span(round_challenges_inv).subspan(0, log_poly_length))); + Polynomial s_vec( + construct_poly_from_u_challenges_inv(std::span(round_challenges_inv).subspan(0, log_poly_length))); std::span srs_elements = vk.get_monomial_points(); if (poly_length > srs_elements.size()) { @@ -409,56 +429,86 @@ template class IPA } // Step 8. - // Compute G₀ - Commitment G_zero = scalar_multiplication::pippenger_unsafe(s_poly,{&srs_elements[0], /*size*/ poly_length}); + // Compute G_zero + Commitment G_zero = + scalar_multiplication::pippenger_unsafe(s_vec, { &srs_elements[0], /*size*/ poly_length }); Commitment G_zero_sent = transcript->template receive_from_prover("IPA:G_0"); BB_ASSERT_EQ(G_zero, G_zero_sent, "G_0 should be equal to G_0 sent in transcript. IPA verification fails."); // Step 9. 
- // Receive a₀ from the prover + // Receive a_zero from the prover auto a_zero = transcript->template receive_from_prover("IPA:a_0"); // Step 10. - // Compute C_right + // Compute C_right. Implicitly, this is an IPA statement for the length 1 vectors and together with + // the URS G_0. GroupElement right_hand_side = G_zero * a_zero + aux_generator * a_zero * b_zero; // Step 11. - // Check if C_right == C₀ + // Check if C_right == C_zero return (C_zero.normalize() == right_hand_side.normalize()); } + /** - * @brief Recursively verify the correctness of an IPA proof, without computing G_zero. Unlike native verification, there is no - * parallelisation in this function as our circuit construction does not currently support parallelisation. + * @brief Add the opening claim to the hash buffer. * - * @details batch_mul is used instead of pippenger as pippenger is not implemented to be used in stdlib context for - * now and under the hood we perform bigfield to cycle_scalar conversions for the batch_mul. That is because - * cycle_scalar has very reduced functionality at the moment and doesn't support basic arithmetic operations between - * two cycle_scalar operands (just for one cycle_group and one cycle_scalar to enable batch_mul). + * @details We add the commitment, challenge, and claimed evaluation to the hash buffer. + * @tparam Transcript + * @param opening_claim + * @param transcript + * @note This requires us to explicitly compute the commitment. + * @note We enact this separation to allow for more ergonomic failure tests. + */ + template + static void add_claim_to_hash_buffer(const OpeningClaim& opening_claim, + const std::shared_ptr& transcript) + { + + // Step 1. + // Add the commitment, challenge, and evaluation to the hash buffer. 
+ + transcript->add_to_hash_buffer("IPA:commitment", opening_claim.commitment); + transcript->add_to_hash_buffer("IPA:challenge", opening_claim.opening_pair.challenge); + transcript->add_to_hash_buffer("IPA:evaluation", opening_claim.opening_pair.evaluation); + } + /** + * @brief Recursively verify the correctness of an IPA proof, without computing G_0. This is therefore a "partial + * verification", where the verifier takes the Prover's G_0 "at face value". + * + * @details Unlike native verification, there is no parallelisation (for the MSM computation) in this function as + * our circuit construction does not currently support parallelisation. batch_mul is used instead of pippenger as + * pippenger is not implemented to be used in stdlib context for now and under the hood we perform bigfield to + * cycle_scalar conversions for the batch_mul. That is because cycle_scalar has very reduced functionality at the + * moment and doesn't support basic arithmetic operations between two cycle_scalar operands (just for one + * cycle_group and one cycle_scalar to enable batch_mul). * @param vk * @param opening_claim * @param transcript - * @return VerifierAccumulator + * @return VerifierAccumulator, i.e., the u_inv challenges, the claimed Pederson commitment to the + * challenge polynomial derived from the u_inv challenges, and a boolean recording whether the last accumulation + * step failed or passed. */ static VerifierAccumulator reduce_verify_internal_recursive(const OpeningClaim& opening_claim, - auto& transcript) + auto& transcript) requires Curve::is_stdlib_type { // Step 1. - // Add the commitment, challenge, and evaluation to the hash buffer. - transcript->template add_to_hash_buffer("IPA:commitment", opening_claim.commitment); - transcript->template add_to_hash_buffer("IPA:challenge", opening_claim.opening_pair.challenge); - transcript->template add_to_hash_buffer("IPA:evaluation", opening_claim.opening_pair.evaluation); + // Done by `add_claim_to_hash_buffer`. 
// Step 2. // Receive generator challenge u and compute auxiliary generator const Fr generator_challenge = transcript->template get_challenge("IPA:generator_challenge"); typename Curve::Builder* builder = generator_challenge.get_context(); - auto pippenger_size = 2 * log_poly_length; + auto pippenger_size = + 2 * log_poly_length + + 2; // the only check we perform will involve an MSM. we make the MSM "as big as possible" for efficiency, + // which is why `pippenger_size` is bigger here than in the native verifier. std::vector round_challenges(log_poly_length); std::vector round_challenges_inv(log_poly_length); - std::vector msm_elements(pippenger_size); - std::vector msm_scalars(pippenger_size); - + std::vector msm_elements( + pippenger_size); // L_{k-1}, R_{k-1}, L_{k-2}, ..., L_0, R_0, -G_0, -Commitment::one() + std::vector msm_scalars(pippenger_size); // w_{k-1}^{-1}, w_{k-1}, ..., w_{0}^{-1}, w_{0}, a_0, (a_0 * b_0 - + // f(\beta)) * generator_challenge // Step 3. // Receive all L_i and R_i and prepare for MSM @@ -473,48 +523,35 @@ template class IPA msm_elements[2 * i] = element_L; msm_elements[2 * i + 1] = element_R; msm_scalars[2 * i] = round_challenges_inv[i]; - msm_scalars[2 * i + 1] = round_challenges[i]; + msm_scalars[2 * i + 1] = round_challenges[i]; } - // Step 4. - // Compute b_zero where b_zero can be computed using the polynomial: - // g(X) = ∏_{i ∈ [k]} (1 + u_{i-1}^{-1}.X^{2^{i-1}}). - // b_zero = g(evaluation) = ∏_{i ∈ [k]} (1 + u_{i-1}^{-1}. (evaluation)^{2^{i-1}}) - - Fr b_zero = Fr(1); - Fr challenge = opening_claim.opening_pair.challenge; - for (size_t i = 0; i < log_poly_length; i++) { - - Fr monomial = round_challenges_inv[log_poly_length - 1 - i] * challenge; - b_zero *= Fr(1) + monomial; - if (i != log_poly_length - 1) // this if statement is fine because the number of iterations is constant - { - challenge = challenge.sqr(); - } - } + // Step 4. 
+ // Compute b_zero succinctly + Fr b_zero = evaluate_challenge_poly(round_challenges_inv, opening_claim.opening_pair.challenge); // Step 5. - // Receive G₀ from the prover + // Receive G_zero from the prover Commitment G_zero = transcript->template receive_from_prover("IPA:G_0"); // Step 6. - // Receive a₀ from the prover + // Receive a_zero from the prover const auto a_zero = transcript->template receive_from_prover("IPA:a_0"); // Step 7. - // Compute R = C' + ∑_{j ∈ [k]} u_j^{-1}L_j + ∑_{j ∈ [k]} u_jR_j - G₀ * a₀ - (f(\beta) + a₀ * b₀) ⋅ U - // This is a combination of several IPA relations into a large batch mul - // which should be equal to -C + // Compute R = C' + ∑_{j ∈ [k]} u_j^{-1}L_j + ∑_{j ∈ [k]} u_jR_j - G₀ * a₀ - (f(\beta) - a₀ * b₀) ⋅ U + // If everything is correct, then R == -C, as C':= C + f(\beta) ⋅ U msm_elements.emplace_back(-G_zero); msm_elements.emplace_back(-Commitment::one(builder)); msm_scalars.emplace_back(a_zero); - msm_scalars.emplace_back(generator_challenge * a_zero.madd(b_zero, {-opening_claim.opening_pair.evaluation})); + msm_scalars.emplace_back(generator_challenge * a_zero.madd(b_zero, { -opening_claim.opening_pair.evaluation })); GroupElement ipa_relation = GroupElement::batch_mul(msm_elements, msm_scalars); auto neg_commitment = -opening_claim.commitment; - ipa_relation.assert_equal(neg_commitment); + // the below is the only constraint. + ipa_relation.assert_equal(neg_commitment); - return { round_challenges_inv, G_zero}; + return { round_challenges_inv, G_zero, ipa_relation.get_value() == -opening_claim.commitment.get_value() }; } /** @@ -528,10 +565,12 @@ template class IPA * @remark Detailed documentation can be found in \link IPA::compute_opening_proof_internal * compute_opening_proof_internal \endlink. 
*/ + template static void compute_opening_proof(const CK& ck, const ProverOpeningClaim& opening_claim, - const std::shared_ptr& transcript) + const std::shared_ptr& transcript) { + add_claim_to_hash_buffer(ck, opening_claim, transcript); compute_opening_proof_internal(ck, opening_claim, transcript); } @@ -546,16 +585,18 @@ template class IPA * *@remark The verification procedure documentation is in \link IPA::verify_internal verify_internal \endlink */ + template static bool reduce_verify(const VK& vk, - const OpeningClaim& opening_claim, - const auto& transcript) + const OpeningClaim& opening_claim, + const std::shared_ptr& transcript) requires(!Curve::is_stdlib_type) { + add_claim_to_hash_buffer(opening_claim, transcript); return reduce_verify_internal_native(vk, opening_claim, transcript); } /** - * @brief Recursively verify the correctness of a proof + * @brief Recursively _partially_ verify the correctness of an IPA proof. * * @param vk Verification_key containing srs * @param opening_claim Contains the commitment C and opening pair \f$(\beta, f(\beta))\f$ @@ -565,136 +606,79 @@ template class IPA * *@remark The verification procedure documentation is in \link IPA::verify_internal verify_internal \endlink */ - static VerifierAccumulator reduce_verify(const OpeningClaim& opening_claim, - const auto& transcript) + static VerifierAccumulator reduce_verify(const OpeningClaim& opening_claim, const auto& transcript) requires(Curve::is_stdlib_type) { + // The output of `reduce_verify_internal_recursive` consists of a `VerifierAccumulator` and a boolean, recording + // the truth value of the last verifier-compatibility check. This simply forgets the boolean and returns the + // `VerifierAccumulator`. + add_claim_to_hash_buffer(opening_claim, transcript); return reduce_verify_internal_recursive(opening_claim, transcript); } /** - * @brief Fully recursively verify the correctness of an IPA proof, including computing G_zero. 
Unlike native verification, there is no - parallelisation in this function as our circuit construction does not currently support parallelisation. + * @brief Fully recursively verify the correctness of an IPA proof, _including_ computing G_0. * - * @details batch_mul is used instead of pippenger as pippenger is not implemented to be used in stdlib context for - * now and under the hood we perform bigfield to cycle_scalar conversions for the batch_mul. That is because - * cycle_scalar has very reduced functionality at the moment and doesn't support basic arithmetic operations between - * two cycle_scalar operands (just for one cycle_group and one cycle_scalar to enable batch_mul). + * @details Unlike native verification, there is no parallelisation in this function as our circuit construction + * does not currently support parallelisation. batch_mul is used instead of pippenger as pippenger is not + * implemented to be used in stdlib context for now and under the hood we perform bigfield to cycle_scalar + * conversions for the batch_mul. That is because cycle_scalar has very reduced functionality at the moment and + * doesn't support basic arithmetic operations between two cycle_scalar operands (just for one cycle_group and one + * cycle_scalar to enable batch_mul). * @param vk * @param opening_claim * @param transcript * @return VerifierAccumulator + * + * @note This method calls `reduce_verify_internal_recursive` and additionally computes G_zero, and adds the + * _constraint_ that this matches with what the Prover sends. */ - static bool full_verify_recursive(const VK& vk, - const OpeningClaim& opening_claim, - auto& transcript) + static bool full_verify_recursive(const VK& vk, const OpeningClaim& opening_claim, auto& transcript) requires Curve::is_stdlib_type { - // Step 1. - // Add the commitment, challenge, and evaluation to the hash buffer. 
- transcript->template add_to_hash_buffer("IPA:commitment", opening_claim.commitment); - transcript->template add_to_hash_buffer("IPA:challenge", opening_claim.opening_pair.challenge); - transcript->template add_to_hash_buffer("IPA:evaluation", opening_claim.opening_pair.evaluation); - - // Step 2. - // Receive generator challenge u and compute auxiliary generator - const Fr generator_challenge = transcript->template get_challenge("IPA:generator_challenge"); - typename Curve::Builder* builder = generator_challenge.get_context(); - - static constexpr size_t pippenger_size = 2 * log_poly_length; - std::vector round_challenges(log_poly_length); - std::vector round_challenges_inv(log_poly_length); - std::vector msm_elements(pippenger_size); - std::vector msm_scalars(pippenger_size); - - - // Step 3. - // Receive all L_i and R_i and prepare for MSM - for (size_t i = 0; i < log_poly_length; i++) { - - std::string index = std::to_string(log_poly_length - i - 1); - auto element_L = transcript->template receive_from_prover("IPA:L_" + index); - auto element_R = transcript->template receive_from_prover("IPA:R_" + index); - round_challenges[i] = transcript->template get_challenge("IPA:round_challenge_" + index); - round_challenges_inv[i] = round_challenges[i].invert(); - - msm_elements[2 * i] = element_L; - msm_elements[2 * i + 1] = element_R; - msm_scalars[2 * i] = round_challenges_inv[i]; - msm_scalars[2 * i + 1] = round_challenges[i]; - } - - // Step 4. - // Compute b_zero where b_zero can be computed using the polynomial: - // g(X) = ∏_{i ∈ [k]} (1 + u_{i-1}^{-1}.X^{2^{i-1}}). - // b_zero = g(evaluation) = ∏_{i ∈ [k]} (1 + u_{i-1}^{-1}. 
(evaluation)^{2^{i-1}}) - - Fr b_zero = Fr(1); - Fr challenge = opening_claim.opening_pair.challenge; - for (size_t i = 0; i < log_poly_length; i++) { - - Fr monomial = round_challenges_inv[log_poly_length - 1 - i] * challenge; - b_zero *= Fr(1) + monomial; - if (i != log_poly_length - 1) // this if statement is fine because the number of iterations is constant - { - challenge = challenge.sqr(); - } - } + add_claim_to_hash_buffer(opening_claim, transcript); + VerifierAccumulator verifier_accumulator = reduce_verify_internal_recursive(opening_claim, transcript); + auto round_challenges_inv = verifier_accumulator.u_challenges_inv; + auto claimed_G_zero = verifier_accumulator.comm; - // Step 5. - // Construct vector s + // Construct vector s, whose rth entry is ∏ (u_i)^{-1 * r_i}, where (r_i) is the binary expansion of r. This + // is required to _compute_ G_zero (rather than just passively receive G_zero from the Prover). + // // We implement a linear-time algorithm to optimally compute this vector - // Note: currently requires an extra vector of size `poly_length / 2` to cache temporaries - // this might able to be optimized if we care enough, but the size of this poly shouldn't be large relative to the builder polynomial sizes + // Note: currently requires an extra vector of size + // `poly_length / 2` to cache temporaries + // this might be able to be optimized if we care enough, but the size of this poly shouldn't be large + // relative to the builder polynomial sizes std::vector s_vec_temporaries(poly_length / 2); std::vector s_vec(poly_length); Fr* previous_round_s = &s_vec_temporaries[0]; Fr* current_round_s = &s_vec[0]; // if number of rounds is even we need to swap these so that s_vec always contains the result - if constexpr ((log_poly_length & 1) == 0) - { + if constexpr ((log_poly_length & 1) == 0) { std::swap(previous_round_s, current_round_s); } previous_round_s[0] = Fr(1); - for (size_t i = 0; i < log_poly_length; ++i) - { + for (size_t i = 0; i < 
log_poly_length; ++i) { const size_t round_size = 1 << (i + 1); const Fr round_challenge = round_challenges_inv[i]; - for (size_t j = 0; j < round_size / 2; ++j) - { + for (size_t j = 0; j < round_size / 2; ++j) { current_round_s[j * 2] = previous_round_s[j]; current_round_s[j * 2 + 1] = previous_round_s[j] * round_challenge; } std::swap(current_round_s, previous_round_s); } - // Receive G₀ from the prover - Commitment transcript_G_zero = transcript->template receive_from_prover("IPA:G_0"); - // Compute G₀ - // Unlike the native verification function, the verifier commitment key only containts the SRS so we can apply - // batch_mul directly on it. - const std::vector srs_elements = vk.get_monomial_points(); - Commitment G_zero = Commitment::batch_mul(srs_elements, s_vec); - transcript_G_zero.assert_equal(G_zero); - BB_ASSERT_EQ(G_zero.get_value(), transcript_G_zero.get_value(), "G_zero doesn't match received G_zero."); - // Step 6. - // Receive a₀ from the prover - const auto a_zero = transcript->template receive_from_prover("IPA:a_0"); - - // Step 7. - // Compute R = C' + ∑_{j ∈ [k]} u_j^{-1}L_j + ∑_{j ∈ [k]} u_jR_j - G₀ * a₀ - (f(\beta) + a₀ * b₀) ⋅ U - // This is a combination of several IPA relations into a large batch mul - // which should be equal to -C - msm_elements.emplace_back(-G_zero); - msm_elements.emplace_back(-Commitment::one(builder)); - msm_scalars.emplace_back(a_zero); - msm_scalars.emplace_back(generator_challenge * a_zero.madd(b_zero, {-opening_claim.opening_pair.evaluation})); - GroupElement ipa_relation = GroupElement::batch_mul(msm_elements, msm_scalars); - auto neg_commitment = -opening_claim.commitment; - ipa_relation.assert_equal(neg_commitment); + // Compute G_zero + // In the native verifier, this uses pippenger. Here we use batch_mul. + const std::vector srs_elements = vk.get_monomial_points(); + Commitment computed_G_zero = Commitment::batch_mul(srs_elements, s_vec); + // check the computed G_zero and the claimed G_zero are the same. 
+ claimed_G_zero.assert_equal(computed_G_zero); + BB_ASSERT_EQ(computed_G_zero.get_value(), claimed_G_zero.get_value(), "G_zero doesn't match received G_zero."); - return (ipa_relation.get_value() == -opening_claim.commitment.get_value()); + bool running_truth_value = verifier_accumulator.running_truth_value; + return running_truth_value; } /** @@ -702,12 +686,14 @@ template class IPA * and scalars and a Shplonk evaluation challenge. * * @details Compute the commitment \f$ C \f$ that will be used to prove that Shplonk batching is performed correctly - * and check the evaluation claims of the batched univariate polynomials. The check is done by verifying that the - * polynomial corresponding to \f$ C \f$ evaluates to \f$ 0 \f$ at the Shplonk challenge point \f$ z \f$. + * (it is an MSM) and check the evaluation claims of the batched univariate polynomials. The check is done by + * verifying that the polynomial corresponding to \f$ C \f$ evaluates to \f$ 0 \f$ at the Shplonk challenge point + * \f$ z \f$. + * + * @note This function is basically just a wrapper around an MSM. 
* */ - static OpeningClaim reduce_batch_opening_claim( - const BatchOpeningClaim& batch_opening_claim) + static OpeningClaim reduce_batch_opening_claim(const BatchOpeningClaim& batch_opening_claim) { // Extract batch_mul arguments from the accumulator const auto& commitments = batch_opening_claim.commitments; @@ -716,12 +702,11 @@ template class IPA // Compute \f$ C = \sum \text{commitments}_i \cdot \text{scalars}_i \f$ GroupElement shplonk_output_commitment; if constexpr (Curve::is_stdlib_type) { - shplonk_output_commitment = - GroupElement::batch_mul(commitments, scalars); + shplonk_output_commitment = GroupElement::batch_mul(commitments, scalars); } else { shplonk_output_commitment = batch_mul_native(commitments, scalars); } - // Output an opening claim to be verified by the IPA opening protocol + // Output an opening claim, which in practice will be verified by the IPA opening protocol return { { shplonk_eval_challenge, Fr(0) }, shplonk_output_commitment }; } @@ -734,11 +719,12 @@ template class IPA * @return bool */ static bool reduce_verify_batch_opening_claim(const BatchOpeningClaim& batch_opening_claim, - const VK& vk, - auto& transcript) + const VK& vk, + auto& transcript) requires(!Curve::is_stdlib_type) { const auto opening_claim = reduce_batch_opening_claim(batch_opening_claim); + add_claim_to_hash_buffer(opening_claim, transcript); return reduce_verify_internal_native(vk, opening_claim, transcript); } @@ -756,7 +742,8 @@ template class IPA requires(Curve::is_stdlib_type) { const auto opening_claim = reduce_batch_opening_claim(batch_opening_claim); - return reduce_verify_internal_recursive(opening_claim, transcript); + add_claim_to_hash_buffer(opening_claim, transcript); + return reduce_verify_internal_recursive(opening_claim, transcript).verifier_accumulator; } /** @@ -767,82 +754,98 @@ template class IPA * @param r * @return Fr */ - static Fr evaluate_challenge_poly(const std::vector& u_challenges_inv, Fr r) { - + static Fr 
evaluate_challenge_poly(const std::vector& u_challenges_inv, Fr r) + { + // Runs the obvious algorithm to compute the product ∏_{i ∈ [k]} (1 + u_{len-i}^{-1}.r^{2^{i-1}}) by + // remembering the current 2-primary power of r. Fr challenge_poly_eval = 1; Fr r_pow = r; - - for (size_t i = 0; i < log_poly_length; i++) { - + // the loop runs to `log_poly_length - 1` because we don't want to superfluously compute r_pow.sqr() in the last + // round. + for (size_t i = 0; i < log_poly_length - 1; i++) { Fr monomial = u_challenges_inv[log_poly_length - 1 - i] * r_pow; - challenge_poly_eval *= (Fr(1) + monomial); r_pow = r_pow.sqr(); } + // same as the body of the loop, without `r_pow = r_pow.sqr()` + Fr monomial = u_challenges_inv[0] * r_pow; + challenge_poly_eval *= (Fr(1) + monomial); return challenge_poly_eval; } /** * @brief Combines the challenge_poly evaluations using the challenge alpha. * + * @details This will be used to batch IPA claims. + * * @param u_challenges_inv_1 * @param u_challenges_inv_2 * @param r * @param alpha * @return Fr */ - static Fr evaluate_and_accumulate_challenge_polys(std::vector u_challenges_inv_1, std::vector u_challenges_inv_2, Fr r, Fr alpha) { - auto result = evaluate_challenge_poly(u_challenges_inv_1, r) + alpha * evaluate_challenge_poly(u_challenges_inv_2, r); + static Fr evaluate_and_accumulate_challenge_polys(std::vector u_challenges_inv_1, + std::vector u_challenges_inv_2, + Fr r, + Fr alpha) + { + auto result = + evaluate_challenge_poly(u_challenges_inv_1, r) + alpha * evaluate_challenge_poly(u_challenges_inv_2, r); return result; } /** - * @brief Constructs challenge_poly(X) = ∏_{i ∈ [k]} (1 + u_{len-i}^{-1}.X^{2^{i-1}}). + * @brief Constructs challenge_poly(X) = ∏_{i ∈ [k]} (1 + u_{len-i}^{-1}.X^{2^{i-1}}), with coefficients in + * \f$\mathbb{F}_q\f$. The coefficients of this are alternatively known as `s_vec`. 
* * @param u_challenges_inv * @return Polynomial */ - static Polynomial construct_poly_from_u_challenges_inv(const std::span& u_challenges_inv) { + static Polynomial construct_poly_from_u_challenges_inv(const std::span& u_challenges_inv) + { // Construct vector s in linear time. std::vector s_vec(poly_length, bb::fq::one()); - std::vector s_vec_temporaries(poly_length / 2); bb::fq* previous_round_s = &s_vec_temporaries[0]; bb::fq* current_round_s = &s_vec[0]; // if number of rounds is even we need to swap these so that s_vec always contains the result - if ((log_poly_length & 1) == 0) - { + if ((log_poly_length & 1) == 0) { std::swap(previous_round_s, current_round_s); } previous_round_s[0] = bb::fq(1); - for (size_t i = 0; i < log_poly_length; ++i) - { + for (size_t i = 0; i < log_poly_length; ++i) { const size_t round_size = 1 << (i + 1); - const fq round_challenge = u_challenges_inv[i]; + const bb::fq round_challenge = u_challenges_inv[i]; parallel_for_heuristic( round_size / 2, [&](size_t j) { current_round_s[j * 2] = previous_round_s[j]; current_round_s[j * 2 + 1] = previous_round_s[j] * round_challenge; - }, thread_heuristics::FF_MULTIPLICATION_COST * 2); + }, + thread_heuristics::FF_MULTIPLICATION_COST * 2); std::swap(current_round_s, previous_round_s); } - return {s_vec, poly_length}; + return { s_vec, poly_length }; } /** * @brief Combines two challenge_polys using the challenge alpha. * + * @details via the formula `challenge_poly_1 + alpha * challenge_poly_2`. 
+ * * @param u_challenges_inv_1 * @param u_challenges_inv_2 * @param alpha * @return Polynomial */ - static Polynomial create_challenge_poly(const std::vector& u_challenges_inv_1, const std::vector& u_challenges_inv_2, bb::fq alpha) { + static Polynomial create_challenge_poly(const std::vector& u_challenges_inv_1, + const std::vector& u_challenges_inv_2, + bb::fq alpha) + { // Always extend each to 1< challenge_poly(1< challenge_poly(1 << log_poly_length); Polynomial challenge_poly_1 = construct_poly_from_u_challenges_inv(u_challenges_inv_1); Polynomial challenge_poly_2 = construct_poly_from_u_challenges_inv(u_challenges_inv_2); challenge_poly += challenge_poly_1; @@ -851,81 +854,98 @@ template class IPA } /** - * @brief Takes two IPA claims and accumulates them into 1 IPA claim. Also computes IPA proof for the claim. - * @details We create an IPA accumulator by running the IPA recursive verifier on each claim. Then, we generate challenges, and use these challenges to compute the new accumulator. We also create the accumulated polynomial, and generate the IPA proof for the accumulated claim. - * More details are described here: https://hackmd.io/IXoLIPhVT_ej8yhZ_Ehvuw?both. + * @brief Takes two IPA claims and accumulates them into a single IPA claim. Also computes IPA proof for the claim. + * + * @details We create an IPA accumulator by running the partial IPA recursive verifier on each claim. Then, we + * generate challenges, and use these challenges to compute the new accumulator. We also create the accumulated + * polynomial, and generate the IPA proof for the accumulated claim. More details are described here: + * https://hackmd.io/IXoLIPhVT_ej8yhZ_Ehvuw?both. 
* - * @param verifier_ck + * @param ck * @param transcript_1 * @param claim_1 * @param transcript_2 * @param claim_2 * @return std::pair, HonkProof> */ - static std::pair, HonkProof> accumulate(const CommitmentKey& ck, auto& transcript_1, OpeningClaim claim_1, auto& transcript_2, OpeningClaim claim_2) - requires Curve::is_stdlib_type + static std::pair, HonkProof> accumulate(const CommitmentKey& ck, + auto& transcript_1, + OpeningClaim claim_1, + auto& transcript_2, + OpeningClaim claim_2) + requires Curve::is_stdlib_type { using NativeCurve = curve::Grumpkin; using Builder = typename Curve::Builder; - // Step 1: Run the verifier for each IPA instance - VerifierAccumulator pair_1 = reduce_verify(claim_1, transcript_1); - VerifierAccumulator pair_2 = reduce_verify(claim_2, transcript_2); + // Step 1: Run the partial verifier for each IPA instance + VerifierAccumulator verifier_accumulator_1 = reduce_verify(claim_1, transcript_1); + VerifierAccumulator verifier_accumulator_2 = reduce_verify(claim_2, transcript_2); // Step 2: Generate the challenges by hashing the pairs using StdlibTranscript = BaseTranscript>; StdlibTranscript transcript; - transcript.send_to_verifier("u_challenges_inv_1", pair_1.u_challenges_inv); - transcript.send_to_verifier("U_1", pair_1.comm); - transcript.send_to_verifier("u_challenges_inv_2", pair_2.u_challenges_inv); - transcript.send_to_verifier("U_2", pair_2.comm); + transcript.add_to_hash_buffer("u_challenges_inv_1", verifier_accumulator_1.u_challenges_inv); + transcript.add_to_hash_buffer("U_1", verifier_accumulator_1.comm); + transcript.add_to_hash_buffer("u_challenges_inv_2", verifier_accumulator_2.u_challenges_inv); + transcript.add_to_hash_buffer("U_2", verifier_accumulator_2.comm); auto [alpha, r] = transcript.template get_challenges("IPA:alpha", "IPA:r"); // Step 3: Compute the new accumulator OpeningClaim output_claim; - output_claim.commitment = pair_1.comm + pair_2.comm * alpha; + output_claim.commitment = 
verifier_accumulator_1.comm + verifier_accumulator_2.comm * alpha; output_claim.opening_pair.challenge = r; // Evaluate the challenge_poly polys at r and linearly combine them with alpha challenge - output_claim.opening_pair.evaluation = evaluate_and_accumulate_challenge_polys(pair_1.u_challenges_inv, pair_2.u_challenges_inv, r, alpha); + output_claim.opening_pair.evaluation = evaluate_and_accumulate_challenge_polys( + verifier_accumulator_1.u_challenges_inv, verifier_accumulator_2.u_challenges_inv, r, alpha); - // Step 4: Compute the new polynomial + // Step 4: Compute the new challenge polynomial natively std::vector native_u_challenges_inv_1; std::vector native_u_challenges_inv_2; - for (Fr u_inv_i : pair_1.u_challenges_inv) { + for (Fr u_inv_i : verifier_accumulator_1.u_challenges_inv) { native_u_challenges_inv_1.push_back(bb::fq(u_inv_i.get_value())); } - for (Fr u_inv_i : pair_2.u_challenges_inv) { + for (Fr u_inv_i : verifier_accumulator_2.u_challenges_inv) { native_u_challenges_inv_2.push_back(bb::fq(u_inv_i.get_value())); } + Polynomial challenge_poly = + create_challenge_poly(native_u_challenges_inv_1, native_u_challenges_inv_2, bb::fq(alpha.get_value())); + // Compute proof for the claim auto prover_transcript = std::make_shared(); const OpeningPair opening_pair{ bb::fq(output_claim.opening_pair.challenge.get_value()), bb::fq(output_claim.opening_pair.evaluation.get_value()) }; - Polynomial challenge_poly = create_challenge_poly(native_u_challenges_inv_1, native_u_challenges_inv_2, fq(alpha.get_value())); - BB_ASSERT_EQ(challenge_poly.evaluate(opening_pair.challenge), opening_pair.evaluation, "Opening claim does not hold for challenge polynomial."); + BB_ASSERT_EQ(challenge_poly.evaluate(opening_pair.challenge), + opening_pair.evaluation, + "Opening claim does not hold for challenge polynomial."); - IPA::compute_opening_proof(ck, { challenge_poly, opening_pair }, prover_transcript); - 
BB_ASSERT_EQ(challenge_poly.evaluate(fq(output_claim.opening_pair.challenge.get_value())), fq(output_claim.opening_pair.evaluation.get_value()), "Opening claim does not hold for challenge polynomial."); + IPA::compute_opening_proof( + ck, { challenge_poly, opening_pair }, prover_transcript); + BB_ASSERT_EQ(challenge_poly.evaluate(bb::fq(output_claim.opening_pair.challenge.get_value())), + bb::fq(output_claim.opening_pair.evaluation.get_value()), + "Opening claim does not hold for challenge polynomial."); output_claim.opening_pair.evaluation.self_reduce(); - return {output_claim, prover_transcript->export_proof()}; + return { output_claim, prover_transcript->export_proof() }; } - static std::pair, HonkProof> create_fake_ipa_claim_and_proof(UltraCircuitBuilder& builder) - requires Curve::is_stdlib_type { + static std::pair, HonkProof> create_random_valid_ipa_claim_and_proof( + UltraCircuitBuilder& builder) + requires Curve::is_stdlib_type + { using NativeCurve = curve::Grumpkin; using Builder = typename Curve::Builder; using Curve = stdlib::grumpkin; auto ipa_transcript = std::make_shared(); CommitmentKey ipa_commitment_key(poly_length); size_t n = poly_length; - auto poly = Polynomial(n); + auto poly = Polynomial(n); for (size_t i = 0; i < n; i++) { - poly.at(i) = fq::random_element(); + poly.at(i) = bb::fq::random_element(); } - fq x = fq::random_element(); - fq eval = poly.evaluate(x); + bb::fq x = bb::fq::random_element(); + bb::fq eval = poly.evaluate(x); auto commitment = ipa_commitment_key.commit(poly); const OpeningPair opening_pair = { x, eval }; IPA::compute_opening_proof(ipa_commitment_key, { poly, opening_pair }, ipa_transcript); @@ -935,7 +955,7 @@ template class IPA auto stdlib_eval = Curve::ScalarField::from_witness(&builder, eval); OpeningClaim stdlib_opening_claim{ { stdlib_x, stdlib_eval }, stdlib_comm }; - return {stdlib_opening_claim, ipa_transcript->export_proof()}; + return { stdlib_opening_claim, ipa_transcript->export_proof() }; } }; diff 
--git a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.test.cpp index deff323cfd00..1539ceecce96 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.test.cpp @@ -29,7 +29,6 @@ class IPATest : public CommitmentTest { static CK ck; static VK vk; - // For edge cases static constexpr size_t log_n = 7; using PCS = IPA; @@ -41,82 +40,148 @@ class IPATest : public CommitmentTest { ck = create_commitment_key(n); vk = create_verifier_commitment_key(); } + + struct ResultOfProveVerify { + bool result; + std::shared_ptr prover_transcript; + std::shared_ptr verifier_transcript; + }; + + static ResultOfProveVerify run_native_prove_verify(const Polynomial& poly, const Fr x) + { + Commitment commitment = ck.commit(poly); + auto eval = poly.evaluate(x); + const OpeningPair opening_pair = { x, eval }; + const OpeningClaim opening_claim{ opening_pair, commitment }; + + // initialize empty prover transcript + auto prover_transcript = std::make_shared(); + PCS::compute_opening_proof(ck, { poly, opening_pair }, prover_transcript); + + // initialize verifier transcript from proof data + auto verifier_transcript = std::make_shared(); + verifier_transcript->load_proof(prover_transcript->export_proof()); + // the native reduce_verify does a _complete_ IPA proof and returns whether or not the checks pass. + bool result = PCS::reduce_verify(vk, opening_claim, verifier_transcript); + return { result, prover_transcript, verifier_transcript }; + } }; } // namespace #define IPA_TEST #include "ipa.hpp" +// Commit Tests: below are several tests to make sure commitment is correct. 
+// +// Commit to a polynomial that is non-zero but has many zero coefficients TEST_F(IPATest, CommitOnManyZeroCoeffPolyWorks) { - constexpr size_t n = 4; + constexpr size_t n = 16; Polynomial p(n); for (size_t i = 0; i < n - 1; i++) { p.at(i) = Fr::zero(); } - p.at(3) = Fr::one(); + p.at(3) = this->random_element(); GroupElement commitment = ck.commit(p); auto srs_elements = ck.srs->get_monomial_points(); GroupElement expected = srs_elements[0] * p[0]; - // The SRS stored in the commitment key is the result after applying the pippenger point table so the - // values at odd indices contain the point {srs[i-1].x * beta, srs[i-1].y}, where beta is the endomorphism - // G_vec_local should use only the original SRS thus we extract only the even indices. for (size_t i = 1; i < n; i += 1) { expected += srs_elements[i] * p[i]; } EXPECT_EQ(expected.normalize(), commitment.normalize()); } - -// This test checks that we can correctly open a zero polynomial. Since we often have point at infinity troubles, it -// detects those. 
-TEST_F(IPATest, OpenZeroPolynomial) +// Commit to zero poly +TEST_F(IPATest, CommitToZeroPoly) { Polynomial poly(n); - // Commit to a zero polynomial Commitment commitment = ck.commit(poly); EXPECT_TRUE(commitment.is_point_at_infinity()); - auto [x, eval] = this->random_eval(poly); + auto x = this->random_element(); + auto eval = poly.evaluate(x); EXPECT_EQ(eval, Fr::zero()); - const OpeningPair opening_pair = { x, eval }; - const OpeningClaim opening_claim{ opening_pair, commitment }; - - // initialize empty prover transcript - auto prover_transcript = std::make_shared(); - PCS::compute_opening_proof(ck, { poly, opening_pair }, prover_transcript); - - // initialize verifier transcript from proof data - auto verifier_transcript = std::make_shared(); - verifier_transcript->load_proof(prover_transcript->export_proof()); +} +// Commit to a random poly +TEST_F(IPATest, Commit) +{ + auto poly = Polynomial::random(n); + const GroupElement commitment = ck.commit(poly); + auto srs_elements = ck.srs->get_monomial_points(); + GroupElement expected = srs_elements[0] * poly[0]; + for (size_t i = 1; i < n; ++i) { + expected += srs_elements[i] * poly[i]; + } + EXPECT_EQ(expected.normalize(), commitment.normalize()); +} - bool result = PCS::reduce_verify(vk, opening_claim, verifier_transcript); +// Opening tests, i.e., check completeness for prove-and-verify. 
+// +// poly is zero, point is random +TEST_F(IPATest, OpenZeroPolynomial) +{ + Polynomial poly(n); + auto x = this->random_element(); + bool result = run_native_prove_verify(poly, x).result; EXPECT_TRUE(result); } -// This test makes sure that even if the whole vector \vec{b} generated from the x, at which we open the polynomial, -// is zero, IPA behaves +TEST_F(IPATest, OpenManyZerosPolynomial) +{ + // polynomial with zero odd coefficients and random even coefficients + Polynomial poly_even(n); + // polynomial with zero even coefficients and random odd coefficients + Polynomial poly_odd(n); + for (size_t i = 0; i < n / 2; ++i) { + poly_even.at(2 * i) = this->random_element(); + poly_odd.at(2 * i + 1) = this->random_element(); + } + auto x = this->random_element(); + bool result_even = run_native_prove_verify(poly_even, x).result; + bool result_odd = run_native_prove_verify(poly_odd, x).result; + EXPECT_TRUE(result_even && result_odd); +} + +// poly is random, point is zero TEST_F(IPATest, OpenAtZero) { // generate a random polynomial, degree needs to be a power of two auto poly = Polynomial::random(n); const Fr x = Fr::zero(); - const Fr eval = poly.evaluate(x); - const Commitment commitment = ck.commit(poly); - const OpeningPair opening_pair = { x, eval }; - const OpeningClaim opening_claim{ opening_pair, commitment }; + bool result = run_native_prove_verify(poly, x).result; + EXPECT_TRUE(result); +} - // initialize empty prover transcript - auto prover_transcript = std::make_shared(); - PCS::compute_opening_proof(ck, { poly, opening_pair }, prover_transcript); +// poly and point are random +TEST_F(IPATest, Open) +{ + // generate a random polynomial, degree needs to be a power of two + auto poly = Polynomial::random(n); + auto x = this->random_element(); + auto result_of_prove_verify = run_native_prove_verify(poly, x); + EXPECT_TRUE(result_of_prove_verify.result); - // initialize verifier transcript from proof data - auto verifier_transcript = 
std::make_shared(); - verifier_transcript->load_proof(prover_transcript->export_proof()); + EXPECT_EQ(result_of_prove_verify.prover_transcript->get_manifest(), + result_of_prove_verify.verifier_transcript->get_manifest()); +} - bool result = PCS::reduce_verify(vk, opening_claim, verifier_transcript); +// poly and point are random, condition on the fact that the evaluation is zero. +TEST_F(IPATest, OpeningValueZero) +{ + // generate random polynomial + auto poly = Polynomial::random(n); + auto x = this->random_element(); + auto initial_evaluation = poly.evaluate(x); + auto change_in_linear_coefficient = initial_evaluation / x; + // change linear coefficient so that poly(x) == 0. + poly.at(1) -= change_in_linear_coefficient; + + EXPECT_EQ(poly.evaluate(x), Fr::zero()); + bool result = run_native_prove_verify(poly, x).result; EXPECT_TRUE(result); } +// Tests that "artificially" mutate the Transcript. This uses the type `MockTranscript`. + namespace bb { #if !defined(__wasm__) // This test ensures that IPA throws or aborts when a challenge is zero, since it breaks the logic of the argument @@ -145,7 +210,7 @@ TEST_F(IPATest, ChallengesAreZero) auto new_random_vector = random_vector; new_random_vector[i] = Fr::zero(); transcript->initialize(new_random_vector); - EXPECT_ANY_THROW(PCS::compute_opening_proof_internal(ck, { poly, opening_pair }, transcript)); + EXPECT_ANY_THROW(PCS::compute_opening_proof(ck, { poly, opening_pair }, transcript)); } // Fill out a vector of affine elements that the verifier receives from the prover with generators (we don't care // about them right now) @@ -159,7 +224,7 @@ TEST_F(IPATest, ChallengesAreZero) auto new_random_vector = random_vector; new_random_vector[i] = Fr::zero(); transcript->initialize(new_random_vector, lrs, { uint256_t(n) }); - EXPECT_ANY_THROW(PCS::reduce_verify_internal_native(vk, opening_claim, transcript)); + EXPECT_ANY_THROW(PCS::reduce_verify(vk, opening_claim, transcript)); } } @@ -179,7 +244,6 @@ TEST_F(IPATest, 
AIsZeroAfterOneRound) // initialize an empty mock transcript auto transcript = std::make_shared(); - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1159): Decouple constant from IPA. const size_t num_challenges = log_n + 1; std::vector random_vector(num_challenges); @@ -194,56 +258,21 @@ TEST_F(IPATest, AIsZeroAfterOneRound) transcript->initialize(random_vector); // Compute opening proof - PCS::compute_opening_proof_internal(ck, { poly, opening_pair }, transcript); + PCS::compute_opening_proof(ck, { poly, opening_pair }, transcript); // Reset indices transcript->reset_indices(); // Verify - EXPECT_TRUE(PCS::reduce_verify_internal_native(vk, opening_claim, transcript)); + EXPECT_TRUE(PCS::reduce_verify(vk, opening_claim, transcript)); } #endif } // namespace bb -TEST_F(IPATest, Commit) -{ - auto poly = Polynomial::random(n); - const GroupElement commitment = ck.commit(poly); - auto srs_elements = ck.srs->get_monomial_points(); - GroupElement expected = srs_elements[0] * poly[0]; - // The SRS stored in the commitment key is the result after applying the pippenger point table so the - // values at odd indices contain the point {srs[i-1].x * beta, srs[i-1].y}, where beta is the endomorphism - // G_vec_local should use only the original SRS thus we extract only the even indices. - for (size_t i = 1; i < n; i += 1) { - expected += srs_elements[i] * poly[i]; - } - EXPECT_EQ(expected.normalize(), commitment.normalize()); -} +// Tests of batched MLPCS, where IPA is the final univariate commitment scheme. 
-TEST_F(IPATest, Open) -{ - // generate a random polynomial, degree needs to be a power of two - auto poly = Polynomial::random(n); - auto [x, eval] = this->random_eval(poly); - auto commitment = ck.commit(poly); - const OpeningPair opening_pair = { x, eval }; - const OpeningClaim opening_claim{ opening_pair, commitment }; - - // initialize empty prover transcript - auto prover_transcript = std::make_shared(); - PCS::compute_opening_proof(ck, { poly, opening_pair }, prover_transcript); - - // initialize verifier transcript from proof data - auto verifier_transcript = std::make_shared(); - verifier_transcript->load_proof(prover_transcript->export_proof()); - - auto result = PCS::reduce_verify(vk, opening_claim, verifier_transcript); - EXPECT_TRUE(result); - - EXPECT_EQ(prover_transcript->get_manifest(), verifier_transcript->get_manifest()); -} - -TEST_F(IPATest, GeminiShplonkIPAWithShift) +// Gemini + Shplonk + IPA. Two random polynomials, no shifts. +TEST_F(IPATest, GeminiShplonkIPAWithoutShift) { // Generate multilinear polynomials, their commitments (genuine and mocked) and evaluations (genuine) at a random // point. @@ -279,14 +308,15 @@ TEST_F(IPATest, GeminiShplonkIPAWithShift) EXPECT_EQ(result, true); } +// Shplemini + IPA. Five polynomials, one of which is shifted. TEST_F(IPATest, ShpleminiIPAWithShift) { // Generate multilinear polynomials, their commitments (genuine and mocked) and evaluations (genuine) at a random // point. auto mle_opening_point = this->random_evaluation_point(log_n); // sometimes denoted 'u' MockClaimGenerator mock_claims(n, - /*num_polynomials*/ 2, - /*num_to_be_shifted*/ 0, + /*num_polynomials*/ 4, + /*num_to_be_shifted*/ 1, /*num_to_be_right_shifted_by_k*/ 0, mle_opening_point, ck); @@ -318,10 +348,7 @@ TEST_F(IPATest, ShpleminiIPAWithShift) EXPECT_EQ(result, true); } -/** - * @brief Test the behaviour of the method ShpleminiVerifier::remove_shifted_commitments - * - */ +// Test `ShpleminiVerifier::remove_shifted_commitments`. 
Four polynomials, two of which are shifted. TEST_F(IPATest, ShpleminiIPAShiftsRemoval) { // Generate multilinear polynomials, their commitments (genuine and mocked) and evaluations (genuine) at a random diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.test.cpp index 4c4f4bb9d65b..46c51f0a11ad 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.test.cpp @@ -288,7 +288,7 @@ TYPED_TEST(ShpleminiTest, ShpleminiZKNoSumcheckOpenings) // Generate Libra polynomials, compute masked concatenated Libra polynomial, commit to it ZKData zk_sumcheck_data(this->log_n, prover_transcript, ck); - // Generate multivariate challenge of size CONST_PROOF_SIZE_LOG_N + // Generate multivariate challenge std::vector mle_opening_point = this->random_evaluation_point(this->log_n); // Generate random prover polynomials, compute their evaluations and commitments @@ -303,7 +303,7 @@ TYPED_TEST(ShpleminiTest, ShpleminiZKNoSumcheckOpenings) const Fr claimed_inner_product = SmallSubgroupIPAProver::compute_claimed_inner_product( zk_sumcheck_data, mle_opening_point, this->log_n); - prover_transcript->template send_to_verifier("Libra:claimed_evaluation", claimed_inner_product); + prover_transcript->send_to_verifier("Libra:claimed_evaluation", claimed_inner_product); // Instantiate SmallSubgroupIPAProver, this prover sends commitments to Big Sum and Quotient polynomials SmallSubgroupIPAProver small_subgroup_ipa_prover( @@ -412,7 +412,7 @@ TYPED_TEST(ShpleminiTest, ShpleminiZKWithSumcheckOpenings) const Fr claimed_inner_product = SmallSubgroupIPAProver::compute_claimed_inner_product(zk_sumcheck_data, challenge, this->log_n); - prover_transcript->template send_to_verifier("Libra:claimed_evaluation", claimed_inner_product); + 
prover_transcript->send_to_verifier("Libra:claimed_evaluation", claimed_inner_product); // Instantiate SmallSubgroupIPAProver, this prover sends commitments to Big Sum and Quotient polynomials SmallSubgroupIPAProver small_subgroup_ipa_prover( diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplonk.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplonk.hpp index f1e5b6541e6b..85553677db36 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplonk.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplonk.hpp @@ -93,8 +93,8 @@ template class ShplonkProver_ { current_nu *= nu; } // We use the same batching challenge for Gemini and Libra opening claims. The number of the claims - // batched before adding Libra commitments and evaluations is bounded by 2 * CONST_PROOF_SIZE_LOG_N + 2, where - // 2 * CONST_PROOF_SIZE_LOG_N is the number of fold claims including the dummy ones, and +2 is reserved for + // batched before adding Libra commitments and evaluations is bounded by 2 * `virtual_log_n` + 2, where + // 2 * `virtual_log_n` is the number of fold claims including the dummy ones, and +2 is reserved for // interleaving. 
if (!libra_opening_claims.empty()) { current_nu = nu.pow(2 * virtual_log_n + NUM_INTERLEAVING_CLAIMS); @@ -381,7 +381,7 @@ template class ShplonkVerifier_ { pows_of_nu.reserve(num_claims); commitments.insert(commitments.end(), polynomial_commitments.begin(), polynomial_commitments.end()); - scalars.insert(scalars.end(), commitments.size() - 1, Fr(0)); // Initialised as circuit constants + scalars.insert(scalars.end(), commitments.size() - 1, Fr(0)); // Initialized as circuit constants // The first two powers of nu have already been initialized, we need another `num_claims - 2` powers to batch // all the claims for (size_t idx = 0; idx < num_claims - 2; idx++) { diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplonk.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplonk.test.cpp index 1435648e0d6d..eef1504e5d4d 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplonk.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplonk.test.cpp @@ -36,7 +36,7 @@ TYPED_TEST(ShplonkTest, ShplonkSimple) // An intermediate check to confirm the opening of the shplonk prover witness Q this->verify_opening_pair(batched_opening_claim.opening_pair, batched_opening_claim.polynomial); - // Initialise verifier transcript from prover transcript + // Initialize verifier transcript from prover transcript auto verifier_transcript = NativeTranscript::verifier_init_empty(prover_transcript); // Execute the shplonk verifier functionality diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/small_subgroup_ipa/small_subgroup_ipa.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/small_subgroup_ipa/small_subgroup_ipa.cpp index 77be456f249a..eef7544a5d31 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/small_subgroup_ipa/small_subgroup_ipa.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/small_subgroup_ipa/small_subgroup_ipa.cpp @@ -154,8 +154,7 @@ 
template void SmallSubgroupIPAProver::prove() compute_grand_sum_polynomial(); // Send masked commitment [A + Z_H * R] to the verifier, where R is of degree 2 - transcript->template send_to_verifier(label_prefix + "grand_sum_commitment", - commitment_key.commit(grand_sum_polynomial)); + transcript->send_to_verifier(label_prefix + "grand_sum_commitment", commitment_key.commit(grand_sum_polynomial)); // Compute C(X) compute_grand_sum_identity_polynomial(); @@ -164,8 +163,8 @@ template void SmallSubgroupIPAProver::prove() compute_grand_sum_identity_quotient(); // Send commitment [Q] to the verifier - transcript->template send_to_verifier(label_prefix + "quotient_commitment", - commitment_key.commit(grand_sum_identity_quotient)); + transcript->send_to_verifier(label_prefix + "quotient_commitment", + commitment_key.commit(grand_sum_identity_quotient)); } /** diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/utils/batch_mul_native.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/utils/batch_mul_native.hpp index d9ef4e78ec59..5c20c328d9e5 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/utils/batch_mul_native.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/utils/batch_mul_native.hpp @@ -41,17 +41,4 @@ static Commitment batch_mul_native(const std::vector& _points, const return result; } -/** - * @brief Utility for native batch multiplication of group elements - * @note This is used only for native verification and is not optimized for efficiency - */ -template static FF linear_combination(const std::vector& as, const std::vector& bs) -{ - FF result = as[0] * bs[0]; - for (size_t idx = 1; idx < as.size(); ++idx) { - result += as[idx] * bs[idx]; - } - return result; -} - } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/utils/mock_witness_generator.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/utils/mock_witness_generator.hpp index 1b530a4ae882..d70ab9d2d8ea 100644 --- 
a/barretenberg/cpp/src/barretenberg/commitment_schemes/utils/mock_witness_generator.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/utils/mock_witness_generator.hpp @@ -87,15 +87,36 @@ template struct MockClaimGenerator { , polynomial_batcher(poly_size) { + size_t log_size = numeric::get_msb(poly_size); + // If the size of the opening point is bigger than the log of the poly size, we assume that the prover is + // extending all of its polynomials by zero outside of the hypercube of size 2^{log_size}. + bool has_virtual_rounds = (mle_opening_point.size() > log_size); + + std::span challenge; + + if (has_virtual_rounds) { + // The evaluation on the full domain can be obtain by scaling by extension-by-zero factor `ebz_factor` + // computed below. + challenge = std::span(mle_opening_point).subspan(0, log_size); + } else { + challenge = std::span(mle_opening_point); + } + const size_t total_num_to_be_shifted = num_to_be_shifted + num_to_be_right_shifted_by_k; BB_ASSERT_GTE(num_polynomials, total_num_to_be_shifted); const size_t num_not_to_be_shifted = num_polynomials - total_num_to_be_shifted; + Fr ebz_factor = 1; + + for (size_t idx = log_size; idx < mle_opening_point.size(); idx++) { + ebz_factor *= (Fr(1) - mle_opening_point[idx]); + } + // Construct claim data for polynomials that are NOT to be shifted for (size_t idx = 0; idx < num_not_to_be_shifted; idx++) { Polynomial poly = Polynomial::random(poly_size); unshifted.commitments.push_back(ck.commit(poly)); - unshifted.evals.push_back(poly.evaluate_mle(mle_opening_point)); + unshifted.evals.push_back(poly.evaluate_mle(challenge) * ebz_factor); unshifted.polys.push_back(std::move(poly)); } @@ -104,11 +125,11 @@ template struct MockClaimGenerator { Polynomial poly = Polynomial::random(poly_size, /*shiftable*/ 1); Commitment commitment = ck.commit(poly); to_be_shifted.commitments.push_back(commitment); - to_be_shifted.evals.push_back(poly.shifted().evaluate_mle(mle_opening_point)); + 
to_be_shifted.evals.push_back(poly.shifted().evaluate_mle(challenge) * ebz_factor); to_be_shifted.polys.push_back(poly.share()); // Populate the unshifted counterpart in the unshifted claims unshifted.commitments.push_back(commitment); - unshifted.evals.push_back(poly.evaluate_mle(mle_opening_point)); + unshifted.evals.push_back(poly.evaluate_mle(challenge) * ebz_factor); unshifted.polys.push_back(std::move(poly)); } @@ -117,11 +138,12 @@ template struct MockClaimGenerator { Polynomial poly = Polynomial::random(poly_size - k_magnitude, poly_size, 0); Commitment commitment = ck.commit(poly); to_be_right_shifted_by_k.commitments.push_back(commitment); - to_be_right_shifted_by_k.evals.push_back(poly.right_shifted(k_magnitude).evaluate_mle(mle_opening_point)); + to_be_right_shifted_by_k.evals.push_back(poly.right_shifted(k_magnitude).evaluate_mle(challenge) * + ebz_factor); to_be_right_shifted_by_k.polys.push_back(poly.share()); // Populate the unshifted counterpart in the unshifted claims unshifted.commitments.push_back(commitment); - unshifted.evals.push_back(poly.evaluate_mle(mle_opening_point)); + unshifted.evals.push_back(poly.evaluate_mle(challenge) * ebz_factor); unshifted.polys.push_back(std::move(poly)); } @@ -163,6 +185,31 @@ template struct MockClaimGenerator { ClaimBatcher{ .unshifted = ClaimBatch{ RefVector(unshifted.commitments), RefVector(unshifted.evals) } }; } + // Generates mock claims by using the custom polynomials provided as input instead of random polynomials. Used for + // the high degree attack tests. 
+ MockClaimGenerator(const size_t poly_size, + const std::vector custom_unshifted, + const std::vector& custom_unshifted_evals, + const CommitmentKey& commitment_key) + + : ck(commitment_key) + , polynomial_batcher(poly_size) + { + + // ---------- Unshifted ---------- + for (size_t i = 0; i < custom_unshifted.size(); ++i) { + auto& p = custom_unshifted[i]; + unshifted.commitments.push_back(ck.commit(p)); + unshifted.evals.push_back(custom_unshifted_evals[i]); + unshifted.polys.push_back(std::move(p)); + } + + polynomial_batcher.set_unshifted(RefVector(unshifted.polys)); + + claim_batcher = + ClaimBatcher{ .unshifted = ClaimBatch{ RefVector(unshifted.commitments), RefVector(unshifted.evals) } }; + } + InterleaveData generate_interleaving_inputs(const std::vector& u_challenge, const size_t num_interleaved, const size_t group_size, diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/ipa_recursive.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/ipa_recursive.test.cpp index c4905f9997ce..f800c255fb23 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/ipa_recursive.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/ipa_recursive.test.cpp @@ -9,7 +9,7 @@ #include "barretenberg/stdlib/proof/proof.hpp" #include "barretenberg/stdlib/transcript/transcript.hpp" #include "barretenberg/transcript/transcript.hpp" -#include "barretenberg/ultra_honk/decider_proving_key.hpp" +#include "barretenberg/ultra_honk/prover_instance.hpp" using namespace bb; @@ -29,34 +29,96 @@ class IPARecursiveTests : public CommitmentTest { using StdlibProof = bb::stdlib::Proof; using StdlibTranscript = bb::stdlib::recursion::honk::UltraStdlibTranscript; + // `FailureMode::None` corresponds to a normal, completeness test. The other cases are legitimate failure modes, + // where the test should fail. 
As neither `a_0` nor `G_0` are hashed, the corresponding variants will not fail for + // Fiat-Shamir reasons. The last failure mode is: we send an OpeningClaim to the hash buffer, then + // we have the prover run the IPA process with a _different polynomial_. + enum class FailureMode : std::uint8_t { None, A_Zero, G_Zero, ChangePoly }; + + /** + * @brief Given a builder, polynomial, and challenge point, return the transcript and opening claim _in circuit_. + * + * @details Given a `poly` and `x`, first generates a native proof (and verifiers), then loads the proof into a + * StdLib transcript. + * + * @tparam log_poly_length + * @param builder + * @param poly + * @param x + * @param failure_mode + * @return std::pair, OpeningClaim> + * + * @note assumes that the size of `poly` is exactly `1 << log_poly_length`. + */ template - std::pair, OpeningClaim> create_ipa_claim(Builder& builder) + std::pair, OpeningClaim> create_ipa_claim( + Builder& builder, Polynomial& poly, Fr x, FailureMode failure_mode = FailureMode::None) { using NativeIPA = IPA; - static constexpr size_t poly_length = 1UL << log_poly_length; - - // First generate an ipa proof - auto poly = Polynomial::random(poly_length); - // Commit to a zero polynomial + EXPECT_EQ(1UL << log_poly_length, poly.size()); Commitment commitment = this->commit(poly); + auto eval = poly.evaluate(x); - auto [x, eval] = this->random_eval(poly); const OpeningPair opening_pair = { x, eval }; const OpeningClaim opening_claim{ opening_pair, commitment }; - + const ProverOpeningClaim prover_claim{ poly, opening_pair }; // initialize empty prover transcript auto prover_transcript = std::make_shared(); - NativeIPA::compute_opening_proof(this->ck(), { poly, opening_pair }, prover_transcript); - + using DataType = NativeTranscriptParams::DataType; + std::vector proof; // Export proof - auto proof = prover_transcript->export_proof(); + switch (failure_mode) { + case FailureMode::None: + // Normal operation + 
NativeIPA::compute_opening_proof(this->ck(), prover_claim, prover_transcript); + proof = prover_transcript->export_proof(); + break; + case FailureMode::A_Zero: + NativeIPA::compute_opening_proof(this->ck(), prover_claim, prover_transcript); + proof = prover_transcript->export_proof(); + // Multiply the last element of the proof, what the prover sends as a_0, by 3 + proof.back() *= 3; + break; + case FailureMode::G_Zero: { + NativeIPA::compute_opening_proof(this->ck(), prover_claim, prover_transcript); + proof = prover_transcript->export_proof(); + // Multiply the second to last element of the proof, what the prover sends as G_0, by 2. + const size_t comm_frs = 2; // an affine Grumpkin point requires 2 Fr elements to represent. + const size_t offset = log_poly_length * 2 * comm_frs; // we first send the L_i and R_i, then G_0. + auto element_frs = std::span{ proof }.subspan(offset, comm_frs); + + Commitment op_commitment = NativeTranscriptParams::template deserialize(element_frs); + Commitment new_op_commitment = op_commitment + op_commitment; + auto new_op_commitment_reserialized = NativeTranscriptParams::serialize(new_op_commitment); + std::copy(new_op_commitment_reserialized.begin(), + new_op_commitment_reserialized.end(), + proof.begin() + static_cast(offset)); + break; + } + case FailureMode::ChangePoly: + // instead of calling compute_opening_proof, we first add the prover claim to the hash buffer, then we run + // IPA with a _new_ polynomial. + NativeIPA::add_claim_to_hash_buffer(this->ck(), prover_claim, prover_transcript); + // generate a new polynomial evaluation claim. 
+ auto [new_poly, new_x] = generate_poly_and_challenge(); + auto new_eval = new_poly.evaluate(new_x); + + const OpeningPair new_opening_pair = { new_x, new_eval }; + const ProverOpeningClaim new_prover_claim{ new_poly, new_opening_pair }; + NativeIPA::compute_opening_proof_internal(this->ck(), new_prover_claim, prover_transcript); + proof = prover_transcript->export_proof(); + break; + } // initialize verifier transcript from proof data auto verifier_transcript = std::make_shared(); verifier_transcript->load_proof(proof); - + // run the native proof auto result = NativeIPA::reduce_verify(this->vk(), opening_claim, verifier_transcript); - EXPECT_TRUE(result); + + if (failure_mode == FailureMode::None) { + EXPECT_TRUE(result); + } // Recursively verify the proof auto stdlib_comm = Curve::Group::from_witness(&builder, commitment); @@ -69,49 +131,93 @@ class IPARecursiveTests : public CommitmentTest { recursive_verifier_transcript->load_proof(StdlibProof(builder, proof)); return { recursive_verifier_transcript, stdlib_opening_claim }; } - template Builder build_ipa_recursive_verifier_circuit() + /** + * @brief Given a `poly` and a challenge `x`, return the recursive verifier circuit. 
+ * + * @tparam log_poly_length + * @param poly + * @param x + * @return Builder + */ + template + Builder build_ipa_recursive_verifier_circuit(Polynomial& poly, Fr x, FailureMode failure_mode = FailureMode::None) { using RecursiveIPA = IPA; Builder builder; - auto [stdlib_transcript, stdlib_claim] = create_ipa_claim(builder); + auto [stdlib_transcript, stdlib_claim] = create_ipa_claim(builder, poly, x, failure_mode); RecursiveIPA::reduce_verify(stdlib_claim, stdlib_transcript); stdlib::recursion::PairingPoints::add_default_to_public_inputs(builder); builder.finalize_circuit(/*ensure_nonzero=*/true); return builder; } + // flag to determine what type of polynomial to generate + enum class PolyType : std::uint8_t { Random, ManyZeros, Sparse, Zero }; + + template + std::tuple generate_poly_and_challenge(PolyType poly_type = PolyType::Random) + { + + static constexpr size_t poly_length = 1UL << log_poly_length; + Polynomial poly(poly_length); + switch (poly_type) { + case PolyType::Random: + poly = Polynomial::random(poly_length); + break; + case PolyType::ManyZeros: + poly = Polynomial::random(poly_length); + for (size_t i = 0; i < poly_length / 2; ++i) { + poly.at(i) = Fr::zero(); + } + case PolyType::Sparse: + // set a few coefficients to be non-zero + for (size_t i = 0; i < std::min(100, poly_length / 2); ++i) { + size_t idx = static_cast(this->engine->get_random_uint64() % poly_length); + poly.at(idx) = this->random_element(); + } + break; + case PolyType::Zero: + break; + } + auto x = this->random_element(); + return { poly, x }; + } /** * @brief Tests IPA recursion * @details Creates an IPA claim and then runs the recursive IPA verification and checks that the circuit is valid. 
- * @param POLY_LENGTH */ - template void test_recursive_ipa() + template + void test_recursive_ipa(Polynomial& poly, Fr x, FailureMode failure_mode = FailureMode::None) { - Builder builder(build_ipa_recursive_verifier_circuit()); + BB_DISABLE_ASSERTS(); + Builder builder(build_ipa_recursive_verifier_circuit(poly, x, failure_mode)); info("IPA Recursive Verifier num finalized gates = ", builder.get_num_finalized_gates()); - EXPECT_TRUE(CircuitChecker::check(builder)); + if (failure_mode == FailureMode::None) { + EXPECT_TRUE(CircuitChecker::check(builder)); + } else { + EXPECT_FALSE(CircuitChecker::check(builder)); + } } /** * @brief Tests IPA accumulation by accumulating two IPA claims and proving the accumulated claim * @details Creates two IPA claims, and then two IPA accumulators through recursive verification. Proves the * accumulated claim and checks that it verifies. - * @param POLY_LENGTH + * @param log_poly_length */ - template void test_accumulation() + template void test_accumulation(Polynomial& poly1, Polynomial& poly2, Fr x1, Fr x2) { - using NativeIPA = IPA; - using RecursiveIPA = IPA; + using NativeIPA = IPA; + using RecursiveIPA = IPA; // We create a circuit that does two IPA verifications. However, we don't do the full verifications and instead // accumulate the claims into one claim. This accumulation is done in circuit. Create two accumulators, which // contain the commitment and an opening claim. Builder builder; - - auto [transcript_1, claim_1] = create_ipa_claim(builder); - auto [transcript_2, claim_2] = create_ipa_claim(builder); + auto [transcript_1, claim_1] = create_ipa_claim(builder, poly1, x1); + auto [transcript_2, claim_2] = create_ipa_claim(builder, poly2, x2); // Creates two IPA accumulators and accumulators from the two claims. Also constructs the accumulated h // polynomial. 
@@ -144,63 +250,129 @@ class IPARecursiveTests : public CommitmentTest { /** * @brief Tests IPA recursion with polynomial of length 4 - * @details More details in test_recursive_ipa */ -TEST_F(IPARecursiveTests, RecursiveSmall) +TEST_F(IPARecursiveTests, RecursiveSmallSparse) { static constexpr size_t log_poly_length = 2; - test_recursive_ipa(); + auto [poly, x] = generate_poly_and_challenge(PolyType::ManyZeros); + test_recursive_ipa(poly, x); } /** * @brief Tests IPA recursion with polynomial of length 1024 - * @details More details in test_recursive_ipa */ -TEST_F(IPARecursiveTests, RecursiveMedium) +TEST_F(IPARecursiveTests, RecursiveMediumManyZeros) { static constexpr size_t log_poly_length = 10; - test_recursive_ipa(); + auto [poly, x] = generate_poly_and_challenge(PolyType::Sparse); + test_recursive_ipa(poly, x); +} + +TEST_F(IPARecursiveTests, RecursiveMediumZeroPoly) +{ + static constexpr size_t log_poly_length = 10; + auto [poly, x] = generate_poly_and_challenge(PolyType::Zero); + test_recursive_ipa(poly, x); +} + +TEST_F(IPARecursiveTests, RecursiveMediumZeroChallenge) +{ + static constexpr size_t log_poly_length = 10; + auto [poly, x] = generate_poly_and_challenge(PolyType::Random); + test_recursive_ipa(poly, Fr::zero()); +} + +TEST_F(IPARecursiveTests, RecursiveMediumZeroEvaluation) +{ + static constexpr size_t log_poly_length = 10; + auto [poly, x] = generate_poly_and_challenge(PolyType::Random); + auto initial_evaluation = poly.evaluate(x); + poly.at(1) -= initial_evaluation / x; + test_recursive_ipa(poly, x); } /** * @brief Tests IPA recursion with polynomial of length 1<(); + auto [poly, x] = generate_poly_and_challenge(PolyType::Random); + test_recursive_ipa(poly, x); +} + +/** + * @brief Tests IPA failure modes + * + */ +TEST_F(IPARecursiveTests, RecursiveMediumRandomFailure) +{ + static constexpr size_t log_poly_length = 10; + auto [poly, x] = generate_poly_and_challenge(PolyType::Random); + test_recursive_ipa(poly, x, FailureMode::A_Zero); + 
test_recursive_ipa(poly, x, FailureMode::G_Zero); + test_recursive_ipa(poly, x, FailureMode::ChangePoly); } /** * @brief Test accumulation with polynomials of length 4 - * @details More details in test_accumulation */ -TEST_F(IPARecursiveTests, AccumulateSmall) +TEST_F(IPARecursiveTests, AccumulateSmallRandom) { static constexpr size_t log_poly_length = 2; - test_accumulation(); + auto [poly1, x1] = generate_poly_and_challenge(PolyType::Random); + auto [poly2, x2] = generate_poly_and_challenge(PolyType::Random); + test_accumulation(poly1, poly2, x1, x2); } /** * @brief Test accumulation with polynomials of length 1024 - * @details More details in test_accumulation */ -TEST_F(IPARecursiveTests, AccumulateMedium) +TEST_F(IPARecursiveTests, AccumulateMediumRandom) +{ + static constexpr size_t log_poly_length = 10; + auto [poly1, x1] = generate_poly_and_challenge(); + auto [poly2, x2] = generate_poly_and_challenge(); + test_accumulation(poly1, poly2, x1, x2); +} +TEST_F(IPARecursiveTests, AccumulateMediumFirstZeroPoly) { static constexpr size_t log_poly_length = 10; - test_accumulation(); + static constexpr size_t poly_length = 1UL << log_poly_length; + Polynomial poly1(poly_length); + auto x1 = this->random_element(); + auto [poly2, x2] = generate_poly_and_challenge(); + test_accumulation(poly1, poly2, x1, x2); +} +TEST_F(IPARecursiveTests, AccumulateMediumBothZeroPoly) +{ + static constexpr size_t log_poly_length = 10; + static constexpr size_t poly_length = 1UL << log_poly_length; + Polynomial poly1(poly_length); + Polynomial poly2(poly_length); + auto x1 = this->random_element(); + auto x2 = this->random_element(); + test_accumulation(poly1, poly2, x1, x2); +} +TEST_F(IPARecursiveTests, AccumulateMediumSparseManyZeros) +{ + static constexpr size_t log_poly_length = 10; + auto [poly1, x1] = generate_poly_and_challenge(PolyType::Sparse); + auto [poly2, x2] = generate_poly_and_challenge(PolyType::ManyZeros); + test_accumulation(poly1, poly2, x1, x2); } 
-TEST_F(IPARecursiveTests, FullRecursiveVerifier) +TEST_F(IPARecursiveTests, FullRecursiveVerifierMediumZeroPoly) { static constexpr size_t log_poly_length = 10; static constexpr size_t poly_length = 1UL << log_poly_length; using RecursiveIPA = IPA; - // + Builder builder; - auto [stdlib_transcript, stdlib_claim] = create_ipa_claim(builder); + Polynomial poly(poly_length); + auto x = this->random_element(); + auto [stdlib_transcript, stdlib_claim] = create_ipa_claim(builder, poly, x); VerifierCommitmentKey stdlib_pcs_vkey(&builder, poly_length, this->vk()); auto result = RecursiveIPA::full_verify_recursive(stdlib_pcs_vkey, stdlib_claim, stdlib_transcript); @@ -213,9 +385,32 @@ TEST_F(IPARecursiveTests, FullRecursiveVerifier) EXPECT_TRUE(CircuitChecker::check(builder)); } -TEST_F(IPARecursiveTests, AccumulationAndFullRecursiveVerifier) +TEST_F(IPARecursiveTests, FullRecursiveVerifierMediumRandom) { + static constexpr size_t log_poly_length = 10; + static constexpr size_t poly_length = 1UL << log_poly_length; + using RecursiveIPA = IPA; + + Builder builder; + auto [poly, x] = generate_poly_and_challenge(); + auto [stdlib_transcript, stdlib_claim] = create_ipa_claim(builder, poly, x); + + VerifierCommitmentKey stdlib_pcs_vkey(&builder, poly_length, this->vk()); + auto result = RecursiveIPA::full_verify_recursive(stdlib_pcs_vkey, stdlib_claim, stdlib_transcript); + EXPECT_TRUE(result); + builder.finalize_circuit(/*ensure_nonzero=*/true); + info("Full IPA Recursive Verifier num finalized gates for length ", + 1UL << log_poly_length, + " = ", + builder.get_num_finalized_gates()); + EXPECT_TRUE(CircuitChecker::check(builder)); +} + +TEST_F(IPARecursiveTests, AccumulationAndFullRecursiveVerifierMediumRandom) +{ + static constexpr size_t log_poly_length = 10; + using RecursiveIPA = IPA; // We create a circuit that does two IPA verifications. 
However, we don't do the full verifications and instead @@ -223,13 +418,15 @@ TEST_F(IPARecursiveTests, AccumulationAndFullRecursiveVerifier) // contain the commitment and an opening claim. Builder builder; - auto [transcript_1, claim_1] = create_ipa_claim(builder); - auto [transcript_2, claim_2] = create_ipa_claim(builder); + auto [poly1, x1] = generate_poly_and_challenge(); + auto [poly2, x2] = generate_poly_and_challenge(); + + auto [transcript_1, claim_1] = create_ipa_claim(builder, poly1, x1); + auto [transcript_2, claim_2] = create_ipa_claim(builder, poly2, x2); // Creates two IPA accumulators and accumulators from the two claims. Also constructs the accumulated h // polynomial. - auto [output_claim, ipa_proof] = - RecursiveIPA::template accumulate(this->ck(), transcript_1, claim_1, transcript_2, claim_2); + auto [output_claim, ipa_proof] = RecursiveIPA::accumulate(this->ck(), transcript_1, claim_1, transcript_2, claim_2); output_claim.set_public(); builder.ipa_proof = ipa_proof; builder.finalize_circuit(/*ensure_nonzero=*/false); diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/shplemini.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/shplemini.test.cpp index 09f10743d3a8..28bdfb2eea8e 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/shplemini.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/shplemini.test.cpp @@ -48,14 +48,11 @@ TEST(ShpleminiRecursionTest, ProveAndVerifySingle) bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); auto run_shplemini = [](size_t log_circuit_size) { - using diff_t = std::vector::difference_type; - size_t N = 1 << log_circuit_size; - const auto padding_indicator_array = - stdlib::compute_padding_indicator_array(log_circuit_size); + const std::vector padding_indicator_array(CONST_PROOF_SIZE_LOG_N, 1); constexpr size_t NUM_POLYS = 5; constexpr size_t NUM_SHIFTED = 2; - constexpr size_t NUM_RIGHT_SHIFTED_BY_K = 1; + 
constexpr size_t NUM_RIGHT_SHIFTED_BY_K = 0; CommitmentKey commitment_key(16384); @@ -65,12 +62,8 @@ TEST(ShpleminiRecursionTest, ProveAndVerifySingle) u_challenge.emplace_back(NativeFr::random_element(&shplemini_engine)); }; - // Truncate to real size to create mock claims. - std::vector truncated_u_challenge(u_challenge.begin(), - u_challenge.begin() + static_cast(log_circuit_size)); // Construct mock multivariate polynomial opening claims - MockClaimGen mock_claims( - N, NUM_POLYS, NUM_SHIFTED, NUM_RIGHT_SHIFTED_BY_K, truncated_u_challenge, commitment_key); + MockClaimGen mock_claims(N, NUM_POLYS, NUM_SHIFTED, NUM_RIGHT_SHIFTED_BY_K, u_challenge, commitment_key); // Initialize an empty NativeTranscript auto prover_transcript = NativeTranscript::prover_init_empty(); @@ -90,7 +83,11 @@ TEST(ShpleminiRecursionTest, ProveAndVerifySingle) commitments.end(), commitments_in_biggroup.begin(), [&builder](const auto& native_commitment) { - return Commitment::from_witness(&builder, native_commitment); + auto comm = Commitment::from_witness(&builder, native_commitment); + // Removing the free witness tag, since the commitment in the full scheme are supposed to + // be fiat-shamirred earlier + comm.unset_free_witness_tag(); + return comm; }); return commitments_in_biggroup; }; @@ -98,7 +95,11 @@ TEST(ShpleminiRecursionTest, ProveAndVerifySingle) std::vector elements_in_circuit(elements.size()); std::transform( elements.begin(), elements.end(), elements_in_circuit.begin(), [&builder](const auto& native_element) { - return Fr::from_witness(&builder, native_element); + auto element = Fr::from_witness(&builder, native_element); + // Removing the free witness tag, since the element in the full scheme are supposed to + // be fiat-shamirred earlier + element.unset_free_witness_tag(); + return element; }); return elements_in_circuit; }; @@ -118,6 +119,9 @@ TEST(ShpleminiRecursionTest, ProveAndVerifySingle) for (auto u : u_challenge) { 
u_challenge_in_circuit.emplace_back(Fr::from_witness(&builder, u)); + // Removing the free witness tag, since the u_challenge in the full scheme are supposed to + // be derived from the transcript earlier + u_challenge_in_circuit.back().unset_free_witness_tag(); } ClaimBatcher claim_batcher{ @@ -139,7 +143,7 @@ TEST(ShpleminiRecursionTest, ProveAndVerifySingle) VerifierCommitmentKey vk; EXPECT_EQ(vk.pairing_check(pairing_points[0].get_value(), pairing_points[1].get_value()), true); - // Return finalised number of gates; + // Return finalized number of gates; return builder.num_gates; }; diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/shplonk.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/shplonk.test.cpp index 90077494ab20..f352eccf449f 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/shplonk.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/shplonk.test.cpp @@ -26,6 +26,11 @@ template class ShplonkRecursionTest : public CommitmentTest({ r, eval }, commit)); } @@ -42,6 +47,11 @@ template class ShplonkRecursionTest : public CommitmentTest(r, eval)); stdlib_commitments.emplace_back(commit); } @@ -95,7 +105,7 @@ TYPED_TEST(ShplonkRecursionTest, Simple) EXPECT_TRUE(CircuitChecker::check(builder)); } -TYPED_TEST(ShplonkRecursionTest, LineralyDependent) +TYPED_TEST(ShplonkRecursionTest, LinearlyDependent) { using Builder = TypeParam; using Curve = stdlib::bn254; @@ -136,6 +146,10 @@ TYPED_TEST(ShplonkRecursionTest, LineralyDependent) auto coeff1 = Fr::from_witness(&builder, coefficients[0]); auto coeff2 = Fr::from_witness(&builder, coefficients[1]); + // Removing the free witness tag, since the coefficients in the full scheme are supposed to + // be fiat-shamirred or derived from the transcript earlier + coeff1.unset_free_witness_tag(); + coeff2.unset_free_witness_tag(); // Convert opening claims to witnesses auto stdlib_opening_claims = @@ -148,6 +162,10 @@ 
TYPED_TEST(ShplonkRecursionTest, LineralyDependent) // Opening pair for the linear combination as it would be received by the Verifier from the Prover Fr r = Fr::from_witness(&builder, native_opening_claims[2].opening_pair.challenge); Fr eval = Fr::from_witness(&builder, native_opening_claims[2].opening_pair.evaluation); + // Removing the free witness tag, since the opening pairs in the full scheme are supposed to + // be fiat-shamirred or derived from the transcript earlier + r.unset_free_witness_tag(); + eval.unset_free_witness_tag(); // Opening claim for the linear combination stdlib_opening_claims.emplace_back(OpeningClaim({ r, eval }, commit)); @@ -176,6 +194,10 @@ TYPED_TEST(ShplonkRecursionTest, LineralyDependent) auto coeff1 = Fr::from_witness(&builder, coefficients[0]); auto coeff2 = Fr::from_witness(&builder, coefficients[1]); + // Removing the free witness tag, since the coefficients in the full scheme are supposed to + // be fiat-shamirred or derived from the transcript earlier + coeff1.unset_free_witness_tag(); + coeff2.unset_free_witness_tag(); // Convert opening claims to witnesses auto [stdlib_commitments, stdlib_opening_pairs] = this->native_to_stdlib_pairs_and_commitments( @@ -184,6 +206,10 @@ TYPED_TEST(ShplonkRecursionTest, LineralyDependent) // Opening pair for the linear combination as it would be received by the Verifier from the Prover Fr r = Fr::from_witness(&builder, native_opening_claims[2].opening_pair.challenge); Fr eval = Fr::from_witness(&builder, native_opening_claims[2].opening_pair.evaluation); + // Removing the free witness tag, since the opening pairs in the full scheme are supposed to + // be fiat-shamirred or derived from the transcript earlier + r.unset_free_witness_tag(); + eval.unset_free_witness_tag(); // Update data std::vector update_data = { diff --git a/barretenberg/cpp/src/barretenberg/common/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/common/CMakeLists.txt index d8b2aaa6f1b1..ddcdaffe5eed 100644 --- 
a/barretenberg/cpp/src/barretenberg/common/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/common/CMakeLists.txt @@ -1,2 +1,2 @@ add_subdirectory(tracy_mem) -barretenberg_module(common env) +barretenberg_module(common env libdeflate::libdeflate_static nlohmann_json::nlohmann_json) diff --git a/barretenberg/cpp/src/barretenberg/common/assert.cpp b/barretenberg/cpp/src/barretenberg/common/assert.cpp new file mode 100644 index 000000000000..0123d289e180 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/common/assert.cpp @@ -0,0 +1,19 @@ +#include "barretenberg/common/assert.hpp" +#include "barretenberg/common/throw_or_abort.hpp" + +namespace bb { +AssertMode& get_assert_mode() +{ + static AssertMode current_mode = AssertMode::ABORT; + return current_mode; +} + +void assert_failure(std::string const& err) +{ + if (get_assert_mode() == AssertMode::WARN) { + info("NOT FOR PROD - assert as warning: ", err); + return; + } + throw_or_abort(err); +} +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/common/assert.hpp b/barretenberg/cpp/src/barretenberg/common/assert.hpp index df2242cdd102..50485dfdbf99 100644 --- a/barretenberg/cpp/src/barretenberg/common/assert.hpp +++ b/barretenberg/cpp/src/barretenberg/common/assert.hpp @@ -1,8 +1,36 @@ #pragma once +#include "barretenberg/common/bb_bench.hpp" +#include "barretenberg/common/compiler_hints.hpp" #include "barretenberg/common/throw_or_abort.hpp" +#include #include +// Enable this for (VERY SLOW) stats on which asserts are hit the most. Note that the time measured will be very +// inaccurate, but you can still see what is called too often to be in a release build. +// #define BB_BENCH_ASSERT(x) BB_BENCH_NAME(x) +#define BB_BENCH_ASSERT(x) + +namespace bb { +enum class AssertMode : std::uint8_t { ABORT, WARN }; +AssertMode& get_assert_mode(); +void assert_failure(std::string const& err); + +// NOTE do not use in threaded contexts! 
+struct AssertGuard { + AssertGuard(AssertMode mode) + : previous_mode(get_assert_mode()) + { + get_assert_mode() = mode; + } + ~AssertGuard() { get_assert_mode() = (previous_mode); } + AssertMode previous_mode; +}; +} // namespace bb + +// NOTE do not use in threaded contexts! +#define BB_DISABLE_ASSERTS() bb::AssertGuard __bb_assert_guard(bb::AssertMode::WARN) + // NOLINTBEGIN // Compiler should optimize this out in release builds, without triggering unused-variable warnings. #define DONT_EVALUATE(expression) \ @@ -31,6 +59,7 @@ #define ASSERT(expression, ...) DONT_EVALUATE((expression)) #define BB_ASSERT_EQ(actual, expected, ...) DONT_EVALUATE((actual) == (expected)) +#define BB_ASSERT_NEQ(actual, expected, ...) DONT_EVALUATE((actual) != (expected)) #define BB_ASSERT_GT(left, right, ...) DONT_EVALUATE((left) > (right)) #define BB_ASSERT_GTE(left, right, ...) DONT_EVALUATE((left) >= (right)) #define BB_ASSERT_LT(left, right, ...) DONT_EVALUATE((left) < (right)) @@ -38,90 +67,111 @@ #else #define ASSERT_IN_CONSTEXPR(expression, ...) \ do { \ - if (!(expression)) { \ + if (!(BB_LIKELY(expression))) { \ info("Assertion failed: (" #expression ")"); \ __VA_OPT__(info("Reason : ", __VA_ARGS__);) \ - throw_or_abort(""); \ + bb::assert_failure(""); \ } \ } while (0) #define ASSERT(expression, ...) \ do { \ - if (!(expression)) { \ + BB_BENCH_ASSERT("ASSERT" #expression); \ + if (!(BB_LIKELY(expression))) { \ std::ostringstream oss; \ oss << "Assertion failed: (" #expression ")"; \ __VA_OPT__(oss << " | Reason: " << __VA_ARGS__;) \ - throw_or_abort(oss.str()); \ + bb::assert_failure(oss.str()); \ } \ } while (0) #define BB_ASSERT_EQ(actual, expected, ...) 
\ do { \ + BB_BENCH_ASSERT("BB_ASSERT_EQ" #actual " == " #expected); \ auto _actual = (actual); \ auto _expected = (expected); \ - if (!(_actual == _expected)) { \ + if (!(BB_LIKELY(_actual == _expected))) { \ std::ostringstream oss; \ oss << "Assertion failed: (" #actual " == " #expected ")\n"; \ oss << " Actual : " << _actual << "\n"; \ oss << " Expected: " << _expected; \ __VA_OPT__(oss << "\n Reason : " << __VA_ARGS__;) \ - throw_or_abort(oss.str()); \ + bb::assert_failure(oss.str()); \ + } \ + } while (0) + +#define BB_ASSERT_NEQ(actual, expected, ...) \ + do { \ + BB_BENCH_ASSERT("BB_ASSERT_NEQ" #actual " != " #expected); \ + auto _actual = (actual); \ + auto _expected = (expected); \ + if (!(BB_LIKELY(_actual != _expected))) { \ + std::ostringstream oss; \ + oss << "Assertion failed: (" #actual " != " #expected ")\n"; \ + oss << " Actual : " << _actual << "\n"; \ + oss << " Not expected: " << _expected; \ + __VA_OPT__(oss << "\n Reason : " << __VA_ARGS__;) \ + bb::assert_failure(oss.str()); \ } \ } while (0) #define BB_ASSERT_GT(left, right, ...) \ do { \ + BB_BENCH_ASSERT("BB_ASSERT_GT" #left " > " #right); \ auto _left = (left); \ auto _right = (right); \ - if (!(_left > _right)) { \ + if (!(BB_LIKELY(_left > _right))) { \ std::ostringstream oss; \ oss << "Assertion failed: (" #left " > " #right ")\n"; \ oss << " Left : " << _left << "\n"; \ oss << " Right : " << _right; \ __VA_OPT__(oss << "\n Reason : " << __VA_ARGS__;) \ - throw_or_abort(oss.str()); \ + bb::assert_failure(oss.str()); \ } \ } while (0) #define BB_ASSERT_GTE(left, right, ...) 
\ do { \ + BB_BENCH_ASSERT("BB_ASSERT_GTE" #left " >= " #right); \ auto _left = (left); \ auto _right = (right); \ - if (!(_left >= _right)) { \ + if (!(BB_LIKELY(_left >= _right))) { \ std::ostringstream oss; \ oss << "Assertion failed: (" #left " >= " #right ")\n"; \ oss << " Left : " << _left << "\n"; \ oss << " Right : " << _right; \ __VA_OPT__(oss << "\n Reason : " << __VA_ARGS__;) \ - throw_or_abort(oss.str()); \ + bb::assert_failure(oss.str()); \ } \ } while (0) #define BB_ASSERT_LT(left, right, ...) \ do { \ + BB_BENCH_ASSERT("BB_ASSERT_LT" #left " < " #right); \ auto _left = (left); \ auto _right = (right); \ - if (!(_left < _right)) { \ + if (!(BB_LIKELY(_left < _right))) { \ std::ostringstream oss; \ oss << "Assertion failed: (" #left " < " #right ")\n"; \ oss << " Left : " << _left << "\n"; \ oss << " Right : " << _right; \ __VA_OPT__(oss << "\n Reason : " << __VA_ARGS__;) \ - throw_or_abort(oss.str()); \ + bb::assert_failure(oss.str()); \ } \ } while (0) #define BB_ASSERT_LTE(left, right, ...) \ do { \ + BB_BENCH_ASSERT("BB_ASSERT_LTE" #left " <= " #right); \ auto _left = (left); \ auto _right = (right); \ - if (!(_left <= _right)) { \ + if (!(BB_LIKELY(_left <= _right))) { \ std::ostringstream oss; \ oss << "Assertion failed: (" #left " <= " #right ")\n"; \ oss << " Left : " << _left << "\n"; \ oss << " Right : " << _right; \ __VA_OPT__(oss << "\n Reason : " << __VA_ARGS__;) \ - throw_or_abort(oss.str()); \ + bb::assert_failure(oss.str()); \ } \ } while (0) #endif // __wasm__ diff --git a/barretenberg/cpp/src/barretenberg/common/base64.cpp b/barretenberg/cpp/src/barretenberg/common/base64.cpp new file mode 100644 index 000000000000..1325c879d6c7 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/common/base64.cpp @@ -0,0 +1,296 @@ +/* + base64.cpp and base64.h + + base64 encoding and decoding with C++. 
+ More information at + https://renenyffenegger.ch/notes/development/Base64/Encoding-and-decoding-base-64-with-cpp + + Version: 2.rc.08 (release candidate) + + Copyright (C) 2004-2017, 2020, 2021 René Nyffenegger + + This source code is provided 'as-is', without any express or implied + warranty. In no event will the author be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this source code must not be misrepresented; you must not + claim that you wrote the original source code. If you use this source code + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original source code. + + 3. This notice may not be removed or altered from any source distribution. + + René Nyffenegger rene.nyffenegger@adp-gmbh.ch + +*/ + +#include "base64.hpp" +#include "throw_or_abort.hpp" + +#include + +// +// Depending on the url parameter in base64_chars, one of +// two sets of base64 characters needs to be chosen. +// They differ in their last two characters. 
+//
+// NOTE(review): template parameter lists, casts and other angle-bracket spans below were
+// lost during extraction; reconstructed against upstream cpp-base64 v2.rc.08 — verify.
+//
+static const char* base64_chars[2] = { "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+                                       "abcdefghijklmnopqrstuvwxyz"
+                                       "0123456789"
+                                       "+/",
+
+                                       "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+                                       "abcdefghijklmnopqrstuvwxyz"
+                                       "0123456789"
+                                       "-_" };
+
+static unsigned int pos_of_char(const unsigned char chr)
+{
+    //
+    // Return the position of chr within base64_encode()
+    //
+
+    if (chr >= 'A' && chr <= 'Z')
+        return chr - 'A';
+    else if (chr >= 'a' && chr <= 'z')
+        return chr - 'a' + ('Z' - 'A') + 1;
+    else if (chr >= '0' && chr <= '9')
+        return chr - '0' + ('Z' - 'A') + ('z' - 'a') + 2;
+    else if (chr == '+' || chr == '-')
+        return 62; // Be liberal with input and accept both url ('-') and non-url ('+') base 64 characters (
+    else if (chr == '/' || chr == '_')
+        return 63; // Ditto for '/' and '_'
+    else
+        //
+        // 2020-10-23: Throw std::exception rather than const char*
+        //(Pablo Martin-Gomez, https://github.com/Bouska)
+        //
+        throw_or_abort("Input is not valid base64-encoded data.");
+}
+
+static std::string insert_linebreaks(std::string str, size_t distance)
+{
+    //
+    // Provided by https://github.com/JomaCorpFX, adapted by me.
+    //
+    if (!str.length()) {
+        return "";
+    }
+
+    size_t pos = distance;
+
+    while (pos < str.size()) {
+        str.insert(pos, "\n");
+        pos += distance + 1;
+    }
+
+    return str;
+}
+
+template <typename String, unsigned int line_length> static std::string encode_with_line_breaks(String s)
+{
+    return insert_linebreaks(base64_encode(s, false), line_length);
+}
+
+template <typename String> static std::string encode_pem(String s)
+{
+    // PEM wraps base64 output at 64 characters per line.
+    return encode_with_line_breaks<String, 64>(s);
+}
+
+template <typename String> static std::string encode_mime(String s)
+{
+    // MIME (RFC 2045) wraps base64 output at 76 characters per line.
+    return encode_with_line_breaks<String, 76>(s);
+}
+
+template <typename String> static std::string encode(String s, bool url)
+{
+    return base64_encode(reinterpret_cast<const unsigned char*>(s.data()), s.length(), url);
+}
+
+std::string base64_encode(unsigned char const* bytes_to_encode, size_t in_len, bool url)
+{
+
+    size_t len_encoded = (in_len + 2) / 3 * 4;
+
+    unsigned char trailing_char = url ? '.' : '=';
+
+    //
+    // Choose set of base64 characters. They differ
+    // for the last two positions, depending on the url
+    // parameter.
+    // A bool (as is the parameter url) is guaranteed
+    // to evaluate to either 0 or 1 in C++ therefore,
+    // the correct character set is chosen by subscripting
+    // base64_chars with url.
+    //
+    const char* base64_chars_ = base64_chars[url];
+
+    std::string ret;
+    ret.reserve(len_encoded);
+
+    unsigned int pos = 0;
+
+    while (pos < in_len) {
+        ret.push_back(base64_chars_[(bytes_to_encode[pos + 0] & 0xfc) >> 2]);
+
+        if (pos + 1 < in_len) {
+            ret.push_back(
+                base64_chars_[((bytes_to_encode[pos + 0] & 0x03) << 4) + ((bytes_to_encode[pos + 1] & 0xf0) >> 4)]);
+
+            if (pos + 2 < in_len) {
+                ret.push_back(
+                    base64_chars_[((bytes_to_encode[pos + 1] & 0x0f) << 2) + ((bytes_to_encode[pos + 2] & 0xc0) >> 6)]);
+                ret.push_back(base64_chars_[bytes_to_encode[pos + 2] & 0x3f]);
+            } else {
+                ret.push_back(base64_chars_[(bytes_to_encode[pos + 1] & 0x0f) << 2]);
+                ret.push_back(static_cast<char>(trailing_char));
+            }
+        } else {
+
+            ret.push_back(base64_chars_[(bytes_to_encode[pos + 0] & 0x03) << 4]);
+            ret.push_back(static_cast<char>(trailing_char));
+            ret.push_back(static_cast<char>(trailing_char));
+        }
+
+        pos += 3;
+    }
+
+    return ret;
+}
+
+template <typename String> static std::string decode(String encoded_string, bool remove_linebreaks)
+{
+    //
+    // decode(…) is templated so that it can be used with String = const std::string&
+    // or std::string_view (requires at least C++17)
+    //
+
+    if (encoded_string.empty())
+        return std::string();
+
+    if (remove_linebreaks) {
+
+        std::string copy(encoded_string);
+
+        copy.erase(std::remove(copy.begin(), copy.end(), '\n'), copy.end());
+
+        return base64_decode(copy, false);
+    }
+
+    size_t length_of_string = encoded_string.length();
+    size_t pos = 0;
+
+    //
+    // The approximate length (bytes) of the decoded string might be one or
+    // two bytes smaller, depending on the amount of trailing equal signs
+    // in the encoded string. This approximation is needed to reserve
+    // enough space in the string to be returned.
+    //
+    size_t approx_length_of_decoded_string = length_of_string / 4 * 3;
+    std::string ret;
+    ret.reserve(approx_length_of_decoded_string);
+
+    while (pos < length_of_string) {
+        //
+        // Iterate over encoded input string in chunks. The size of all
+        // chunks except the last one is 4 bytes.
+        //
+        // The last chunk might be padded with equal signs or dots
+        // in order to make it 4 bytes in size as well, but this
+        // is not required as per RFC 2045.
+        //
+        // All chunks except the last one produce three output bytes.
+        //
+        // The last chunk produces at least one and up to three bytes.
+        //
+
+        size_t pos_of_char_1 = pos_of_char(static_cast<unsigned char>(encoded_string[pos + 1]));
+
+        //
+        // Emit the first output byte that is produced in each chunk:
+        //
+        ret.push_back(static_cast<std::string::value_type>(
+            ((pos_of_char(static_cast<unsigned char>(encoded_string[pos + 0])) << 2) + ((pos_of_char_1 & 0x30) >> 4))));
+
+        if ((pos + 2 < length_of_string) && // Check for data that is not padded with equal signs (which is
+                                            // allowed by RFC 2045)
+            encoded_string[pos + 2] != '=' &&
+            encoded_string[pos + 2] != '.' // accept URL-safe base 64 strings, too, so check for '.' also.
+        ) {
+            //
+            // Emit a chunk's second byte (which might not be produced in the last chunk).
+            //
+            unsigned int pos_of_char_2 = pos_of_char(static_cast<unsigned char>(encoded_string[pos + 2]));
+            ret.push_back(
+                static_cast<std::string::value_type>(((pos_of_char_1 & 0x0f) << 4) + ((pos_of_char_2 & 0x3c) >> 2)));
+
+            if ((pos + 3 < length_of_string) && encoded_string[pos + 3] != '=' && encoded_string[pos + 3] != '.') {
+                //
+                // Emit a chunk's third byte (which might not be produced in the last chunk).
+                //
+                // NOTE(review): stripped cast targets reconstructed from upstream cpp-base64 — verify.
+                ret.push_back(static_cast<std::string::value_type>(
+                    ((pos_of_char_2 & 0x03) << 6) + pos_of_char(static_cast<unsigned char>(encoded_string[pos + 3]))));
+            }
+        }
+
+        pos += 4;
+    }
+
+    return ret;
+}
+
+std::string base64_decode(std::string const& s, bool remove_linebreaks)
+{
+    return decode(s, remove_linebreaks);
+}
+
+std::string base64_encode(std::string const& s, bool url)
+{
+    return encode(s, url);
+}
+
+std::string base64_encode_pem(std::string const& s)
+{
+    return encode_pem(s);
+}
+
+std::string base64_encode_mime(std::string const& s)
+{
+    return encode_mime(s);
+}
+
+#if __cplusplus >= 201703L
+//
+// Interface with std::string_view rather than const std::string&
+// Requires C++17
+// Provided by Yannic Bonenberger (https://github.com/Yannic)
+//
+
+std::string base64_encode(std::string_view s, bool url)
+{
+    return encode(s, url);
+}
+
+std::string base64_encode_pem(std::string_view s)
+{
+    return encode_pem(s);
+}
+
+std::string base64_encode_mime(std::string_view s)
+{
+    return encode_mime(s);
+}
+
+std::string base64_decode(std::string_view s, bool remove_linebreaks)
+{
+    return decode(s, remove_linebreaks);
+}
+
+#endif // __cplusplus >= 201703L
diff --git a/barretenberg/cpp/src/barretenberg/common/base64.hpp b/barretenberg/cpp/src/barretenberg/common/base64.hpp
new file mode 100644
index 000000000000..e5b29c40e373
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/common/base64.hpp
@@ -0,0 +1,35 @@
+//
+// base64 encoding and decoding with C++.
+// Version: 2.rc.08 (release candidate)
+//
+// Copyright (C) 2004-2017, 2020, 2021 René Nyffenegger
+// https://renenyffenegger.ch/notes/development/Base64/Encoding-and-decoding-base-64-with-cpp
+//
+
+#pragma once
+
+#include <string>
+
+#if __cplusplus >= 201703L
+#include <string_view>
+#endif // __cplusplus >= 201703L
+
+std::string base64_encode(std::string const& s, bool url = false);
+std::string base64_encode_pem(std::string const& s);
+std::string base64_encode_mime(std::string const& s);
+
+std::string base64_decode(std::string const& s, bool remove_linebreaks = false);
+std::string base64_encode(unsigned char const*, size_t len, bool url = false);
+
+#if __cplusplus >= 201703L
+//
+// Interface with std::string_view rather than const std::string&
+// Requires C++17
+// Provided by Yannic Bonenberger (https://github.com/Yannic)
+//
+std::string base64_encode(std::string_view s, bool url = false);
+std::string base64_encode_pem(std::string_view s);
+std::string base64_encode_mime(std::string_view s);
+
+std::string base64_decode(std::string_view s, bool remove_linebreaks = false);
+#endif // __cplusplus >= 201703L
diff --git a/barretenberg/cpp/src/barretenberg/common/bb_bench.cpp b/barretenberg/cpp/src/barretenberg/common/bb_bench.cpp
new file mode 100644
index 000000000000..e2472e8556c2
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/common/bb_bench.cpp
@@ -0,0 +1,599 @@
+#include "barretenberg/common/assert.hpp"
+// NOTE(review): include targets were stripped during extraction; the list below is
+// reconstructed from usage in this translation unit — verify against upstream.
+#include <cstdlib>
+#include <string>
+#ifndef __wasm__
+#include "bb_bench.hpp"
+#include <algorithm>
+#include <chrono>
+#include <cmath>
+#include <cstdint>
+#include <functional>
+#include <iomanip>
+#include <iostream>
+#include <map>
+#include <memory>
+#include <mutex>
+#include <set>
+#include <sstream>
+#include <vector>
+
+namespace {
+// ANSI color codes
+struct Colors {
+    static constexpr const char* WHITE = "\033[37m";
+    static constexpr const char* RESET = "\033[0m";
+    static constexpr const char* BOLD = "\033[1m";
+    static constexpr const char* CYAN = "\033[36m";
+    static constexpr const char* GREEN = "\033[32m";
+    static constexpr const char* YELLOW = "\033[33m";
+    static constexpr const char* MAGENTA = "\033[35m";
+    static constexpr const char* DIM = "\033[2m";
+    static constexpr const char* RED = "\033[31m";
+};
+
+// Format time value with appropriate unit
+std::string format_time(double time_ms)
+{
+    std::ostringstream oss;
+    if (time_ms >= 1000.0) {
+        oss << std::fixed << std::setprecision(2) << (time_ms / 1000.0) << " s";
+    } else if (time_ms >= 1.0 && time_ms < 1000.0) {
+        oss << std::fixed << std::setprecision(2) << time_ms << " ms";
+    } else {
+        oss << std::fixed << std::setprecision(1) << (time_ms * 1000.0) << " μs";
+    }
+    return oss.str();
+}
+
+// Format time with fixed width for alignment
+std::string format_time_aligned(double time_ms)
+{
+    std::ostringstream oss;
+    if (time_ms >= 1000.0) {
+        std::ostringstream time_oss;
+        time_oss << std::fixed << std::setprecision(2) << (time_ms / 1000.0) << "s";
+        oss << std::left << std::setw(10) << time_oss.str();
+    } else {
+        std::ostringstream time_oss;
+        time_oss << std::fixed << std::setprecision(1) << time_ms << "ms";
+        oss << std::left << std::setw(10) << time_oss.str();
+    }
+    return oss.str();
+}
+
+// Helper to format percentage value
+std::string format_percentage_value(double percentage, const char* color)
+{
+    std::ostringstream oss;
+    oss << color << " " << std::left << std::fixed << std::setprecision(1) << std::setw(5) << percentage << "%"
+        << Colors::RESET;
+    return oss.str();
+}
+
+// Helper to format percentage with color based on percentage value
+std::string format_percentage(double value, double total, double min_threshold = 0.0)
+{
+    double percentage = (total <= 0) ? 0.0 : (value / total) * 100.0;
+    if (total <= 0 || percentage < min_threshold) {
+        // NOTE(review): width of this blank placeholder was lost in extraction — verify.
+        return "        ";
+    }
+
+    // Choose color based on percentage value (like time colors)
+    const char* color = Colors::CYAN; // Default color
+
+    return format_percentage_value(percentage, color);
+}
+
+// Helper to format percentage section
+std::string format_percentage_section(double time_ms, double parent_time, size_t indent_level)
+{
+    if (parent_time > 0 && indent_level > 0) {
+        return format_percentage(time_ms * 1000000.0, parent_time);
+    }
+    return "        ";
+}
+
+// Helper to format time section
+std::string format_time_section(double time_ms)
+{
+    std::ostringstream oss;
+    oss << " ";
+    if (time_ms >= 100.0 && time_ms < 1000.0) {
+        oss << Colors::DIM << format_time_aligned(time_ms) << Colors::RESET;
+    } else {
+        oss << format_time_aligned(time_ms);
+    }
+    return oss.str();
+}
+
+// Helper to format call stats
+std::string format_call_stats(double time_ms, uint64_t count)
+{
+    if (!(time_ms >= 100.0 && count > 1)) {
+        return "";
+    }
+    double avg_ms = time_ms / static_cast<double>(count);
+    std::ostringstream oss;
+    oss << Colors::DIM << " (" << format_time(avg_ms) << " x " << count << ")" << Colors::RESET;
+    return oss.str();
+}
+
+std::string format_aligned_section(double time_ms, double parent_time, uint64_t count, size_t indent_level)
+{
+    std::ostringstream oss;
+
+    // Add indent level indicator at the beginning with different color
+    oss << Colors::MAGENTA << "[" << indent_level << "] " << Colors::RESET;
+
+    // Format percentage FIRST
+    oss << format_percentage_section(time_ms, parent_time, indent_level);
+
+    // Format time AFTER percentage with appropriate color (with more spacing)
+    oss << format_time_section(time_ms);
+
+    // Format calls/threads info - only show for >= 100ms, make it DIM
+    oss << format_call_stats(time_ms, count);
+
+    return oss.str();
+}
+
+// Get color based on time threshold
+struct TimeColor {
+    const char* name_color;
+    const char* time_color;
+};
+
+TimeColor get_time_colors(double time_ms)
+{
+    if (time_ms >= 1000.0) {
+        return { Colors::BOLD, Colors::WHITE };
+    }
+    if (time_ms >= 100.0) {
+        return { Colors::YELLOW, Colors::YELLOW };
+    }
+    return { Colors::DIM, Colors::DIM };
+}
+
+// Print separator line
+void print_separator(std::ostream& os, bool thick = true)
+{
+    const char* line = thick ? "═══════════════════════════════════════════════════════════════════════════════════════"
+                               "═════════════════════"
+                             : "───────────────────────────────────────────────────────────────────────────────────────"
+                               "─────────────────────";
+    os << Colors::BOLD << Colors::CYAN << line << Colors::RESET << "\n";
+}
+} // anonymous namespace
+
+namespace bb::detail {
+
+// use_bb_bench is also set by --print_bench and --bench_out flags
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
+bool use_bb_bench = std::getenv("BB_BENCH") == nullptr ? false : std::string(std::getenv("BB_BENCH")) == "1";
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
+using OperationKey = std::string_view;
+
+void AggregateEntry::add_thread_time_sample(const TimeAndCount& stats)
+{
+    if (stats.count == 0) {
+        return;
+    }
+    // Account for aggregate time and count
+    time += stats.time;
+    count += stats.count;
+    time_max = std::max(static_cast<std::size_t>(stats.time), time_max);
+    // Use Welford's method to be able to track the variance
+    num_threads++;
+    double delta = static_cast<double>(stats.time) - time_mean;
+    time_mean += delta / static_cast<double>(num_threads);
+    double delta2 = static_cast<double>(stats.time) - time_mean;
+    time_m2 += delta * delta2;
+}
+
+double AggregateEntry::get_std_dev() const
+{
+    // Calculate standard deviation (sample std-dev via Welford's m2 accumulator)
+    if (num_threads > 1) {
+        return std::sqrt(time_m2 / static_cast<double>(num_threads - 1));
+    }
+    return 0;
+}
+
+// Normalize the raw benchmark data into a clean structure for display
+AggregateData GlobalBenchStatsContainer::aggregate() const
+{
+    AggregateData result;
+
+    // Each count has a unique [thread, key] combo.
+    // We therefore treat each count as a thread's contribution to that key.
+    // NOTE(review): container template arguments were stripped during extraction and are
+    // reconstructed from the declarations in bb_bench.hpp — verify.
+    for (const std::shared_ptr<TimeStatsEntry>& entry : entries) {
+        // A map from parent key => AggregateEntry
+        auto& entry_map = result[entry->key];
+        // combine all entries with same parent key
+        std::map<OperationKey, TimeAndCount> parent_key_to_stats;
+
+        // For collection-time performance, we allow multiple stat blocks with the same parent. It'd be simpler to have
+        // one but we just have to combine them here.
+        for (const TimeStats* stats = &entry->count; stats != nullptr; stats = stats->next.get()) {
+            OperationKey parent_key = stats->parent != nullptr ? stats->parent->key : "";
+            parent_key_to_stats[parent_key].count += stats->count;
+            parent_key_to_stats[parent_key].time += stats->time;
+        }
+
+        for (auto [parent_key, stats] : parent_key_to_stats) {
+            auto& normalized_entry = entry_map[parent_key];
+            normalized_entry.key = entry->key;
+            normalized_entry.parent = parent_key;
+            normalized_entry.add_thread_time_sample(stats);
+        }
+    }
+
+    return result;
+}
+
+GlobalBenchStatsContainer::~GlobalBenchStatsContainer()
+{
+    if (std::getenv("BB_BENCH") != nullptr) {
+        print_aggregate_counts_hierarchical(std::cerr);
+    }
+}
+
+void GlobalBenchStatsContainer::add_entry(const char* key, const std::shared_ptr<TimeStatsEntry>& entry)
+{
+    std::unique_lock lock(mutex);
+    entry->key = key;
+    entries.push_back(entry);
+}
+
+void GlobalBenchStatsContainer::print() const
+{
+    std::cout << "GlobalBenchStatsContainer::print() START" << "\n";
+    for (const std::shared_ptr<TimeStatsEntry>& entry : entries) {
+        print_stats_recursive(entry->key, &entry->count, "");
+    }
+    std::cout << "GlobalBenchStatsContainer::print() END" << "\n";
+}
+
+void GlobalBenchStatsContainer::print_stats_recursive(const OperationKey& key,
+                                                      const TimeStats* stats,
+                                                      const std::string& indent) const
+{
+    if (stats->count > 0) {
+        std::cout << indent << key << "\t" << stats->count << "\n";
+    }
+    if (stats->time > 0) {
+        std::cout << indent << key << "(t)\t" << static_cast<double>(stats->time) / 1000000.0 << "ms\n";
+    }
+
+    if (stats->next != nullptr) {
+        print_stats_recursive(key, stats->next.get(), indent + "    ");
+    }
+}
+
+void GlobalBenchStatsContainer::print_aggregate_counts(std::ostream& os, size_t indent) const
+{
+    os << '{';
+    bool first = true;
+    for (const auto& [key, entry_map] : aggregate()) {
+        // Loop for a flattened view
+        uint64_t time = 0;
+        for (auto& [parent_key, entry] : entry_map) {
+            time += entry.time_max;
+        }
+
+        if (!first) {
+            os << ',';
+        }
+        if (indent > 0) {
+            os << "\n" << std::string(indent, ' ');
+        }
+        os << '"' << key << "\":" << time;
+        first = false;
+    }
+    if (indent > 0) {
+        os << "\n";
+    }
+    os << '}' << "\n";
+}
+
+void GlobalBenchStatsContainer::print_aggregate_counts_hierarchical(std::ostream& os) const
+{
+    AggregateData aggregated = aggregate();
+
+    if (aggregated.empty()) {
+        os << "No benchmark data collected\n";
+        return;
+    }
+
+    // Print header
+    os << "\n";
+    print_separator(os, true);
+    os << Colors::BOLD << " Benchmark Results" << Colors::RESET << "\n";
+    print_separator(os, true);
+
+    // NOTE(review): stripped template arguments below reconstructed from usage — verify.
+    std::map<OperationKey, std::set<OperationKey>> keys_to_parents;
+    std::set<OperationKey> printed_in_detail;
+    for (auto& [key, entry_map] : aggregated) {
+        for (auto& [parent_key, entry] : entry_map) {
+            if (entry.count > 0) {
+                keys_to_parents[key].insert(parent_key);
+            }
+        }
+    }
+
+    // Helper function to print a stat line with tree drawing
+    auto print_entry = [&](const AggregateEntry& entry, size_t indent_level, bool is_last, uint64_t parent_time) {
+        std::string indent(indent_level * 2, ' ');
+        std::string prefix = (indent_level == 0) ? "" : (is_last ? "└─ " : "├─ ");
+
+        // Use exactly 80 characters for function name without indent
+        const size_t name_width = 80;
+        std::string display_name = std::string(entry.key);
+        if (display_name.length() > name_width) {
+            display_name = display_name.substr(0, name_width - 3) + "...";
+        }
+
+        double time_ms = static_cast<double>(entry.time_max) / 1000000.0;
+        auto colors = get_time_colors(time_ms);
+
+        // Print indent + prefix + name (exactly 80 chars) + time/percentage/calls
+        os << indent << prefix << colors.name_color;
+        if (time_ms >= 1000.0 && colors.name_color == Colors::BOLD) {
+            os << Colors::YELLOW; // Special case: bold yellow for >= 1s
+        }
+        os << std::left << std::setw(static_cast<int>(name_width)) << display_name << Colors::RESET;
+
+        // Print time if available with aligned section including indent level
+        if (entry.time_max > 0) {
+            if (time_ms < 100.0) {
+                // Minimal format for <100ms: only [level] and percentage, no time display
+                std::ostringstream minimal_oss;
+                minimal_oss << Colors::MAGENTA << "[" << indent_level << "] " << Colors::RESET;
+                minimal_oss << format_percentage_section(time_ms, static_cast<double>(parent_time), indent_level);
+                minimal_oss << " " << std::setw(10) << ""; // Add spacing to replace where time would be
+                os << " " << colors.time_color << std::setw(40) << std::left << minimal_oss.str() << Colors::RESET;
+            } else {
+                std::string aligned_section =
+                    format_aligned_section(time_ms, static_cast<double>(parent_time), entry.count, indent_level);
+                os << " " << colors.time_color << std::setw(40) << std::left << aligned_section << Colors::RESET;
+                if (entry.num_threads > 1) {
+                    double mean_ms = entry.time_mean / 1000000.0;
+                    double stddev_percentage = floor(entry.get_std_dev() * 100 / entry.time_mean);
+                    os << " " << entry.num_threads << " threads " << mean_ms << "ms average " << stddev_percentage
+                       << "% stddev";
+                }
+            }
+        }
+
+        os << "\n";
+    };
+
+    // Recursive function to print hierarchy
+    std::function<void(OperationKey, size_t, bool, uint64_t, OperationKey)> print_hierarchy;
+    print_hierarchy = [&](OperationKey key,
+                          size_t indent_level,
+                          bool is_last,
+                          uint64_t parent_time,
+                          OperationKey current_parent) -> void {
+        auto it = aggregated.find(key);
+        if (it == aggregated.end()) {
+            return;
+        }
+
+        // Find the entry with the specific parent context
+        const AggregateEntry* entry_to_print = nullptr;
+        for (const auto& [parent_key, entry] : it->second) {
+            if ((indent_level == 0 && parent_key.empty()) || (indent_level > 0 && parent_key == current_parent)) {
+                entry_to_print = &entry;
+                break;
+            }
+        }
+
+        if (!entry_to_print) {
+            return;
+        }
+
+        // Print this entry
+        print_entry(*entry_to_print, indent_level, is_last, parent_time);
+
+        // Find and print children - operations that have this key as parent (only those with meaningful time >= 0.5ms)
+        std::vector<OperationKey> children;
+        if (!printed_in_detail.contains(key)) {
+            for (const auto& [child_key, parent_map] : aggregated) {
+                for (const auto& [parent_key, entry] : parent_map) {
+                    if (parent_key == key && entry.time_max >= 500000) { // 0.5ms in nanoseconds
+                        children.push_back(child_key);
+                        break;
+                    }
+                }
+            }
+            printed_in_detail.insert(key);
+        }
+
+        // Sort children by their time in THIS parent context
+        std::ranges::sort(children, [&](OperationKey a, OperationKey b) {
+            uint64_t time_a = 0;
+            uint64_t time_b = 0;
+            if (auto it = aggregated.find(a); it != aggregated.end()) {
+                for (const auto& [parent_key, entry] : it->second) {
+                    if (parent_key == key) {
+                        time_a = entry.time_max;
+                        break;
+                    }
+                }
+            }
+            if (auto it = aggregated.find(b); it != aggregated.end()) {
+                for (const auto& [parent_key, entry] : it->second) {
+                    if (parent_key == key) {
+                        time_b = entry.time_max;
+                        break;
+                    }
+                }
+            }
+            return time_a > time_b;
+        });
+
+        // Calculate time spent in children and add "(other)" if >5% unaccounted
+        uint64_t children_total_time = 0;
+        for (const auto& child_key : children) {
+            if (auto it = aggregated.find(child_key); it != aggregated.end()) {
+                for (const auto& [parent_key, entry] : it->second) {
+                    if (parent_key == key && entry.time_max >= 500000) { // 0.5ms in nanoseconds
+                        children_total_time += entry.time_max;
+                    }
+                }
+            }
+        }
+        uint64_t parent_total_time = entry_to_print->time_max;
+        bool should_add_other = false;
+        if (!children.empty() && parent_total_time > 0 && children_total_time < parent_total_time) {
+            uint64_t unaccounted = parent_total_time - children_total_time;
+            double percentage = (static_cast<double>(unaccounted) / static_cast<double>(parent_total_time)) * 100.0;
+            should_add_other = percentage > 5.0 && unaccounted > 0;
+        }
+        uint64_t other_time = should_add_other ? (parent_total_time - children_total_time) : 0;
+
+        if (!children.empty() && keys_to_parents[key].size() > 1) {
+            os << std::string(indent_level * 2, ' ') << "  ├─ NOTE: Shared children. Can add up to > 100%.\n";
+        }
+
+        // Print children
+        for (size_t i = 0; i < children.size(); ++i) {
+            bool is_last_child = (i == children.size() - 1) && !should_add_other;
+            print_hierarchy(children[i], indent_level + 1, is_last_child, entry_to_print->time, key);
+        }
+
+        // Print "(other)" category if significant unaccounted time exists
+        if (should_add_other && keys_to_parents[key].size() <= 1) {
+            AggregateEntry other_entry;
+            other_entry.key = "(other)";
+            other_entry.time = other_time;
+            other_entry.time_max = other_time;
+            other_entry.count = 1;
+            other_entry.num_threads = 1;
+            print_entry(other_entry, indent_level + 1, true, parent_total_time); // always last
+        }
+    };
+
+    // Find root entries (those that ONLY have empty parent key and significant time)
+    std::vector<OperationKey> roots;
+    for (const auto& [key, parent_map] : aggregated) {
+        auto empty_parent_it = parent_map.find("");
+        if (empty_parent_it != parent_map.end() && empty_parent_it->second.time > 0) {
+            roots.push_back(key);
+        }
+    }
+
+    // Sort roots by time (descending)
+    std::ranges::sort(roots, [&](OperationKey a, OperationKey b) {
+        uint64_t time_a = 0;
+        uint64_t time_b = 0;
+        if (auto it_a = aggregated.find(a); it_a != aggregated.end()) {
+            if (auto parent_it = it_a->second.find(""); parent_it != it_a->second.end()) {
+                time_a = parent_it->second.time_max;
+            }
+        }
+        if (auto it_b = aggregated.find(b); it_b != aggregated.end()) {
+            if (auto parent_it = it_b->second.find(""); parent_it != it_b->second.end()) {
+                time_b = parent_it->second.time_max;
+            }
+        }
+        return time_a > time_b;
+    });
+
+    // Print hierarchies starting from roots
+    for (size_t i = 0; i < roots.size(); ++i) {
+        print_hierarchy(roots[i], 0, i == roots.size() - 1, 0, "");
+    }
+
+    // Print summary
+    print_separator(os, false);
+
+    // Calculate totals from root entries
+    std::set<OperationKey> unique_funcs;
+    for (const auto& [key, _] : aggregated) {
+        unique_funcs.insert(key);
+    }
+    size_t unique_functions_count = unique_funcs.size();
+
+    uint64_t shared_count = 0;
+    for (const auto& [key, parents] : keys_to_parents) {
+        if (parents.size() > 1) {
+            shared_count++;
+        }
+    }
+
+    uint64_t total_time = 0;
+    for (const auto& [_, parent_map] : aggregated) {
+        if (auto it = parent_map.find(""); it != parent_map.end()) {
+            total_time = std::max(static_cast<std::size_t>(total_time), it->second.time_max);
+        }
+    }
+
+    uint64_t total_calls = 0;
+    for (const auto& [_, parent_map] : aggregated) {
+        for (const auto& [__, entry] : parent_map) {
+            total_calls += entry.count;
+        }
+    }
+
+    double total_time_ms = static_cast<double>(total_time) / 1000000.0;
+
+    os << " " << Colors::BOLD << "Total: " << Colors::RESET << Colors::MAGENTA << unique_functions_count
+       << " functions" << Colors::RESET;
+    if (shared_count > 0) {
+        os << " (" << Colors::RED << shared_count << " shared" << Colors::RESET << ")";
+    }
+    os << ", " << Colors::GREEN << total_calls << " measurements" << Colors::RESET << ", " << Colors::YELLOW;
+    if (total_time_ms >= 1000.0) {
+        os << std::fixed << std::setprecision(2) << (total_time_ms / 1000.0) << " seconds";
+    } else {
+        os << std::fixed << std::setprecision(2) << total_time_ms << " ms";
+    }
+    os << Colors::RESET;
+
+    os << "\n";
+    print_separator(os, true);
+    os << "\n";
+}
+
+void GlobalBenchStatsContainer::clear()
+{
+    std::unique_lock lock(mutex);
+    for (std::shared_ptr<TimeStatsEntry>& entry : entries) {
+        entry->count = TimeStats();
+    }
+}
+
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
+GlobalBenchStatsContainer GLOBAL_BENCH_STATS;
+
+BenchReporter::BenchReporter(TimeStatsEntry* entry)
+    : parent(nullptr)
+    , stats(entry)
+    , time(0)
+{
+    if (stats == nullptr) {
+        return;
+    }
+    // Track the current parent context
+    parent = GlobalBenchStatsContainer::parent;
+    auto now = std::chrono::high_resolution_clock::now();
+    auto now_ns = std::chrono::time_point_cast<std::chrono::nanoseconds>(now);
+    time = static_cast<std::size_t>(now_ns.time_since_epoch().count());
+}
+BenchReporter::~BenchReporter()
+{
+    if (stats == nullptr) {
+        return;
+    }
+    auto now = std::chrono::high_resolution_clock::now();
+    auto now_ns = std::chrono::time_point_cast<std::chrono::nanoseconds>(now);
+    // Add, taking advantage of our parent context
+    stats->count.track(parent, static_cast<std::size_t>(now_ns.time_since_epoch().count()) - time);
+
+    // Unwind to previous parent
+    GlobalBenchStatsContainer::parent = parent;
+}
+} // namespace bb::detail
+#endif
diff --git a/barretenberg/cpp/src/barretenberg/common/bb_bench.hpp b/barretenberg/cpp/src/barretenberg/common/bb_bench.hpp
new file mode 100644
index 000000000000..27e7da6e10c8
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/common/bb_bench.hpp
@@ -0,0 +1,224 @@
+
+#pragma once
+
+#include "barretenberg/common/compiler_hints.hpp"
+// NOTE(review): include targets were stripped during extraction; reconstructed from usage — verify.
+#include <algorithm>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <mutex>
+#include <string_view>
+#include <unordered_map>
+#include <vector>
+
+/**
+ * Provides an abstraction that counts operations based on function names.
+ * For efficiency, we spread out counts across threads.
+ */
+
+namespace bb::detail {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
+extern bool use_bb_bench;
+
+// Compile-time string
+// See e.g. https://www.reddit.com/r/cpp_questions/comments/pumi9r/does_c20_not_support_string_literals_as_template/
+template <std::size_t N> struct OperationLabel {
+    constexpr static std::size_t size() { return N; }
+    // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays)
+    constexpr OperationLabel(const char (&str)[N])
+    {
+        for (std::size_t i = 0; i < N; ++i) {
+            value[i] = str[i];
+        }
+    }
+
+    // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays)
+    char value[N];
+};
+
+template <OperationLabel op1, OperationLabel op2> constexpr auto concat()
+{
+    // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays)
+    char result_cstr[op1.size() + op2.size() - 1] = {};
+    std::copy(op1.value, op1.value + op1.size() - 1, result_cstr);
+    std::copy(op2.value, op2.value + op2.size(), result_cstr + op1.size() - 1);
+    return OperationLabel{ result_cstr };
+}
+struct TimeStats;
+struct TimeStatsEntry;
+using OperationKey = std::string_view;
+
+struct TimeAndCount {
+    uint64_t time = 0;
+    uint64_t count = 0;
+};
+
+// Normalized benchmark entry - each represents a unique (function, parent) pair
+struct AggregateEntry {
+    // For convenience, even though redundant with map store
+    OperationKey key;
+    OperationKey parent;
+    std::size_t time = 0;
+    std::size_t count = 0;
+    size_t num_threads = 0;
+    double time_mean = 0;
+    std::size_t time_max = 0;
+    double time_stddev = 0;
+
+    // Welford's algorithm state
+    double time_m2 = 0; // sum of squared differences from mean
+
+    void add_thread_time_sample(const TimeAndCount& stats);
+    double get_std_dev() const;
+};
+
+// AggregateData: Result of normalizing benchmark data
+// entries: Key -> ParentKey -> Entry
+// Empty string is used as key if the entry has no parent.
+// NOTE(review): map template arguments were stripped during extraction; reconstructed
+// from how aggregate()/print_* consume this type — verify.
+using AggregateData = std::unordered_map<OperationKey, std::map<OperationKey, AggregateEntry>>;
+
+// Contains all statically known op counts
+struct GlobalBenchStatsContainer {
+  public:
+    static inline thread_local TimeStatsEntry* parent = nullptr;
+    ~GlobalBenchStatsContainer();
+    std::mutex mutex;
+    std::vector<std::shared_ptr<TimeStatsEntry>> entries;
+    void print() const;
+    // NOTE: Should be called when other threads aren't active
+    void clear();
+    void add_entry(const char* key, const std::shared_ptr<TimeStatsEntry>& entry);
+    void print_stats_recursive(const OperationKey& key, const TimeStats* stats, const std::string& indent) const;
+    void print_aggregate_counts(std::ostream&, size_t) const;
+    void print_aggregate_counts_hierarchical(std::ostream&) const;
+
+    // Normalize the raw benchmark data into a clean structure for display
+    AggregateData aggregate() const;
+};
+
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
+extern GlobalBenchStatsContainer GLOBAL_BENCH_STATS;
+
+// Tracks operation statistics and links them to their immediate parent context.
+// Each stat is associated only with its direct parent, not the full call hierarchy.
+// This allows measuring the direct contribution of nested operations to their parent,
+// but doesn't provide recursive parent-child relationships through the entire call stack.
+struct TimeStats {
+    TimeStatsEntry* parent = nullptr;
+    std::size_t count = 0;
+    std::size_t time = 0;
+    // Used if the parent changes from last call - chains to handle multiple parent contexts
+    // NOTE(review): unique_ptr element type was stripped during extraction; restored as TimeStats.
+    std::unique_ptr<TimeStats> next;
+
+    TimeStats() = default;
+    TimeStats(TimeStatsEntry* parent_ptr, std::size_t count_val, std::size_t time_val)
+        : parent(parent_ptr)
+        , count(count_val)
+        , time(time_val)
+    {}
+
+    void track(TimeStatsEntry* current_parent, std::size_t time_val)
+    {
+        // Try to track with current stats if parent matches
+        // Check if 'next' already handles this parent to avoid creating duplicates
+        if (raw_track(current_parent, time_val) || (next && next->raw_track(current_parent, time_val))) {
+            return;
+        }
+        // Create new TimeStats at the front of this linked list.
+        auto new_next = std::make_unique<TimeStats>(parent, count, time);
+        new_next->next = std::move(next);
+        next = std::move(new_next);
+
+        // Reset this node.
+        parent = current_parent;
+        count = 1;
+        time = time_val;
+    }
+
+  private:
+    // Returns true if successfully tracked (parent matches), false otherwise
+    bool raw_track(TimeStatsEntry* expected_parent, std::size_t time_val)
+    {
+        if (parent != expected_parent) {
+            return false;
+        }
+        count++;
+        time += time_val;
+        return true;
+    }
+};
+
+// Each key will appear at most once *per thread*.
+// Each thread has its own count for thread-safety.
+struct TimeStatsEntry {
+    OperationKey key;
+    TimeStats count;
+};
+
+// The stat entry associated with a certain label AND a certain thread.
+// These will later be aggregated, and the TimeStats itself contains stat
+// entries for each caller context change (for later summarization).
+// NOTE(review): NTTP template headers and shared_ptr element types below were stripped
+// during extraction; reconstructed (OperationLabel is a C++20 class-type NTTP) — verify.
+template <OperationLabel Op> struct ThreadBenchStats {
+  public:
+    // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
+    static inline thread_local std::shared_ptr<TimeStatsEntry> stats;
+
+    static void init_entry(TimeStatsEntry& entry);
+    // returns null if use_bb_bench not enabled
+    static std::shared_ptr<TimeStatsEntry> ensure_stats()
+    {
+        if (bb::detail::use_bb_bench && BB_UNLIKELY(stats == nullptr)) {
+            stats = std::make_shared<TimeStatsEntry>();
+            GLOBAL_BENCH_STATS.add_entry(Op.value, stats);
+        }
+        return stats;
+    }
+};
+
+// NOLINTNEXTLINE(cppcoreguidelines-special-member-functions)
+// no-op if passed null stats
+struct BenchReporter {
+    TimeStatsEntry* parent;
+    TimeStatsEntry* stats;
+    std::size_t time;
+    BenchReporter(TimeStatsEntry* entry);
+    ~BenchReporter();
+};
+} // namespace bb::detail
+
+// Define macros. we use void(0) for empty ones as we want these to be statements that need a semicolon.
+#ifdef TRACY_INSTRUMENTED
+#define BB_TRACY() ZoneScopedN(__func__)
+#define BB_TRACY_NAME(name) ZoneScopedN(name)
+#define BB_BENCH_TRACY() ZoneScopedN(__func__)
+#define BB_BENCH_TRACY_NAME(name) ZoneScopedN(name)
+#define BB_BENCH_ONLY_NAME(name) (void)0
+#define BB_BENCH_ENABLE_NESTING() (void)0
+#define BB_BENCH_ONLY() (void)0
+#elif defined __wasm__
+#define BB_TRACY() (void)0
+#define BB_TRACY_NAME(name) (void)0
+#define BB_BENCH_TRACY() (void)0
+#define BB_BENCH_TRACY_NAME(name) (void)0
+#define BB_BENCH_ONLY_NAME(name) (void)0
+#define BB_BENCH_ENABLE_NESTING() (void)0
+#define BB_BENCH_ONLY() (void)0
+#else
+#define BB_TRACY() (void)0
+#define BB_TRACY_NAME(name) (void)0
+#define BB_BENCH_TRACY() BB_BENCH_ONLY_NAME(__func__)
+#define BB_BENCH_TRACY_NAME(name) BB_BENCH_ONLY_NAME(name)
+#define BB_BENCH_ONLY_NAME(name)                                                                                       \
+    bb::detail::BenchReporter _bb_bench_reporter((bb::detail::ThreadBenchStats<name>::ensure_stats().get()))
+#define BB_BENCH_ENABLE_NESTING()                                                                                      \
+    if (_bb_bench_reporter.stats)                                                                                      \
+    bb::detail::GlobalBenchStatsContainer::parent = _bb_bench_reporter.stats
+#define BB_BENCH_ONLY() BB_BENCH_ONLY_NAME(__func__)
+#endif
+#define BB_BENCH_NAME(name)                                                                                            \
+    BB_BENCH_TRACY_NAME(name);                                                                                         \
+    BB_BENCH_ENABLE_NESTING()
+
+#define BB_BENCH()                                                                                                     \
+    BB_BENCH_TRACY();                                                                                                  \
+    BB_BENCH_ENABLE_NESTING()
diff --git a/barretenberg/cpp/src/barretenberg/common/flock.hpp b/barretenberg/cpp/src/barretenberg/common/flock.hpp
new file mode 100644
index 000000000000..fe977e4e19a7
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/common/flock.hpp
@@ -0,0 +1,64 @@
+#pragma once
+
+#ifdef _WIN32
+// NOTE(review): include targets were stripped during extraction; reconstructed from usage — verify.
+#include <io.h>
+#include <windows.h>
+#define LOCK_SH 1
+#define LOCK_EX 2
+#define LOCK_NB 4
+#define LOCK_UN 8
+
+static inline int flock(int fd, int operation)
+{
+    HANDLE h = (HANDLE)_get_osfhandle(fd);
+    OVERLAPPED o = { 0 };
+    DWORD flags = 0;
+
+    if (operation & LOCK_NB)
+        flags |= LOCKFILE_FAIL_IMMEDIATELY;
+    if (operation & LOCK_EX)
+        flags |= LOCKFILE_EXCLUSIVE_LOCK;
+
+    if (operation & LOCK_UN) {
+        return UnlockFileEx(h, 0, MAXDWORD, MAXDWORD, &o) ? 0 : -1;
+    }
+    return LockFileEx(h, flags, 0, MAXDWORD, MAXDWORD, &o) ? 0 : -1;
+}
+#else
+#include <sys/file.h>
+#endif
+
+// NOTE(review): close()/mode_t presumably come from <unistd.h>/<sys/types.h> transitively — verify.
+#include <fcntl.h>
+#include <string>
+#include <string_view>
+
+struct FileLockGuard {
+    int fd;
+
+    explicit FileLockGuard([[maybe_unused]] std::string_view path,
+                           [[maybe_unused]] int flags = O_RDWR | O_CREAT,
+                           [[maybe_unused]] mode_t mode = 0644)
+    {
+#ifndef __wasm__
+        fd = open(std::string(path).c_str(), flags, mode);
+        if (fd != -1) {
+            flock(fd, LOCK_EX);
+        }
+#else
+        fd = -1;
+#endif
+    }
+
+    ~FileLockGuard()
+    {
+#ifndef __wasm__
+        if (fd != -1) {
+            flock(fd, LOCK_UN);
+            close(fd);
+        }
+#endif
+    }
+
+    FileLockGuard(const FileLockGuard&) = delete;
+    FileLockGuard& operator=(const FileLockGuard&) = delete;
+};
diff --git a/barretenberg/cpp/src/barretenberg/common/fuzzer.hpp b/barretenberg/cpp/src/barretenberg/common/fuzzer.hpp
index 818fd71e4931..120d50d9b450 100644
--- a/barretenberg/cpp/src/barretenberg/common/fuzzer.hpp
+++ b/barretenberg/cpp/src/barretenberg/common/fuzzer.hpp
@@ -123,9 +123,7 @@ uint256_t read_uint256(const uint8_t* data, size_t buffer_size = 32)
  */
 template <typename T>
 concept SimpleRng = requires(T a) {
-    {
-        a.next()
-    } -> std::convertible_to<uint32_t>;
+    { a.next() } -> std::convertible_to<uint32_t>;
 };
 /**
  * @brief Concept for forcing ArgumentSizes to be size_t
@@ -189,9 +187,7 @@ concept ArithmeticFuzzHelperConstraint = requires {
  */
 template <typename T>
 concept CheckableComposer = requires(T a) {
-    {
-        bb::CircuitChecker::check(a)
-    } -> std::same_as<bool>;
+    { bb::CircuitChecker::check(a) } -> std::same_as<bool>;
 };
 
 /**
@@ -203,9 +199,7 @@ concept CheckableComposer = requires(T a) {
  */
 template <typename T, typename Composer, typename Context>
 concept PostProcessingEnabled = requires(Composer composer, Context context) {
-    {
-        T::postProcess(&composer, context)
-    } -> std::same_as<bool>;
+    { T::postProcess(&composer, context) } -> std::same_as<bool>;
 };
 
 /**
diff --git a/barretenberg/cpp/src/barretenberg/common/get_bytecode.cpp b/barretenberg/cpp/src/barretenberg/common/get_bytecode.cpp
new file mode 100644
index 000000000000..a1c98bb2f4ba
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/common/get_bytecode.cpp
@@ -0,0 +1,108 @@
+#include "get_bytecode.hpp" +#include "barretenberg/common/throw_or_abort.hpp" +#include "base64.hpp" +#include +#include + +#ifndef __wasm__ +#include +#include +#include +#include +#endif + +namespace { + +std::vector gzip_decompress([[maybe_unused]] const std::vector& compressed) +{ +#ifdef __wasm__ + throw_or_abort("gzip_decompress not supported in WASM"); +#else + std::vector decompressed; + decompressed.resize(1024ULL * 128ULL); // Initial size guess + + for (;;) { + auto decompressor = std::unique_ptr{ + libdeflate_alloc_decompressor(), libdeflate_free_decompressor + }; + size_t actual_size = 0; + libdeflate_result result = libdeflate_gzip_decompress(decompressor.get(), + compressed.data(), + compressed.size(), + decompressed.data(), + decompressed.size(), + &actual_size); + + if (result == LIBDEFLATE_INSUFFICIENT_SPACE) { + decompressed.resize(decompressed.size() * 2); + continue; + } + if (result == LIBDEFLATE_BAD_DATA) { + throw std::runtime_error("Invalid gzip data"); + } + decompressed.resize(actual_size); + break; + } + return decompressed; +#endif +} +} // namespace + +std::vector decode_bytecode(const std::string& base64_bytecode) +{ + // Decode base64 and decompress using libdeflate for gzip + std::string decoded = base64_decode(base64_bytecode, false); + std::vector gzipped(decoded.begin(), decoded.end()); + return gzip_decompress(gzipped); +} + +std::vector get_bytecode_from_json([[maybe_unused]] const std::string& json_path) +{ +#ifdef __wasm__ + throw_or_abort("get_bytecode_from_json not supported in WASM"); +#else + std::ifstream json_file(json_path); + if (!json_file.is_open()) { + throw std::runtime_error("Failed to open JSON file: " + json_path); + } + + nlohmann::json json_data = nlohmann::json::parse(json_file); + std::string base64_bytecode = json_data["bytecode"]; + + return decode_bytecode(base64_bytecode); +#endif +} + +std::vector gunzip([[maybe_unused]] const std::string& path) +{ +#ifdef __wasm__ + throw_or_abort("gunzip not 
supported in WASM"); +#else + std::ifstream file(path, std::ios::binary); + if (!file.is_open()) { + throw std::runtime_error("Failed to open file: " + path); + } + + std::vector compressed((std::istreambuf_iterator(file)), std::istreambuf_iterator()); + return gzip_decompress(compressed); +#endif +} + +std::vector get_bytecode([[maybe_unused]] const std::string& bytecodePath) +{ +#ifdef __wasm__ + throw_or_abort("get_bytecode not supported in WASM"); +#else + if (bytecodePath == "-") { + return { (std::istreambuf_iterator(std::cin)), std::istreambuf_iterator() }; + } + std::filesystem::path filePath = bytecodePath; + if (filePath.extension() == ".json") { + // Try reading json files as if they are a Nargo build artifact + return get_bytecode_from_json(bytecodePath); + } + + // For other extensions, assume file is a raw ACIR program + return gunzip(bytecodePath); +#endif +} diff --git a/barretenberg/cpp/src/barretenberg/common/get_bytecode.hpp b/barretenberg/cpp/src/barretenberg/common/get_bytecode.hpp new file mode 100644 index 000000000000..a3ec06f28142 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/common/get_bytecode.hpp @@ -0,0 +1,18 @@ +#pragma once +#include +#include +#include +#include +#include + +// Parse JSON file and extract base64-encoded gzipped bytecode +std::vector get_bytecode_from_json(const std::string& json_path); + +// Decode base64-encoded gzipped bytecode string +std::vector decode_bytecode(const std::string& base64_bytecode); + +// Decompress gzipped file using libdeflate +std::vector gunzip(const std::string& path); + +// Get bytecode from various file formats +std::vector get_bytecode(const std::string& bytecode_path); diff --git a/barretenberg/cpp/src/barretenberg/common/google_bb_bench.hpp b/barretenberg/cpp/src/barretenberg/common/google_bb_bench.hpp new file mode 100644 index 000000000000..a1da56ad982f --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/common/google_bb_bench.hpp @@ -0,0 +1,57 @@ +// integrates bb bench stats 
with google benchmark +#pragma once +#include + +#ifdef __wasm__ +namespace bb { +struct GoogleBbBenchReporter { + GoogleBbBenchReporter(::benchmark::State& state) + { + // unused, we don't have op counts on + (void)state; + } +}; +}; // namespace bb +// require a semicolon to appease formatters +#define GOOGLE_BB_BENCH_REPORTER(state) (void)0 +#define GOOGLE_BB_BENCH_REPORTER_CANCEL() (void)0 +#else +#include "bb_bench.hpp" +namespace bb { +// NOLINTNEXTLINE(cppcoreguidelines-special-member-functions) +struct GoogleBbBenchReporter { + // We allow having a ref member as this only lives inside a function frame + ::benchmark::State& state; + bool cancelled = false; + GoogleBbBenchReporter(::benchmark::State& state) + : state(state) + { + bb::detail::use_bb_bench = true; + // Intent: Clear when we enter the state loop + bb::detail::GLOBAL_BENCH_STATS.clear(); + } + ~GoogleBbBenchReporter() + { + if (std::getenv("BB_BENCH") != nullptr) { + bb::detail::GLOBAL_BENCH_STATS.print_aggregate_counts_hierarchical(std::cerr); + } + // Allow for conditional reporting + if (cancelled) { + return; + } + // Intent: Collect results when we exit the state loop + for (auto& [key, parent_map] : bb::detail::GLOBAL_BENCH_STATS.aggregate()) { + for (auto& entry : parent_map) { + state.counters[std::string(key) + "(s)"] += static_cast(entry.second.time); + state.counters[std::string(key)] += static_cast(entry.second.count); + } + } + } +}; +// Allow for integration with google benchmark user-defined counters +// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) +#define GOOGLE_BB_BENCH_REPORTER(state) bb::GoogleBbBenchReporter __GOOGLE_BB_BENCH_REPORTER{ state }; +// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) +#define GOOGLE_BB_BENCH_REPORTER_CANCEL() __GOOGLE_BB_BENCH_REPORTER.cancelled = true; +}; // namespace bb +#endif diff --git a/barretenberg/cpp/src/barretenberg/common/log.hpp b/barretenberg/cpp/src/barretenberg/common/log.hpp index 9cc024193ff7..df8fae2d57a5 100644 --- 
a/barretenberg/cpp/src/barretenberg/common/log.hpp +++ b/barretenberg/cpp/src/barretenberg/common/log.hpp @@ -2,6 +2,7 @@ #include "barretenberg/env/logstr.hpp" #include "barretenberg/stdlib/primitives/circuit_builders/circuit_builders_fwd.hpp" #include +#include #include #include #include @@ -55,28 +56,33 @@ template std::string benchmark_format(Args... args) } extern bool debug_logging; +// In release mode (e.g., NDEBUG is defined), we don't compile debug logs. #ifndef NDEBUG -template inline void debug(Args... args) +#define debug(...) debug_([&]() { return format(__VA_ARGS__); }) +#else +#define debug(...) (void)0 +#endif + +// We take a function so that evaluation is lazy. +inline void debug_(std::function func) { - // NDEBUG is used to turn off asserts, so we want this flag to prevent debug log spamming. if (debug_logging) { - logstr(format(args...).c_str()); + logstr(func().c_str()); } } -#else -template inline void debug(Args... /*unused*/) {} -#endif template inline void info(Args... args) { logstr(format(args...).c_str()); } +#define vinfo(...) vinfo_([&]() { return format(__VA_ARGS__); }) + extern bool verbose_logging; -template inline void vinfo(Args... 
args) +inline void vinfo_(std::function func) { if (verbose_logging) { - info(args...); + info(func()); } } diff --git a/barretenberg/cpp/src/barretenberg/common/named_union.hpp b/barretenberg/cpp/src/barretenberg/common/named_union.hpp index ee58c7a3f643..61de5f44fbd8 100644 --- a/barretenberg/cpp/src/barretenberg/common/named_union.hpp +++ b/barretenberg/cpp/src/barretenberg/common/named_union.hpp @@ -20,9 +20,7 @@ namespace bb { */ template concept HasMsgpackSchemaName = requires { - { - T::MSGPACK_SCHEMA_NAME - } -> std::convertible_to; + { T::MSGPACK_SCHEMA_NAME } -> std::convertible_to; }; /** diff --git a/barretenberg/cpp/src/barretenberg/common/op_count.cpp b/barretenberg/cpp/src/barretenberg/common/op_count.cpp deleted file mode 100644 index d95d46a7788c..000000000000 --- a/barretenberg/cpp/src/barretenberg/common/op_count.cpp +++ /dev/null @@ -1,104 +0,0 @@ - -#include -#ifdef BB_USE_OP_COUNT -#include "op_count.hpp" -#include -#include -#include - -namespace bb::detail { - -GlobalOpCountContainer::~GlobalOpCountContainer() -{ - // This is useful for printing counts at the end of non-benchmarks. - // See op_count_google_bench.hpp for benchmarks. 
- // print(); -} - -void GlobalOpCountContainer::add_entry(const char* key, const std::shared_ptr& count) -{ - std::unique_lock lock(mutex); - std::stringstream ss; - ss << std::this_thread::get_id(); - counts.push_back({ key, ss.str(), count }); -} - -void GlobalOpCountContainer::print() const -{ - std::cout << "print_op_counts() START" << std::endl; - for (const Entry& entry : counts) { - if (entry.count->count > 0) { - std::cout << entry.key << "\t" << entry.count->count << "\t[thread=" << entry.thread_id << "]" << std::endl; - } - if (entry.count->time > 0) { - std::cout << entry.key << "(t)\t" << static_cast(entry.count->time) / 1000000.0 - << "ms\t[thread=" << entry.thread_id << "]" << std::endl; - } - if (entry.count->cycles > 0) { - std::cout << entry.key << "(c)\t" << entry.count->cycles << "\t[thread=" << entry.thread_id << "]" - << std::endl; - } - } - std::cout << "print_op_counts() END" << std::endl; -} - -std::map GlobalOpCountContainer::get_aggregate_counts() const -{ - std::map aggregate_counts; - for (const Entry& entry : counts) { - if (entry.count->count > 0) { - aggregate_counts[entry.key] += entry.count->count; - } - if (entry.count->time > 0) { - aggregate_counts[entry.key + "(t)"] += entry.count->time; - } - if (entry.count->cycles > 0) { - aggregate_counts[entry.key + "(c)"] += entry.count->cycles; - } - } - return aggregate_counts; -} - -void GlobalOpCountContainer::clear() -{ - std::unique_lock lock(mutex); - for (Entry& entry : counts) { - *entry.count = OpStats(); - } -} - -// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) -GlobalOpCountContainer GLOBAL_OP_COUNTS; - -OpCountCycleReporter::OpCountCycleReporter(OpStats* stats) - : stats(stats) -{ -#if __clang__ && (defined(__x86_64__) || defined(_M_X64) || defined(__i386) || defined(_M_IX86)) - // Don't support any other targets but x86 clang for now, this is a bit lazy but more than fits our needs - cycles = __builtin_ia32_rdtsc(); -#endif -} 
-OpCountCycleReporter::~OpCountCycleReporter() -{ -#if __clang__ && (defined(__x86_64__) || defined(_M_X64) || defined(__i386) || defined(_M_IX86)) - // Don't support any other targets but x86 clang for now, this is a bit lazy but more than fits our needs - stats->count += 1; - stats->cycles += __builtin_ia32_rdtsc() - cycles; -#endif -} -OpCountTimeReporter::OpCountTimeReporter(OpStats* stats) - : stats(stats) -{ - auto now = std::chrono::high_resolution_clock::now(); - auto now_ns = std::chrono::time_point_cast(now); - time = static_cast(now_ns.time_since_epoch().count()); -} -OpCountTimeReporter::~OpCountTimeReporter() -{ - auto now = std::chrono::high_resolution_clock::now(); - auto now_ns = std::chrono::time_point_cast(now); - stats->count += 1; - stats->time += static_cast(now_ns.time_since_epoch().count()) - time; -} -} // namespace bb::detail -#endif diff --git a/barretenberg/cpp/src/barretenberg/common/op_count.hpp b/barretenberg/cpp/src/barretenberg/common/op_count.hpp deleted file mode 100644 index 313d0c6e56bd..000000000000 --- a/barretenberg/cpp/src/barretenberg/common/op_count.hpp +++ /dev/null @@ -1,173 +0,0 @@ - -#pragma once - -#include -#include - -#ifdef BB_USE_OP_COUNT_TIME_ONLY -#define PROFILE_THIS() BB_OP_COUNT_TIME_NAME(__func__) -#define PROFILE_THIS_NAME(name) BB_OP_COUNT_TIME_NAME(name) -#elif defined TRACY_INSTRUMENTED -#define PROFILE_THIS() ZoneScopedN(__func__) -#define PROFILE_THIS_NAME(name) ZoneScopedN(name) -#else -#define PROFILE_THIS() (void)0 -#define PROFILE_THIS_NAME(name) (void)0 -#endif - -#ifndef BB_USE_OP_COUNT -// require a semicolon to appease formatters -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define BB_OP_COUNT_TRACK() (void)0 -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define BB_OP_COUNT_TRACK_NAME(name) (void)0 -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define BB_OP_COUNT_CYCLES_NAME(name) (void)0 -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define BB_OP_COUNT_TIME_NAME(name) 
(void)0 -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define BB_OP_COUNT_CYCLES() (void)0 -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define BB_OP_COUNT_TIME() (void)0 -#else -/** - * Provides an abstraction that counts operations based on function names. - * For efficiency, we spread out counts across threads. - */ - -#include "barretenberg/common/compiler_hints.hpp" -#include -#include -#include -#include -#include -#include -#include -namespace bb::detail { -// Compile-time string -// See e.g. https://www.reddit.com/r/cpp_questions/comments/pumi9r/does_c20_not_support_string_literals_as_template/ -template struct OperationLabel { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays) - constexpr OperationLabel(const char (&str)[N]) - { - for (std::size_t i = 0; i < N; ++i) { - value[i] = str[i]; - } - } - - // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays) - char value[N]; -}; - -struct OpStats { - std::size_t count = 0; - std::size_t time = 0; - std::size_t cycles = 0; -}; - -// Contains all statically known op counts -struct GlobalOpCountContainer { - public: - struct Entry { - std::string key; - std::string thread_id; - std::shared_ptr count; - }; - ~GlobalOpCountContainer(); - std::mutex mutex; - std::vector counts; - void print() const; - // NOTE: Should be called when other threads aren't active - void clear(); - void add_entry(const char* key, const std::shared_ptr& count); - std::map get_aggregate_counts() const; -}; - -// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) -extern GlobalOpCountContainer GLOBAL_OP_COUNTS; - -template struct GlobalOpCount { - public: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) - static thread_local std::shared_ptr stats; - - static OpStats* ensure_stats() - { - if (BB_UNLIKELY(stats == nullptr)) { - stats = std::make_shared(); - GLOBAL_OP_COUNTS.add_entry(Op.value, stats); - } - return stats.get(); - } - static constexpr void increment_op_count() - { -#ifndef 
BB_USE_OP_COUNT_TIME_ONLY - if (std::is_constant_evaluated()) { - // We do nothing if the compiler tries to run this - return; - } - ensure_stats(); - stats->count++; -#endif - } - static constexpr void add_cycle_time(std::size_t cycles) - { -#ifndef BB_USE_OP_COUNT_TRACK_ONLY - if (std::is_constant_evaluated()) { - // We do nothing if the compiler tries to run this - return; - } - ensure_stats(); - stats->cycles += cycles; -#else - static_cast(cycles); -#endif - } - static constexpr void add_clock_time(std::size_t time) - { -#ifndef BB_USE_OP_COUNT_TRACK_ONLY - if (std::is_constant_evaluated()) { - // We do nothing if the compiler tries to run this - return; - } - ensure_stats(); - stats->time += time; -#else - static_cast(time); -#endif - } -}; -// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) -template thread_local std::shared_ptr GlobalOpCount::stats; - -// NOLINTNEXTLINE(cppcoreguidelines-special-member-functions) -struct OpCountCycleReporter { - OpStats* stats; - std::size_t cycles; - OpCountCycleReporter(OpStats* stats); - ~OpCountCycleReporter(); -}; -// NOLINTNEXTLINE(cppcoreguidelines-special-member-functions) -struct OpCountTimeReporter { - OpStats* stats; - std::size_t time; - OpCountTimeReporter(OpStats* stats); - ~OpCountTimeReporter(); -}; -} // namespace bb::detail - -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define BB_OP_COUNT_TRACK_NAME(name) bb::detail::GlobalOpCount::increment_op_count() -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define BB_OP_COUNT_TRACK() BB_OP_COUNT_TRACK_NAME(__func__) -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define BB_OP_COUNT_CYCLES_NAME(name) \ - bb::detail::OpCountCycleReporter __bb_op_count_cyles(bb::detail::GlobalOpCount::ensure_stats()) -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define BB_OP_COUNT_CYCLES() BB_OP_COUNT_CYCLES_NAME(__func__) -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define BB_OP_COUNT_TIME_NAME(name) \ - bb::detail::OpCountTimeReporter 
__bb_op_count_time(bb::detail::GlobalOpCount::ensure_stats()) -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define BB_OP_COUNT_TIME() BB_OP_COUNT_TIME_NAME(__func__) -#endif diff --git a/barretenberg/cpp/src/barretenberg/common/op_count_google_bench.hpp b/barretenberg/cpp/src/barretenberg/common/op_count_google_bench.hpp deleted file mode 100644 index 5364b9a6cd83..000000000000 --- a/barretenberg/cpp/src/barretenberg/common/op_count_google_bench.hpp +++ /dev/null @@ -1,50 +0,0 @@ - -#pragma once -#include - -#ifndef BB_USE_OP_COUNT -namespace bb { -struct GoogleBenchOpCountReporter { - GoogleBenchOpCountReporter(::benchmark::State& state) - { - // unused, we don't have op counts on - (void)state; - } -}; -}; // namespace bb -// require a semicolon to appease formatters -#define BB_REPORT_OP_COUNT_IN_BENCH(state) (void)0 -#define BB_REPORT_OP_COUNT_BENCH_CANCEL() (void)0 -#else -#include "op_count.hpp" -namespace bb { -// NOLINTNEXTLINE(cppcoreguidelines-special-member-functions) -struct GoogleBenchOpCountReporter { - // We allow having a ref member as this only lives inside a function frame - ::benchmark::State& state; - bool cancelled = false; - GoogleBenchOpCountReporter(::benchmark::State& state) - : state(state) - { - // Intent: Clear when we enter the state loop - bb::detail::GLOBAL_OP_COUNTS.clear(); - } - ~GoogleBenchOpCountReporter() - { - // Allow for conditional reporting - if (cancelled) { - return; - } - // Intent: Collect results when we exit the state loop - for (auto& entry : bb::detail::GLOBAL_OP_COUNTS.get_aggregate_counts()) { - state.counters[entry.first] = static_cast(entry.second); - } - } -}; -// Allow for integration with google benchmark user-defined counters -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define BB_REPORT_OP_COUNT_IN_BENCH(state) bb::GoogleBenchOpCountReporter __bb_report_op_count_in_bench{ state }; -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define BB_REPORT_OP_COUNT_BENCH_CANCEL() 
__bb_report_op_count_in_bench.cancelled = true; -}; // namespace bb -#endif diff --git a/barretenberg/cpp/src/barretenberg/common/parallel_for_mutex_pool.cpp b/barretenberg/cpp/src/barretenberg/common/parallel_for_mutex_pool.cpp index 6f8a0b6ea5d9..20bd9d2d183c 100644 --- a/barretenberg/cpp/src/barretenberg/common/parallel_for_mutex_pool.cpp +++ b/barretenberg/cpp/src/barretenberg/common/parallel_for_mutex_pool.cpp @@ -1,3 +1,5 @@ +#include "barretenberg/common/assert.hpp" +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/common/throw_or_abort.hpp" #ifndef NO_MULTITHREADING #include "log.hpp" @@ -26,6 +28,7 @@ class ThreadPool { void start_tasks(size_t num_iterations, const std::function& func) { + parent.store(bb::detail::GlobalBenchStatsContainer::parent); { std::unique_lock lock(tasks_mutex); task_ = func; @@ -38,12 +41,14 @@ class ThreadPool { do_iterations(); { + // BB_BENCH_NAME("spinning main thread"); std::unique_lock lock(tasks_mutex); complete_condition_.wait(lock, [this] { return complete_ == num_iterations_; }); } } private: + std::atomic parent = nullptr; std::vector workers; std::mutex tasks_mutex; std::function task_; @@ -67,6 +72,7 @@ class ThreadPool { } iteration = iteration_++; } + // BB_BENCH_NAME("do_iterations()"); task_(iteration); { std::unique_lock lock(tasks_mutex); @@ -111,6 +117,9 @@ void ThreadPool::worker_loop(size_t /*unused*/) break; } } + // Make sure nested stats accounting works under multithreading + // Note: parent is a thread-local variable. 
+ bb::detail::GlobalBenchStatsContainer::parent = parent.load(); do_iterations(); } // info("worker exit ", worker_num); @@ -131,7 +140,11 @@ void parallel_for_mutex_pool(size_t num_iterations, const std::function class RefArray { #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Warray-bounds" #endif - BB_ASSERT_LT(idx, N); + ASSERT_DEBUG(idx < N); return *storage[idx]; #if !defined(__clang__) && defined(__GNUC__) #pragma GCC diagnostic pop diff --git a/barretenberg/cpp/src/barretenberg/common/ref_span.hpp b/barretenberg/cpp/src/barretenberg/common/ref_span.hpp index a08f85760376..74d5da476584 100644 --- a/barretenberg/cpp/src/barretenberg/common/ref_span.hpp +++ b/barretenberg/cpp/src/barretenberg/common/ref_span.hpp @@ -27,7 +27,7 @@ template class RefSpan { {} // Constructor from an array of pointers and size - RefSpan(T** ptr_array, std::size_t size) + RefSpan(T* const* ptr_array, std::size_t size) : storage(ptr_array) , array_size(size) {} @@ -57,6 +57,18 @@ template class RefSpan { // Get size of the RefSpan constexpr std::size_t size() const { return array_size; } + RefSpan subspan(std::size_t offset, std::size_t count) + { + // NOTE: like std::span, assumes the caller ensures offset and count are within bounds. + return RefSpan(storage + offset, count); + } + + RefSpan subspan(std::size_t offset) + { + // NOTE: like std::span, assumes the caller ensures offset and count are within bounds. 
+ return RefSpan(storage + offset, array_size - offset); + } + // Iterator implementation class iterator { public: diff --git a/barretenberg/cpp/src/barretenberg/common/ref_vector.hpp b/barretenberg/cpp/src/barretenberg/common/ref_vector.hpp index 759e10fa7517..8a9a9b8f34c8 100644 --- a/barretenberg/cpp/src/barretenberg/common/ref_vector.hpp +++ b/barretenberg/cpp/src/barretenberg/common/ref_vector.hpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include @@ -49,9 +50,19 @@ template class RefVector { } } + // Note: making this a constructor gives problems in prover_instance.cpp. + static RefVector from_span(const std::span& span) + { + RefVector ret; + for (std::size_t i = 0; i < span.size(); ++i) { + ret.push_back(span[i]); + } + return ret; + } + T& operator[](std::size_t idx) const { - BB_ASSERT_LT(idx, storage.size()); + ASSERT_DEBUG(idx < storage.size()); return *storage[idx]; } @@ -97,7 +108,7 @@ template class RefVector { std::size_t size() const { return storage.size(); } - void push_back(T& element) { storage.push_back(element); } + void push_back(T& element) { storage.push_back(&element); } iterator begin() const { return iterator(this, 0); } iterator end() const { return iterator(this, storage.size()); } diff --git a/barretenberg/cpp/src/barretenberg/common/serialize.hpp b/barretenberg/cpp/src/barretenberg/common/serialize.hpp index b8edfe166588..2fdc1a330c54 100644 --- a/barretenberg/cpp/src/barretenberg/common/serialize.hpp +++ b/barretenberg/cpp/src/barretenberg/common/serialize.hpp @@ -384,18 +384,6 @@ template inline void read(B& it, std::optional& opt_ opt_value = T(value); } -template -concept HasGetAll = requires(T t) { t.get_all(); } && !msgpack_concepts::HasMsgPack; - -// Write out a struct that defines get_all() -template inline void write(B& buf, T const& value) -{ - using serialize::write; - for (auto& reference : value.get_all()) { - write(buf, reference); - } -} - // Write std::optional. 
// Note: It takes up a different amount of space, depending on whether it's std::nullopt or populated with an actual // value. diff --git a/barretenberg/cpp/src/barretenberg/common/slab_allocator.cpp b/barretenberg/cpp/src/barretenberg/common/slab_allocator.cpp index caeb505e50f4..09316eed707a 100644 --- a/barretenberg/cpp/src/barretenberg/common/slab_allocator.cpp +++ b/barretenberg/cpp/src/barretenberg/common/slab_allocator.cpp @@ -1,8 +1,8 @@ #include "slab_allocator.hpp" #include "barretenberg/common/assert.hpp" +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/common/log.hpp" #include "barretenberg/common/mem.hpp" -#include "barretenberg/common/op_count.hpp" #include #include #include @@ -212,8 +212,6 @@ void init_slab_allocator(size_t circuit_subgroup_size) std::shared_ptr get_mem_slab(size_t size) { - PROFILE_THIS(); - return allocator.get(size); } diff --git a/barretenberg/cpp/src/barretenberg/common/thread.cpp b/barretenberg/cpp/src/barretenberg/common/thread.cpp index 857d67b58130..d7a7695c5100 100644 --- a/barretenberg/cpp/src/barretenberg/common/thread.cpp +++ b/barretenberg/cpp/src/barretenberg/common/thread.cpp @@ -1,5 +1,44 @@ #include "thread.hpp" #include "log.hpp" +#include "throw_or_abort.hpp" +#include +#include +#include + +#ifndef NO_MULTITHREADING +#include + +namespace { +uint32_t& get_num_cores_ref() +{ + static thread_local const char* val = std::getenv("HARDWARE_CONCURRENCY"); + static thread_local uint32_t cores = + val != nullptr ? 
static_cast(std::stoul(val)) : env_hardware_concurrency(); + return cores; +} +} // namespace +#endif + +namespace bb { +// only for testing purposes currently +void set_parallel_for_concurrency([[maybe_unused]] size_t num_cores) +{ +#ifdef NO_MULTITHREADING + throw_or_abort("Cannot set hardware concurrency when multithreading is disabled."); +#else + get_num_cores_ref() = static_cast(num_cores); +#endif +} + +size_t get_num_cpus() +{ +#ifdef NO_MULTITHREADING + return 1; +#else + return static_cast(get_num_cores_ref()); +#endif +} +} // namespace bb /** * There's a lot to talk about here. To bring threading to WASM, parallel_for was written to replace the OpenMP loops diff --git a/barretenberg/cpp/src/barretenberg/common/thread.hpp b/barretenberg/cpp/src/barretenberg/common/thread.hpp index c97aab48462f..dec64875cbf7 100644 --- a/barretenberg/cpp/src/barretenberg/common/thread.hpp +++ b/barretenberg/cpp/src/barretenberg/common/thread.hpp @@ -5,14 +5,21 @@ #include #include #include +#include #include namespace bb { +#ifdef __wasm__ +// Fixed number of workers in WASM environment +constexpr size_t PARALLEL_FOR_MAX_NESTING = 1; +#else +constexpr size_t PARALLEL_FOR_MAX_NESTING = 2; +#endif -inline size_t get_num_cpus() -{ - return env_hardware_concurrency(); -} +// Useful for programatically benching different thread counts +// Note this is threadsafe and affects parallel_for's just in that thread if so. +void set_parallel_for_concurrency(size_t num_cores); +size_t get_num_cpus(); // For algorithms that need to be divided amongst power of 2 threads. 
inline size_t get_num_cpus_pow2() @@ -148,4 +155,39 @@ constexpr size_t FF_COPY_COST = 3; constexpr size_t ALWAYS_MULTITHREAD = 100000; } // namespace thread_heuristics +struct ThreadChunk { + size_t thread_index; + size_t total_threads; + auto range(size_t size, size_t offset = 0) const + { + if (total_threads == 0 || thread_index >= total_threads) { + return std::views::iota(size_t{ 0 }, size_t{ 0 }); + } + // Calculate base chunk size and remainder + size_t chunk_size = size / total_threads; + size_t remainder = size % total_threads; + + if (thread_index < remainder) { + // Threads with index < remainder get chunk_size + 1 elements + size_t start = thread_index * (chunk_size + 1); + size_t end = start + chunk_size + 1; + return std::views::iota(start + offset, end + offset); + } + // Threads with index >= remainder get chunk_size elements + size_t start = remainder * (chunk_size + 1) + (thread_index - remainder) * chunk_size; + size_t end = start + chunk_size; + return std::views::iota(start + offset, end + offset); + } +}; + +template + requires std::invocable +void parallel_for(const Func& func) +{ + size_t total_threads = get_num_cpus(); + parallel_for(total_threads, [&](size_t thread_index) { + func(ThreadChunk{ .thread_index = thread_index, .total_threads = total_threads }); + }); +} + } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/common/thread.test.cpp b/barretenberg/cpp/src/barretenberg/common/thread.test.cpp new file mode 100644 index 000000000000..24a0c8092f42 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/common/thread.test.cpp @@ -0,0 +1,243 @@ +#include "thread.hpp" +#include "barretenberg/common/log.hpp" +#include +#include +#include +#include + +namespace bb { + +class ThreadTest : public ::testing::Test { + protected: + void SetUp() override + { + // Store original concurrency for restoration + original_concurrency = get_num_cpus(); + } + + void TearDown() override + { + // Restore original concurrency + 
set_parallel_for_concurrency(original_concurrency); + } + + size_t original_concurrency; +}; + +// Test basic parallel_for functionality +TEST_F(ThreadTest, BasicParallelFor) +{ + constexpr size_t num_iterations = 100; + std::vector flags(num_iterations, 0); + + parallel_for(num_iterations, [&](size_t i) { flags[i] = 1; }); + + // All iterations should have been executed + for (size_t i = 0; i < num_iterations; ++i) { + EXPECT_TRUE(flags[i]); + } +} + +// Test nested parallel_for +TEST_F(ThreadTest, NestedParallelFor) +{ + constexpr size_t outer_iterations = 4; + constexpr size_t inner_iterations = 10; + + std::vector> flags(outer_iterations, std::vector(inner_iterations, 0)); + + parallel_for(outer_iterations, + [&](size_t i) { parallel_for(inner_iterations, [&](size_t j) { flags[i][j] = 1; }); }); + + // All iterations should have been executed + for (size_t i = 0; i < outer_iterations; ++i) { + for (size_t j = 0; j < inner_iterations; ++j) { + EXPECT_TRUE(flags[i][j]); + } + } +} + +// Test thread count calculation +TEST_F(ThreadTest, CalculateNumThreads) +{ + set_parallel_for_concurrency(8); + + // With default min iterations per thread (16) + // 160 iterations / 16 = 10 desired threads, min(10, 8) = 8 + EXPECT_EQ(calculate_num_threads(160), 8); + + // 64 iterations / 16 = 4 desired threads, min(4, 8) = 4 + EXPECT_EQ(calculate_num_threads(64), 4); + + // 8 iterations / 16 = 0 desired threads, but should be at least 1 + EXPECT_EQ(calculate_num_threads(8), 1); + + // Custom min iterations per thread + // 100 iterations / 10 = 10 desired threads, min(10, 8) = 8 + EXPECT_EQ(calculate_num_threads(100, 10), 8); + + // 30 iterations / 10 = 3 desired threads, min(3, 8) = 3 + EXPECT_EQ(calculate_num_threads(30, 10), 3); +} + +// Test thread count calculation with power of 2 +TEST_F(ThreadTest, CalculateNumThreadsPow2) +{ + set_parallel_for_concurrency(8); + + // With default min iterations per thread (16) + // 160 iterations / 16 = 10 desired, nearest power of 2 is 8, 
min(8, 8) = 8 + EXPECT_EQ(calculate_num_threads_pow2(160), 8); + + // 64 iterations / 16 = 4 desired, power of 2 is 4, min(4, 8) = 4 + EXPECT_EQ(calculate_num_threads_pow2(64), 4); + + // 96 iterations / 16 = 6 desired, nearest power of 2 is 4, min(4, 8) = 4 + EXPECT_EQ(calculate_num_threads_pow2(96), 4); + + // 8 iterations / 16 = 0 desired, should be at least 1 + EXPECT_EQ(calculate_num_threads_pow2(8), 1); +} + +// Test parallel_for with zero iterations +TEST_F(ThreadTest, ZeroIterations) +{ + size_t counter = 0; + + parallel_for(0, [&](size_t) { counter++; }); + + EXPECT_EQ(counter, 0); +} + +// Test parallel_for with one iteration +TEST_F(ThreadTest, OneIteration) +{ + size_t counter = 0; + + parallel_for(1, [&](size_t i) { + counter++; + EXPECT_EQ(i, 0); + }); + + EXPECT_EQ(counter, 1); +} + +// Test parallel_for_range +TEST_F(ThreadTest, ParallelForRange) +{ + constexpr size_t num_points = 100; + std::vector flags(num_points, 0); + + parallel_for_range(num_points, [&](size_t start, size_t end) { + for (size_t i = start; i < end; ++i) { + flags[i] = 1; + } + }); + + // All iterations should have been executed + for (size_t i = 0; i < num_points; ++i) { + EXPECT_TRUE(flags[i]); + } +} + +// Test parallel_for_range with threshold +TEST_F(ThreadTest, ParallelForRangeThreshold) +{ + constexpr size_t num_points = 10; + std::vector flags(num_points, 0); + + std::atomic call_count{ 0 }; + + // Set threshold to 10, so with exactly 10 points it should run sequentially (1 call) + parallel_for_range( + num_points, + [&](size_t start, size_t end) { + call_count++; + for (size_t i = start; i < end; ++i) { + flags[i] = 1; + } + }, + 10); + + // All iterations should have been executed + for (size_t i = 0; i < num_points; ++i) { + EXPECT_TRUE(flags[i]); + } + + // Should have been called exactly once (sequential) + EXPECT_EQ(call_count, 1); +} + +// Test get_num_cpus with different hardware concurrency values +TEST_F(ThreadTest, HardwareConcurrency) +{ + 
set_parallel_for_concurrency(1); + EXPECT_EQ(get_num_cpus(), 1); + + set_parallel_for_concurrency(4); + EXPECT_EQ(get_num_cpus(), 4); + + set_parallel_for_concurrency(16); + EXPECT_EQ(get_num_cpus(), 16); + + set_parallel_for_concurrency(128); + EXPECT_EQ(get_num_cpus(), 128); +} + +// Test get_num_cpus_pow2 +TEST_F(ThreadTest, HardwareConcurrencyPow2) +{ + set_parallel_for_concurrency(1); + EXPECT_EQ(get_num_cpus_pow2(), 1); + + set_parallel_for_concurrency(4); + EXPECT_EQ(get_num_cpus_pow2(), 4); + + set_parallel_for_concurrency(5); + EXPECT_EQ(get_num_cpus_pow2(), 4); // Round down to power of 2 + + set_parallel_for_concurrency(7); + EXPECT_EQ(get_num_cpus_pow2(), 4); // Round down to power of 2 + + set_parallel_for_concurrency(8); + EXPECT_EQ(get_num_cpus_pow2(), 8); + + set_parallel_for_concurrency(15); + EXPECT_EQ(get_num_cpus_pow2(), 8); // Round down to power of 2 + + set_parallel_for_concurrency(16); + EXPECT_EQ(get_num_cpus_pow2(), 16); +} + +// Test main thread concurrency isolation and nested concurrency +TEST_F(ThreadTest, ConcurrencyIsolation) +{ + set_parallel_for_concurrency(8); + + // Main thread concurrency should be preserved before/after parallel_for + size_t cpus_before = get_num_cpus(); + EXPECT_EQ(cpus_before, 8); + + std::vector> observed_inner_cpus(4); + + parallel_for(4, [&](size_t outer_idx) { + // Worker threads get their own thread_local concurrency set by the pool + // With 8 CPUs and 4 outer tasks, each gets at least 2 CPUs for inner work + size_t inner_cpus = get_num_cpus(); + observed_inner_cpus[outer_idx].store(inner_cpus); + + // Run a nested parallel_for to verify inner concurrency works + parallel_for(10, [](size_t) {}); + }); + + // All inner parallel_for calls should see at least 2 CPUs + for (size_t i = 0; i < 4; ++i) { + EXPECT_GE(observed_inner_cpus[i].load(), 2); + } + + // Main thread concurrency should be unchanged + size_t cpus_after = get_num_cpus(); + EXPECT_EQ(cpus_after, 8); + EXPECT_EQ(cpus_before, cpus_after); +} 
+} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/common/tuplet.hpp b/barretenberg/cpp/src/barretenberg/common/tuplet.hpp index 7fd7de6e1122..59b840134f89 100644 --- a/barretenberg/cpp/src/barretenberg/common/tuplet.hpp +++ b/barretenberg/cpp/src/barretenberg/common/tuplet.hpp @@ -75,8 +75,8 @@ #define _TUPLET_TYPES_CMP_WITH(T, U) \ ::std::enable_if_t<::tuplet::sfinae::detail::_all_true<::tuplet::sfinae::detail::_has_cmp...>(), bool> #else -#define _TUPLET_TYPES_EQ_WITH(T, U) ::std::enable_if_t<((::tuplet::sfinae::detail::_has_eq)&&...), bool> -#define _TUPLET_TYPES_CMP_WITH(T, U) ::std::enable_if_t<((::tuplet::sfinae::detail::_has_cmp)&&...), bool> +#define _TUPLET_TYPES_EQ_WITH(T, U) ::std::enable_if_t<((::tuplet::sfinae::detail::_has_eq) && ...), bool> +#define _TUPLET_TYPES_CMP_WITH(T, U) ::std::enable_if_t<((::tuplet::sfinae::detail::_has_cmp) && ...), bool> #endif #endif @@ -287,45 +287,31 @@ concept assignable_to = requires(U u, T t) { t = u; }; template concept ordered = requires(T const& t) { - { - t <=> t - }; + { t <=> t }; }; template concept ordered_with = requires(T const& t, U const& u) { - { - t <=> u - }; + { t <=> u }; }; template concept equality_comparable = requires(T const& t) { - { - t == t - } -> same_as; + { t == t } -> same_as; }; template concept equality_comparable_with = requires(T const& t, U const& u) { - { - t == u - } -> same_as; + { t == u } -> same_as; }; template concept partial_comparable = equality_comparable && requires(T const& t) { - { - t < t - } -> same_as; + { t < t } -> same_as; }; template concept partial_comparable_with = equality_comparable_with && requires(T const& t, U const& u) { - { - t < u - } -> same_as; - { - t > u - } -> same_as; + { t < u } -> same_as; + { t > u } -> same_as; }; #endif diff --git a/barretenberg/cpp/src/barretenberg/common/utils.hpp b/barretenberg/cpp/src/barretenberg/common/utils.hpp index 627dd4dd9344..38113d88ef18 100644 --- 
a/barretenberg/cpp/src/barretenberg/common/utils.hpp +++ b/barretenberg/cpp/src/barretenberg/common/utils.hpp @@ -32,9 +32,7 @@ template size_t hash_as_tuple(const Ts&... ts) // Define std::hash for any type that has a hash() method. template concept Hashable = requires(const T& t) { - { - t.hash() - } -> std::same_as; + { t.hash() } -> std::same_as; }; template struct std::hash { diff --git a/barretenberg/cpp/src/barretenberg/common/version.cpp b/barretenberg/cpp/src/barretenberg/common/version.cpp new file mode 100644 index 000000000000..4be8fd7916ac --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/common/version.cpp @@ -0,0 +1,7 @@ +#include "version.hpp" + +namespace bb { +// This is updated in-place by bootstrap.sh during the release process. This prevents +// the version string from needing to be present at build-time, simplifying e.g. caching. +const char* const BB_VERSION_PLACEHOLDER = "00000000.00000000.00000000"; +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/common/version.hpp b/barretenberg/cpp/src/barretenberg/common/version.hpp new file mode 100644 index 000000000000..68daa1c867f2 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/common/version.hpp @@ -0,0 +1,6 @@ +#pragma once + +namespace bb { +// Version string placeholder that gets updated during release process +extern const char* const BB_VERSION_PLACEHOLDER; +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/constants.hpp b/barretenberg/cpp/src/barretenberg/constants.hpp index 229ba0de2a90..3897a7fb79fe 100644 --- a/barretenberg/cpp/src/barretenberg/constants.hpp +++ b/barretenberg/cpp/src/barretenberg/constants.hpp @@ -1,7 +1,20 @@ #pragma once +#include #include namespace bb { + +// Arbitrarily large constant (> size of the BN254 srs) used to ensure that the evaluations on the hypercube of the +// permutation argument polynomials (sigmas, ids) are unique, e.g. 
id[i][j] == id[m][n] iff (i == m && j == n) +constexpr uint32_t PERMUTATION_ARGUMENT_VALUE_SEPARATOR = 1 << 28; + +// The fixed size of the Translator trace where each accumulation gate, corresponding to one UltraOp, will occupy two +// rows. +static constexpr uint32_t CONST_TRANSLATOR_MINI_CIRCUIT_LOG_SIZE = 14; + +// -1 as each op occupies two rows in Translator trace +static constexpr uint32_t CONST_OP_QUEUE_LOG_SIZE = CONST_TRANSLATOR_MINI_CIRCUIT_LOG_SIZE - 1; + // The log of the max circuit size assumed in order to achieve constant sized Honk proofs // TODO(https://github.com/AztecProtocol/barretenberg/issues/1046): Remove the need for const sized proofs static constexpr uint32_t CONST_PROOF_SIZE_LOG_N = 28; @@ -9,7 +22,10 @@ static constexpr uint32_t CONST_PROOF_SIZE_LOG_N = 28; // The log of the max circuit size of circuits being folded. This size is assumed by the PG prover and verifier in order // to ensure a constant PG proof size and a PG recursive verifier circuit that is independent of the size of the // circuits being folded. -static constexpr uint32_t CONST_PG_LOG_N = 20; +static constexpr uint32_t CONST_PG_LOG_N = 21; + +// The size of the AVMRecursiveVerifier circuit arithmetized with Mega. +static constexpr uint32_t MEGA_AVM_LOG_N = 21; static constexpr uint32_t CONST_ECCVM_LOG_N = 16; @@ -49,5 +65,5 @@ static constexpr uint32_t NUM_INTERLEAVING_CLAIMS = 2; // When we branch a transcript, we want to clearly distinguish between what happened before and after the branching. We // increase the `round_index` of the original transcript by `BRANCHING_JUMP`, so that there is a gap of `BRANCHING_JUMP` // round indices between what happened before and after the branching. This constant is arbitrary. 
-static constexpr std::size_t BRANCHING_JUMP = 5; +static constexpr size_t BRANCHING_JUMP = 5; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/crypto/ecdsa/ecdsa.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/ecdsa/ecdsa.test.cpp index 8caa089ce7a2..cdf781b57a9a 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/ecdsa/ecdsa.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/ecdsa/ecdsa.test.cpp @@ -149,7 +149,7 @@ TEST(ecdsa, verify_signature_secp256r1_sha256_NIST_1) Qy = ce4014c68811f9a21a1fdb2c0e6113e06db7ca93b7404e78dc7ccd5ca89a4ca9 k = 94a1bbb14b906a61a280f245f9e93c7f3b4a6247824f5d33b9670787642a68de R = f3ac8061b514795b8843e3d6629527ed2afd6b1f6a555a7acabb5e6f79c8c2ac - S = 8bf77819ca05a6b2786c76262bf7371cef97b218e96f175a3ccdda2acc058903 + S = 740887e535fa594e879389d9d408c8e2cd4f4894bda8872ab6ebf098305d9c4e */ secp256r1::fq P_x = secp256r1::fq(0x3c59ff46c271bf83, 0xd3565de94bbfb12f, 0xf033bfa248db8fcc, 0x1ccbe91c075fc7f4) @@ -164,8 +164,8 @@ TEST(ecdsa, verify_signature_secp256r1_sha256_NIST_1) }; std::array s{ - 0x8b, 0xf7, 0x78, 0x19, 0xca, 0x05, 0xa6, 0xb2, 0x78, 0x6c, 0x76, 0x26, 0x2b, 0xf7, 0x37, 0x1c, - 0xef, 0x97, 0xb2, 0x18, 0xe9, 0x6f, 0x17, 0x5a, 0x3c, 0xcd, 0xda, 0x2a, 0xcc, 0x05, 0x89, 0x03, + 0x74, 0x08, 0x87, 0xe5, 0x35, 0xfa, 0x59, 0x4e, 0x87, 0x93, 0x89, 0xd9, 0xd4, 0x08, 0xc8, 0xe2, + 0xcd, 0x4f, 0x48, 0x94, 0xbd, 0xa8, 0x87, 0x2a, 0xb6, 0xeb, 0xf0, 0x98, 0x30, 0x5d, 0x9c, 0x4e, }; ecdsa_signature sig{ r, s, 27 }; diff --git a/barretenberg/cpp/src/barretenberg/crypto/ecdsa/ecdsa_impl.hpp b/barretenberg/cpp/src/barretenberg/crypto/ecdsa/ecdsa_impl.hpp index b742ac7cd810..d104b74e9a30 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/ecdsa/ecdsa_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/ecdsa/ecdsa_impl.hpp @@ -138,7 +138,7 @@ bool ecdsa_verify_signature(const std::string& message, uint256_t r_uint; uint256_t s_uint; uint256_t mod = uint256_t(Fr::modulus); - if (!public_key.on_curve()) { + if 
((!public_key.on_curve()) || (public_key.is_point_at_infinity())) { return false; } const auto* r_buf = &sig.r[0]; @@ -154,9 +154,7 @@ bool ecdsa_verify_signature(const std::string& message, } // Check that the s value is less than |Fr| / 2 - if (s_uint * 2 > mod) { - throw_or_abort("s value is not less than curve order by 2"); - } + BB_ASSERT_LT(s_uint, (mod + 1) / 2, "s value is not less than curve order by 2"); Fr r = Fr(r_uint); Fr s = Fr(s_uint); @@ -171,7 +169,9 @@ bool ecdsa_verify_signature(const std::string& message, Fr u1 = z * s_inv; Fr u2 = r * s_inv; - typename G1::affine_element R(typename G1::element(public_key) * u2 + G1::one * u1); + typename G1::affine_element R((typename G1::element(public_key) * u2) + (G1::one * u1)); + BB_ASSERT_EQ(R.is_point_at_infinity(), false, "Result of the scalar multiplication is the point at infinity."); + uint256_t Rx(R.x); Fr result(Rx); return result == r; diff --git a/barretenberg/cpp/src/barretenberg/crypto/generators/generator_data.hpp b/barretenberg/cpp/src/barretenberg/crypto/generators/generator_data.hpp index 37d9585512a5..53ae0ac4b0e7 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/generators/generator_data.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/generators/generator_data.hpp @@ -137,7 +137,7 @@ template struct GeneratorContext { GeneratorContext() = default; GeneratorContext(size_t hash_index) - : offset(hash_index){}; + : offset(hash_index) {}; GeneratorContext(size_t _offset, std::string_view _domain_separator) : offset(_offset) , domain_separator(_domain_separator) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp index 9f6211ceb410..d122aae98153 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp +++ 
b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp @@ -58,7 +58,7 @@ template class ContentAddressedAppendOn using RollbackCallback = EmptyResponseCallback; using RemoveHistoricBlockCallback = std::function&)>; using UnwindBlockCallback = std::function&)>; - using FinaliseBlockCallback = EmptyResponseCallback; + using FinalizeBlockCallback = EmptyResponseCallback; using GetBlockForIndexCallback = std::function&)>; using CheckpointCallback = EmptyResponseCallback; using CheckpointCommitCallback = EmptyResponseCallback; @@ -249,7 +249,7 @@ template class ContentAddressedAppendOn void unwind_block(const block_number_t& blockNumber, const UnwindBlockCallback& on_completion); - void finalise_block(const block_number_t& blockNumber, const FinaliseBlockCallback& on_completion); + void finalize_block(const block_number_t& blockNumber, const FinalizeBlockCallback& on_completion); void checkpoint(const CheckpointCallback& on_completion); void commit_checkpoint(const CheckpointCommitCallback& on_completion); @@ -340,7 +340,7 @@ ContentAddressedAppendOnlyTree::ContentAddressedAppendOnly signal.wait_for_level(0); if (!result.success) { - throw std::runtime_error(format("Failed to initialise tree: ", result.message)); + throw std::runtime_error(format("Failed to initialize tree: ", result.message)); } store_->get_meta(meta); @@ -1037,16 +1037,16 @@ void ContentAddressedAppendOnlyTree::unwind_block(const bl } template -void ContentAddressedAppendOnlyTree::finalise_block(const block_number_t& blockNumber, - const FinaliseBlockCallback& on_completion) +void ContentAddressedAppendOnlyTree::finalize_block(const block_number_t& blockNumber, + const FinalizeBlockCallback& on_completion) { auto job = [=, this]() { execute_and_report( [=, this]() { if (blockNumber == 0) { - throw std::runtime_error("Unable to finalise block 0"); + throw std::runtime_error("Unable to finalize block 0"); } - 
store_->advance_finalised_block(blockNumber); + store_->advance_finalized_block(blockNumber); }, on_completion); }; diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp index c06edc738a9c..0dc8d50a477c 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp @@ -68,12 +68,12 @@ void check_size(TreeType& tree, index_t expected_size, bool includeUncommitted = signal.wait_for_level(); } -void check_finalised_block_height(TreeType& tree, block_number_t expected_finalised_block) +void check_finalized_block_height(TreeType& tree, block_number_t expected_finalized_block) { Signal signal; auto completion = [&](const TypedResponse& response) -> void { EXPECT_EQ(response.success, true); - EXPECT_EQ(response.inner.meta.finalisedBlockHeight, expected_finalised_block); + EXPECT_EQ(response.inner.meta.finalizedBlockHeight, expected_finalized_block); signal.signal_level(); }; tree.get_meta_data(false, completion); @@ -85,7 +85,7 @@ void check_block_height(TreeType& tree, index_t expected_block_height) Signal signal; auto completion = [&](const TypedResponse& response) -> void { EXPECT_EQ(response.success, true); - EXPECT_EQ(response.inner.meta.unfinalisedBlockHeight, expected_block_height); + EXPECT_EQ(response.inner.meta.unfinalizedBlockHeight, expected_block_height); signal.signal_level(); }; tree.get_meta_data(true, completion); @@ -239,7 +239,7 @@ void add_values(TreeType& tree, const std::vector& values) signal.wait_for_level(); } -void finalise_block(TreeType& tree, const block_number_t& blockNumber, bool expected_success = true) +void finalize_block(TreeType& tree, const block_number_t& blockNumber, 
bool expected_success = true) { Signal signal; auto completion = [&](const Response& response) -> void { @@ -249,7 +249,7 @@ void finalise_block(TreeType& tree, const block_number_t& blockNumber, bool expe } signal.signal_level(); }; - tree.finalise_block(blockNumber, completion); + tree.finalize_block(blockNumber, completion); signal.wait_for_level(); } @@ -833,9 +833,9 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_commit_multiple_blocks) TreeType tree(std::move(store), pool); MemoryTree memdb(depth); - auto check = [&](index_t expected_size, index_t expected_unfinalised_block_height) { + auto check = [&](index_t expected_size, index_t expected_unfinalized_block_height) { check_size(tree, expected_size); - check_block_height(tree, expected_unfinalised_block_height); + check_block_height(tree, expected_unfinalized_block_height); check_root(tree, memdb.root()); check_sibling_path(tree, 0, memdb.get_sibling_path(0)); check_sibling_path(tree, expected_size - 1, memdb.get_sibling_path(expected_size - 1)); @@ -870,9 +870,9 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_add_varying_size_blocks) TreeType tree(std::move(store), pool); MemoryTree memdb(depth); - auto check = [&](index_t expected_size, index_t expected_unfinalised_block_height) { + auto check = [&](index_t expected_size, index_t expected_unfinalized_block_height) { check_size(tree, expected_size); - check_block_height(tree, expected_unfinalised_block_height); + check_block_height(tree, expected_unfinalized_block_height); check_root(tree, memdb.root()); check_sibling_path(tree, 0, memdb.get_sibling_path(0)); check_sibling_path(tree, expected_size - 1, memdb.get_sibling_path(expected_size - 1)); @@ -914,9 +914,9 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_retrieve_historic_siblin std::vector historicPathsZeroIndex; std::vector historicPathsMaxIndex; - auto check = [&](index_t expected_size, index_t expected_unfinalised_block_height) { + auto check = [&](index_t 
expected_size, index_t expected_unfinalized_block_height) { check_size(tree, expected_size); - check_block_height(tree, expected_unfinalised_block_height); + check_block_height(tree, expected_unfinalized_block_height); check_root(tree, memdb.root()); check_sibling_path(tree, 0, memdb.get_sibling_path(0)); check_sibling_path(tree, expected_size - 1, memdb.get_sibling_path(expected_size - 1)); @@ -1300,8 +1300,8 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_remove_historic_block_da check(expected_size, i); commit_tree(tree); - // immediately finalise the block - finalise_block(tree, i + 1); + // immediately finalize the block + finalize_block(tree, i + 1); historicPathsZeroIndex.push_back(memdb.get_sibling_path(0)); historicPathsMaxIndex.push_back(memdb.get_sibling_path(expected_size - 1)); @@ -1628,7 +1628,7 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_commit_and_remove_histor block_number_t blockToRemove = 1; while (blockToRemove < blockNumber) { - finalise_block(tree, blockToRemove + 1); + finalize_block(tree, blockToRemove + 1); // Remove the historic next block remove_historic_block(tree, blockToRemove); @@ -1735,7 +1735,7 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_retrieve_block_numbers_b } } -TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_advance_finalised_blocks) +TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_advance_finalized_blocks) { std::string name = random_string(); constexpr uint32_t depth = 10; @@ -1747,7 +1747,7 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_advance_finalised_blocks uint32_t blockSize = 16; uint32_t numBlocks = 16; - uint32_t finalisedBlockDelay = 4; + uint32_t finalizedBlockDelay = 4; std::vector values = create_values(blockSize * numBlocks); for (uint32_t i = 0; i < numBlocks; i++) { @@ -1761,25 +1761,25 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_advance_finalised_blocks add_values(tree, to_add); commit_tree(tree); - block_number_t 
expectedFinalisedBlock = i < finalisedBlockDelay ? 0 : i - finalisedBlockDelay; - check_finalised_block_height(tree, expectedFinalisedBlock); + block_number_t expectedFinalizedBlock = i < finalizedBlockDelay ? 0 : i - finalizedBlockDelay; + check_finalized_block_height(tree, expectedFinalizedBlock); - if (i >= finalisedBlockDelay) { + if (i >= finalizedBlockDelay) { - block_number_t blockToFinalise = expectedFinalisedBlock + 1; + block_number_t blockToFinalize = expectedFinalizedBlock + 1; - // attempting to finalise a block that doesn't exist should fail - finalise_block(tree, blockToFinalise + numBlocks, false); + // attempting to finalize a block that doesn't exist should fail + finalize_block(tree, blockToFinalize + numBlocks, false); - finalise_block(tree, blockToFinalise, true); + finalize_block(tree, blockToFinalize, true); - // finalising the currently finalised block should succeed - finalise_block(tree, blockToFinalise, true); + // finalising the currently finalized block should succeed + finalize_block(tree, blockToFinalize, true); } } } -TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_finalise_multiple_blocks) +TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_finalize_multiple_blocks) { std::string name = random_string(); constexpr uint32_t depth = 10; @@ -1807,12 +1807,12 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_finalise_multiple_blocks check_block_height(tree, numBlocks); - block_number_t blockToFinalise = 8; + block_number_t blockToFinalize = 8; - finalise_block(tree, blockToFinalise); + finalize_block(tree, blockToFinalize); } -TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_not_finalise_block_beyond_pending_chain) +TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_not_finalize_block_beyond_pending_chain) { std::string name = random_string(); constexpr uint32_t depth = 10; @@ -1827,7 +1827,7 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_not_finalise_block_beyon std::vector values = 
create_values(blockSize * numBlocks); // finalising block 1 should fail - finalise_block(tree, 1, false); + finalize_block(tree, 1, false); for (uint32_t i = 0; i < numBlocks; i++) { std::vector to_add; @@ -1844,12 +1844,12 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_not_finalise_block_beyon check_block_height(tree, numBlocks); // should fail - finalise_block(tree, numBlocks + 1, false); + finalize_block(tree, numBlocks + 1, false); - // finalise the entire chain - block_number_t blockToFinalise = numBlocks; + // finalize the entire chain + block_number_t blockToFinalize = numBlocks; - finalise_block(tree, blockToFinalise); + finalize_block(tree, blockToFinalize); } TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_not_fork_from_unwound_blocks) @@ -1888,7 +1888,7 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_not_fork_from_expired_hi add_values(tree, values); commit_tree(tree); } - finalise_block(tree, 3); + finalize_block(tree, 3); remove_historic_block(tree, 1); remove_historic_block(tree, 2); @@ -1928,7 +1928,7 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_fork_from_block_zero_whe check_sibling_path(tree2, 0, path, false, true); } -TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_not_unwind_finalised_block) +TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_not_unwind_finalized_block) { std::string name = random_string(); constexpr uint32_t depth = 10; @@ -1956,17 +1956,17 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_not_unwind_finalised_blo check_block_height(tree, numBlocks); - block_number_t blockToFinalise = 8; + block_number_t blockToFinalize = 8; - finalise_block(tree, blockToFinalise); + finalize_block(tree, blockToFinalize); - for (uint32_t i = numBlocks; i > blockToFinalise; i--) { + for (uint32_t i = numBlocks; i > blockToFinalize; i--) { unwind_block(tree, i); } - unwind_block(tree, blockToFinalise, false); + unwind_block(tree, blockToFinalize, false); } 
-TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_not_historically_remove_finalised_block) +TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_not_historically_remove_finalized_block) { std::string name = random_string(); constexpr uint32_t depth = 10; @@ -1994,14 +1994,14 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_not_historically_remove_ check_block_height(tree, numBlocks); - block_number_t blockToFinalise = 8; + block_number_t blockToFinalize = 8; - finalise_block(tree, blockToFinalise); + finalize_block(tree, blockToFinalize); - for (uint32_t i = 0; i < blockToFinalise - 1; i++) { + for (uint32_t i = 0; i < blockToFinalize - 1; i++) { remove_historic_block(tree, i + 1); } - remove_historic_block(tree, blockToFinalise, false); + remove_historic_block(tree, blockToFinalize, false); } TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_checkpoint_and_revert_forks) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/hash.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/hash.hpp index 27588a9ed88b..dbf10e42b246 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/hash.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/hash.hpp @@ -12,8 +12,6 @@ #include "barretenberg/crypto/pedersen_hash/pedersen.hpp" #include "barretenberg/crypto/poseidon2/poseidon2.hpp" #include "barretenberg/numeric/bitop/pow.hpp" -#include "barretenberg/stdlib/hash/pedersen/pedersen.hpp" -#include "barretenberg/stdlib/primitives/field/field.hpp" #include namespace bb::crypto::merkle_tree { diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/hash.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/hash.test.cpp index bf04e945504a..6c0e0657c063 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/hash.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/hash.test.cpp @@ -1,4 +1,5 @@ #include "hash.hpp" +#include "barretenberg/stdlib/hash/pedersen/pedersen.hpp" 
#include "memory_tree.hpp" #include diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp index a768fe725830..6bb215d57178 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp @@ -43,7 +43,7 @@ namespace bb::crypto::merkle_tree { /** - * @brief Implements a parallelised batch insertion indexed tree + * @brief Implements a parallelized batch insertion indexed tree * Accepts template argument of the type of store backing the tree, the type of store containing the leaves and the * hashing policy * All public methods are asynchronous unless marked otherwise @@ -71,7 +71,7 @@ class ContentAddressedIndexedTree : public ContentAddressedAppendOnlyTree store, std::shared_ptr workers, const index_t& initial_size) - : ContentAddressedIndexedTree(std::move(store), workers, initial_size, std::vector()){}; + : ContentAddressedIndexedTree(std::move(store), workers, initial_size, std::vector()) {}; ContentAddressedIndexedTree(ContentAddressedIndexedTree const& other) = delete; ContentAddressedIndexedTree(ContentAddressedIndexedTree&& other) = delete; ~ContentAddressedIndexedTree() = default; @@ -305,7 +305,7 @@ ContentAddressedIndexedTree::ContentAddressedIndexedTree( TreeMeta meta; store_->get_meta(meta); - // if the tree already contains leaves then it's been initialised in the past + // if the tree already contains leaves then it's been initialized in the past if (meta.size > 0) { return; } @@ -346,7 +346,7 @@ ContentAddressedIndexedTree::ContentAddressedIndexedTree( ContentAddressedAppendOnlyTree::add_values_internal(appended_hashes, completion, false); signal.wait_for_level(0); if (!result.success) { - throw std::runtime_error(format("Failed to 
initialise tree: ", result.message)); + throw std::runtime_error(format("Failed to initialize tree: ", result.message)); } store_->get_meta(meta); meta.initialRoot = result.inner.root; @@ -552,8 +552,8 @@ void ContentAddressedIndexedTree::add_or_update_values_int bool capture_witness) { // We first take a copy of the leaf values and their locations within the set given to us - std::shared_ptr>> values_to_be_sorted = - std::make_shared>>(values.size()); + std::shared_ptr>> values_to_be_sorted = + std::make_shared>>(values.size()); for (size_t i = 0; i < values.size(); ++i) { (*values_to_be_sorted)[i] = std::make_pair(values[i], i); } @@ -958,8 +958,8 @@ void ContentAddressedIndexedTree::generate_insertions( max_size_)); } for (size_t i = 0; i < values.size(); ++i) { - std::pair& value_pair = values[i]; - size_t index_into_appended_leaves = value_pair.second; + std::pair& value_pair = values[i]; + index_t index_into_appended_leaves = value_pair.second; index_t index_of_new_leaf = static_cast(index_into_appended_leaves) + meta.size; if (value_pair.first.is_empty()) { continue; diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.test.cpp index e0ffe1e3c20a..ae2b792e98b0 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.test.cpp @@ -224,12 +224,12 @@ void check_sibling_path(TypeOfTree& tree, EXPECT_EQ(path, expected_sibling_path); } -template void check_unfinalised_block_height(TypeOfTree& tree, index_t expected_block_height) +template void check_unfinalized_block_height(TypeOfTree& tree, index_t expected_block_height) { Signal signal; auto completion = [&](const TypedResponse& response) -> void { EXPECT_EQ(response.success, true); - 
EXPECT_EQ(response.inner.meta.unfinalisedBlockHeight, expected_block_height); + EXPECT_EQ(response.inner.meta.unfinalizedBlockHeight, expected_block_height); signal.signal_level(); }; tree.get_meta_data(true, completion); @@ -341,14 +341,14 @@ void remove_historic_block(TypeOfTree& tree, const block_number_t& blockNumber, } template -void finalise_block(TypeOfTree& tree, const block_number_t& blockNumber, bool expected_success = true) +void finalize_block(TypeOfTree& tree, const block_number_t& blockNumber, bool expected_success = true) { Signal signal; auto completion = [&](const Response& response) -> void { EXPECT_EQ(response.success, expected_success); signal.signal_level(); }; - tree.finalise_block(blockNumber, completion); + tree.finalize_block(blockNumber, completion); signal.wait_for_level(); } @@ -369,7 +369,7 @@ template void check_block_height(TypeOfTree& tree, index_t Signal signal; auto completion = [&](const TypedResponse& response) -> void { EXPECT_EQ(response.success, true); - EXPECT_EQ(response.inner.meta.unfinalisedBlockHeight, expected_block_height); + EXPECT_EQ(response.inner.meta.unfinalizedBlockHeight, expected_block_height); signal.signal_level(); }; tree.get_meta_data(true, completion); @@ -2044,7 +2044,7 @@ TEST_F(PersistedContentAddressedIndexedTreeTest, test_can_create_forks_at_histor treeAtBlock2, { batch3[3] }, 2, 20 + batch_size, { std::nullopt }, true, false); check_historic_find_leaf_index_from(treeAtBlock2, batch3[3], 2, 20 + batch_size, 35 + batch_size, true, true); - check_unfinalised_block_height(treeAtBlock2, 2); + check_unfinalized_block_height(treeAtBlock2, 2); // It should be impossible to commit using the image commit_tree(treeAtBlock2, false); @@ -2192,7 +2192,7 @@ TEST_F(PersistedContentAddressedIndexedTreeTest, test_remove_historical_blocks) lowLeaf = get_historic_low_leaf(tree, 2, PublicDataLeafValue(60, 0)); EXPECT_EQ(lowLeaf.index, 2); - finalise_block(tree, 3); + finalize_block(tree, 3); // remove historical block 1 
remove_historic_block(tree, 1); diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.test.cpp index 4f48e29efff4..a8a395c18f65 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.test.cpp @@ -85,7 +85,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_read_meta_data) metaData.root = VALUES[2]; metaData.depth = 40; metaData.oldestHistoricBlock = 87; - metaData.unfinalisedBlockHeight = 95; + metaData.unfinalizedBlockHeight = 95; metaData.name = "Note hash tree"; metaData.size = 60; LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); @@ -113,7 +113,7 @@ TEST_F(LMDBTreeStoreTest, can_read_data_from_multiple_threads) metaData.root = VALUES[2]; metaData.depth = 40; metaData.oldestHistoricBlock = 87; - metaData.unfinalisedBlockHeight = 95; + metaData.unfinalizedBlockHeight = 95; metaData.name = "Note hash tree"; metaData.size = 60; LMDBTreeStore store(_directory, "DB1", _mapSize, 2); @@ -164,7 +164,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_read_multiple_blocks_with_meta) meta.size = blockData.size; meta.root = blockData.root; meta.depth = 32; - meta.unfinalisedBlockHeight = i + start_block; + meta.unfinalizedBlockHeight = i + start_block; meta.name = "NullifierTree"; store.write_meta_data(meta, *transaction); transaction->commit(); @@ -192,7 +192,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_read_multiple_blocks_with_meta) EXPECT_EQ(meta.size, blockData.size); EXPECT_EQ(meta.root, blockData.root); EXPECT_EQ(meta.depth, 32); - EXPECT_EQ(meta.unfinalisedBlockHeight, blockData.blockNumber); + EXPECT_EQ(meta.unfinalizedBlockHeight, blockData.blockNumber); EXPECT_EQ(meta.name, "NullifierTree"); } } @@ -604,7 +604,7 @@ TEST_F(LMDBTreeStoreTest, reports_physical_file_size) metaData.size = blockData.size; 
metaData.root = blockData.root; metaData.depth = 32; - metaData.unfinalisedBlockHeight = static_cast(i); + metaData.unfinalizedBlockHeight = static_cast(i); metaData.name = "NullifierTree"; // Write metadata and block data with different values each iteration diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/array_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/array_store.hpp index e90380a07960..030932f15427 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/array_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/array_store.hpp @@ -17,15 +17,15 @@ class MockTransaction { using Ptr = std::unique_ptr; bool get_node(uint32_t, index_t, std::vector&) const { return false; } - template void get_value_by_integer(T&, std::vector&){}; + template void get_value_by_integer(T&, std::vector&) {}; - void get_value(std::vector&, std::vector&){}; + void get_value(std::vector&, std::vector&) {}; void put_node(uint32_t, index_t, const std::vector&) {} - template void put_value_by_integer(T&, std::vector&){}; + template void put_value_by_integer(T&, std::vector&) {}; - void put_value(std::vector&, std::vector&){}; + void put_value(std::vector&, std::vector&) {}; }; class MockPersistedStore { @@ -88,8 +88,8 @@ template class ArrayStore { root = meta.root; } - void commit(){}; - void rollback(){}; + void commit() {}; + void rollback() {}; ReadTransactionPtr create_read_transactiono() { return std::make_unique(); } diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp index 6321dbedb135..f7fe1fb742f1 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp +++ 
b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp @@ -187,7 +187,7 @@ template class ContentAddressedCachedTreeStore { void unwind_block(const block_number_t& blockNumber, TreeMeta& finalMeta, TreeDBStats& dbStats); - void advance_finalised_block(const block_number_t& blockNumber); + void advance_finalized_block(const block_number_t& blockNumber); std::optional find_block_for_index(const index_t& index, ReadTransaction& tx) const; @@ -203,7 +203,7 @@ template class ContentAddressedCachedTreeStore { struct ForkConstantData { std::string name_; uint32_t depth_; - std::optional initialised_from_block_; + std::optional initialized_from_block_; }; ForkConstantData forkConstantData_; mutable std::mutex mtx_; @@ -212,9 +212,9 @@ template class ContentAddressedCachedTreeStore { Cache cache_; - void initialise(); + void initialize(); - void initialise_from_block(const block_number_t& blockNumber); + void initialize_from_block(const block_number_t& blockNumber); bool read_persisted_meta(TreeMeta& m, ReadTransaction& tx) const; @@ -256,7 +256,7 @@ ContentAddressedCachedTreeStore::ContentAddressedCachedTreeStore( , dataStore_(dataStore) , cache_(levels) { - initialise(); + initialize(); } template @@ -269,10 +269,10 @@ ContentAddressedCachedTreeStore::ContentAddressedCachedTreeStore( , dataStore_(dataStore) , cache_(levels) { - initialise_from_block(referenceBlockNumber); + initialize_from_block(referenceBlockNumber); } -// Much Like the commit/rollback/set finalised/remove historic blocks apis +// Much Like the commit/rollback/set finalized/remove historic blocks apis // These 3 apis (checkpoint/revert_checkpoint/commit_checkpoint) all assume they are not called // during the process of reading/writing uncommitted state // This is reasonable, they intended for use by forks at the point of starting/ending a function call @@ -308,9 +308,9 @@ index_t ContentAddressedCachedTreeStore::constrain_tree_size_to_o // We need to 
identify the size of the committed tree as it exists from our perspective // We either take from the fork's constant data if available or we read the meta data from the store index_t sizeLimit = 0; - if (forkConstantData_.initialised_from_block_.has_value()) { + if (forkConstantData_.initialized_from_block_.has_value()) { // We are a fork. Take from constant data - sizeLimit = forkConstantData_.initialised_from_block_.value().size; + sizeLimit = forkConstantData_.initialized_from_block_.value().size; } else { // We are the main tree. Read from the store, only use committed so as to not violate any requests for purely // committed data @@ -610,11 +610,11 @@ void ContentAddressedCachedTreeStore::enrich_meta_from_fork_const // Here we update the given meta with properties from our constant fork data if available. // If we are not a fork then nothing is to be updated // If we are a fork then we will overwrite the root, size and committed size with the original fork values - if (forkConstantData_.initialised_from_block_.has_value()) { - m.size = forkConstantData_.initialised_from_block_->size; - m.committedSize = forkConstantData_.initialised_from_block_->size; - m.root = forkConstantData_.initialised_from_block_->root; - m.unfinalisedBlockHeight = forkConstantData_.initialised_from_block_->blockNumber; + if (forkConstantData_.initialized_from_block_.has_value()) { + m.size = forkConstantData_.initialized_from_block_->size; + m.committedSize = forkConstantData_.initialized_from_block_->size; + m.root = forkConstantData_.initialized_from_block_->root; + m.unfinalizedBlockHeight = forkConstantData_.initialized_from_block_->blockNumber; } } @@ -652,7 +652,7 @@ template void ContentAddressedCachedTreeStore::commit_block(TreeMeta& fina TreeMeta meta; // We don't allow commits using images/forks - if (forkConstantData_.initialised_from_block_.has_value()) { + if (forkConstantData_.initialized_from_block_.has_value()) { throw std::runtime_error("Committing a fork is 
forbidden"); } get_meta(meta); @@ -696,7 +696,7 @@ void ContentAddressedCachedTreeStore::commit_block(TreeMeta& fina WriteTransactionPtr tx = create_write_transaction(); try { if (dataPresent) { - // std::cout << "Persisting data for block " << uncommittedMeta.unfinalisedBlockHeight + 1 << std::endl; + // std::cout << "Persisting data for block " << uncommittedMeta.unfinalizedBlockHeight + 1 << std::endl; // Persist the leaf indices persist_leaf_indices(*tx); } @@ -709,13 +709,13 @@ void ContentAddressedCachedTreeStore::commit_block(TreeMeta& fina if (dataPresent || meta.size > 0) { persist_node(std::optional(meta.root), 0, *tx); } - ++meta.unfinalisedBlockHeight; + ++meta.unfinalizedBlockHeight; if (meta.oldestHistoricBlock == 0) { meta.oldestHistoricBlock = 1; } // std::cout << "New root " << uncommittedMeta.root << std::endl; - BlockPayload block{ .size = meta.size, .blockNumber = meta.unfinalisedBlockHeight, .root = meta.root }; - dataStore_->write_block_data(meta.unfinalisedBlockHeight, block, *tx); + BlockPayload block{ .size = meta.size, .blockNumber = meta.unfinalizedBlockHeight, .root = meta.root }; + dataStore_->write_block_data(meta.unfinalizedBlockHeight, block, *tx); dataStore_->write_block_index_data(block.blockNumber, block.size, *tx); meta.committedSize = meta.size; @@ -824,17 +824,17 @@ void ContentAddressedCachedTreeStore::persist_meta(TreeMeta& m, W } template -void ContentAddressedCachedTreeStore::advance_finalised_block(const block_number_t& blockNumber) +void ContentAddressedCachedTreeStore::advance_finalized_block(const block_number_t& blockNumber) { TreeMeta committedMeta; TreeMeta uncommittedMeta; BlockPayload blockPayload; if (blockNumber < 1) { throw std::runtime_error( - format("Unable to advance finalised block: ", blockNumber, ". Tree name: ", forkConstantData_.name_)); + format("Unable to advance finalized block: ", blockNumber, ". 
Tree name: ", forkConstantData_.name_)); } - if (forkConstantData_.initialised_from_block_.has_value()) { - throw std::runtime_error("Advancing the finalised block on a fork is forbidden"); + if (forkConstantData_.initialized_from_block_.has_value()) { + throw std::runtime_error("Advancing the finalized block on a fork is forbidden"); } { // read both committed and uncommitted meta values @@ -842,36 +842,36 @@ void ContentAddressedCachedTreeStore::advance_finalised_block(con get_meta(uncommittedMeta); get_meta(committedMeta, *readTx, false); if (!dataStore_->read_block_data(blockNumber, blockPayload, *readTx)) { - throw std::runtime_error(format("Unable to advance finalised block: ", + throw std::runtime_error(format("Unable to advance finalized block: ", blockNumber, ". Failed to read block data. Tree name: ", forkConstantData_.name_)); } } - // do nothing if the block is already finalised - if (committedMeta.finalisedBlockHeight >= blockNumber) { + // do nothing if the block is already finalized + if (committedMeta.finalizedBlockHeight >= blockNumber) { return; } - // can currently only finalise up to the unfinalised block height - if (committedMeta.finalisedBlockHeight > committedMeta.unfinalisedBlockHeight) { - throw std::runtime_error(format("Unable to finalise block ", + // can currently only finalize up to the unfinalized block height + if (committedMeta.finalizedBlockHeight > committedMeta.unfinalizedBlockHeight) { + throw std::runtime_error(format("Unable to finalize block ", blockNumber, - " currently unfinalised block height ", - committedMeta.finalisedBlockHeight)); + " currently unfinalized block height ", + committedMeta.finalizedBlockHeight)); } { - // commit the new finalised block + // commit the new finalized block WriteTransactionPtr writeTx = create_write_transaction(); try { - committedMeta.finalisedBlockHeight = blockNumber; + committedMeta.finalizedBlockHeight = blockNumber; // persist the new meta data persist_meta(committedMeta, *writeTx); 
writeTx->commit(); } catch (std::exception& e) { writeTx->try_abort(); - throw std::runtime_error(format("Unable to commit advance of finalised block: ", + throw std::runtime_error(format("Unable to commit advance of finalized block: ", blockNumber, ". Tree name: ", forkConstantData_.name_, @@ -881,7 +881,7 @@ void ContentAddressedCachedTreeStore::advance_finalised_block(con } // commit successful, now also update the uncommitted meta - uncommittedMeta.finalisedBlockHeight = committedMeta.finalisedBlockHeight; + uncommittedMeta.finalizedBlockHeight = committedMeta.finalizedBlockHeight; put_meta(uncommittedMeta); } @@ -898,7 +898,7 @@ void ContentAddressedCachedTreeStore::unwind_block(const block_nu throw std::runtime_error( format("Unable to unwind block: ", blockNumber, ". Tree name: ", forkConstantData_.name_)); } - if (forkConstantData_.initialised_from_block_.has_value()) { + if (forkConstantData_.initialized_from_block_.has_value()) { throw std::runtime_error("Removing a block on a fork is forbidden"); } { @@ -912,25 +912,25 @@ void ContentAddressedCachedTreeStore::unwind_block(const block_nu " Can't unwind with uncommitted data, first rollback before unwinding. Tree name: ", forkConstantData_.name_)); } - if (blockNumber > uncommittedMeta.unfinalisedBlockHeight) { + if (blockNumber > uncommittedMeta.unfinalizedBlockHeight) { // Nothing to do, the block doesn't exist. Maybe it was already removed finalMeta = uncommittedMeta; extract_db_stats(dbStats, *readTx); return; } - if (blockNumber != uncommittedMeta.unfinalisedBlockHeight) { + if (blockNumber != uncommittedMeta.unfinalizedBlockHeight) { throw std::runtime_error(format("Unable to unwind block: ", blockNumber, - " unfinalisedBlockHeight: ", - committedMeta.unfinalisedBlockHeight, + " unfinalizedBlockHeight: ", + committedMeta.unfinalizedBlockHeight, ". 
Tree name: ", forkConstantData_.name_)); } - if (blockNumber <= uncommittedMeta.finalisedBlockHeight) { + if (blockNumber <= uncommittedMeta.finalizedBlockHeight) { throw std::runtime_error(format("Unable to unwind block: ", blockNumber, - " finalisedBlockHeight: ", - committedMeta.finalisedBlockHeight, + " finalizedBlockHeight: ", + committedMeta.finalizedBlockHeight, ". Tree name: ", forkConstantData_.name_)); } @@ -971,7 +971,7 @@ void ContentAddressedCachedTreeStore::unwind_block(const block_nu // remove the block from the block data table dataStore_->delete_block_data(blockNumber, *writeTx); dataStore_->delete_block_index(blockData.size, blockData.blockNumber, *writeTx); - uncommittedMeta.unfinalisedBlockHeight = previousBlockData.blockNumber; + uncommittedMeta.unfinalizedBlockHeight = previousBlockData.blockNumber; uncommittedMeta.size = previousBlockData.size; uncommittedMeta.committedSize = previousBlockData.size; uncommittedMeta.root = previousBlockData.root; @@ -1009,7 +1009,7 @@ void ContentAddressedCachedTreeStore::remove_historical_block(con throw std::runtime_error( format("Unable to remove historical block: ", blockNumber, ". Tree name: ", forkConstantData_.name_)); } - if (forkConstantData_.initialised_from_block_.has_value()) { + if (forkConstantData_.initialized_from_block_.has_value()) { throw std::runtime_error("Removing a block on a fork is forbidden"); } { @@ -1032,11 +1032,11 @@ void ContentAddressedCachedTreeStore::remove_historical_block(con ". Tree name: ", forkConstantData_.name_)); } - if (blockNumber >= committedMeta.finalisedBlockHeight) { + if (blockNumber >= committedMeta.finalizedBlockHeight) { throw std::runtime_error(format("Unable to remove historical block: ", blockNumber, - " finalisedBlockHeight: ", - committedMeta.finalisedBlockHeight, + " finalizedBlockHeight: ", + committedMeta.finalizedBlockHeight, ". 
Tree name: ", forkConstantData_.name_)); } @@ -1166,7 +1166,7 @@ void ContentAddressedCachedTreeStore::remove_node(const std::opti } } -template void ContentAddressedCachedTreeStore::initialise() +template void ContentAddressedCachedTreeStore::initialize() { // Read the persisted meta data, if the name or depth of the tree is not consistent with what was provided during // construction then we throw @@ -1180,7 +1180,7 @@ template void ContentAddressedCachedTreeStore void ContentAddressedCachedTreeStore void ContentAddressedCachedTreeStore -void ContentAddressedCachedTreeStore::initialise_from_block(const block_number_t& blockNumber) +void ContentAddressedCachedTreeStore::initialize_from_block(const block_number_t& blockNumber) { // Read the persisted meta data, if the name or depth of the tree is not consistent with what was provided during // construction then we throw @@ -1217,7 +1217,7 @@ void ContentAddressedCachedTreeStore::initialise_from_block(const bool success = read_persisted_meta(meta, *tx); if (success) { if (forkConstantData_.name_ != meta.name || forkConstantData_.depth_ != meta.depth) { - throw std::runtime_error(format("Inconsistent tree meta data when initialising ", + throw std::runtime_error(format("Inconsistent tree meta data when initializing ", forkConstantData_.name_, " with depth ", forkConstantData_.depth_, @@ -1230,24 +1230,24 @@ void ContentAddressedCachedTreeStore::initialise_from_block(const } } else { - throw std::runtime_error(format("Tree found to be uninitialised when attempting to create ", + throw std::runtime_error(format("Tree found to be uninitialized when attempting to create ", forkConstantData_.name_, " from block ", blockNumber)); } - if (meta.unfinalisedBlockHeight < blockNumber) { - throw std::runtime_error(format("Unable to initialise from future block: ", + if (meta.unfinalizedBlockHeight < blockNumber) { + throw std::runtime_error(format("Unable to initialize from future block: ", blockNumber, - " 
unfinalisedBlockHeight: ", - meta.unfinalisedBlockHeight, + " unfinalizedBlockHeight: ", + meta.unfinalizedBlockHeight, ". Tree name: ", forkConstantData_.name_)); } if (meta.oldestHistoricBlock > blockNumber && blockNumber != 0) { throw std::runtime_error(format("Unable to fork from expired historical block: ", blockNumber, - " unfinalisedBlockHeight: ", + " unfinalizedBlockHeight: ", meta.oldestHistoricBlock, ". Tree name: ", forkConstantData_.name_)); @@ -1261,7 +1261,7 @@ void ContentAddressedCachedTreeStore::initialise_from_block(const throw std::runtime_error( format("Failed to retrieve block data: ", blockNumber, ". Tree name: ", forkConstantData_.name_)); } - forkConstantData_.initialised_from_block_ = blockData; + forkConstantData_.initialized_from_block_ = blockData; // Ensure the meta reflects the fork constant data enrich_meta_from_fork_constant_data(meta); cache_.put_meta(meta); diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/content_addressed_cache.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/content_addressed_cache.test.cpp index 9246ed5ec981..5e6325244a40 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/content_addressed_cache.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/content_addressed_cache.test.cpp @@ -17,7 +17,7 @@ using CacheType = ContentAddressedCache; class ContentAddressedCacheTest : public testing::Test { protected: void SetUp() override {} - void TearDown() override{}; + void TearDown() override {}; }; uint64_t get_index(uint64_t max_index = 0) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/tree_meta.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/tree_meta.hpp index 532b8124373b..debc12443771 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/tree_meta.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/tree_meta.hpp 
@@ -24,8 +24,8 @@ struct TreeMeta { index_t initialSize; bb::fr initialRoot; block_number_t oldestHistoricBlock; - block_number_t unfinalisedBlockHeight; - block_number_t finalisedBlockHeight; + block_number_t unfinalizedBlockHeight; + block_number_t finalizedBlockHeight; MSGPACK_FIELDS(name, depth, @@ -35,8 +35,8 @@ struct TreeMeta { initialSize, initialRoot, oldestHistoricBlock, - unfinalisedBlockHeight, - finalisedBlockHeight) + unfinalizedBlockHeight, + finalizedBlockHeight) TreeMeta(std::string n, uint32_t d, @@ -56,8 +56,8 @@ struct TreeMeta { , initialSize(is) , initialRoot(ir) , oldestHistoricBlock(o) - , unfinalisedBlockHeight(u) - , finalisedBlockHeight(f) + , unfinalizedBlockHeight(u) + , finalizedBlockHeight(f) {} TreeMeta() = default; ~TreeMeta() = default; @@ -70,8 +70,8 @@ struct TreeMeta { { return name == other.name && depth == other.depth && size == other.size && committedSize == other.committedSize && root == other.root && initialRoot == other.initialRoot && - initialSize == other.initialSize && unfinalisedBlockHeight == other.unfinalisedBlockHeight && - oldestHistoricBlock == other.oldestHistoricBlock && finalisedBlockHeight == other.finalisedBlockHeight; + initialSize == other.initialSize && unfinalizedBlockHeight == other.unfinalizedBlockHeight && + oldestHistoricBlock == other.oldestHistoricBlock && finalizedBlockHeight == other.finalizedBlockHeight; } }; @@ -80,8 +80,8 @@ inline std::ostream& operator<<(std::ostream& os, const TreeMeta& meta) os << "TreeMeta{name: " << meta.name << ", depth: " << meta.depth << ", size: " << std::dec << (meta.size) << ", committedSize: " << std::dec << meta.committedSize << ", root: " << meta.root << ", initialSize: " << std::dec << meta.initialSize << ", initialRoot: " << meta.initialRoot - << ", oldestHistoricBlock: " << std::dec << meta.oldestHistoricBlock << ", finalisedBlockHeight: " << std::dec - << meta.finalisedBlockHeight << ", unfinalisedBlockHeight: " << std::dec << meta.unfinalisedBlockHeight << 
"}"; + << ", oldestHistoricBlock: " << std::dec << meta.oldestHistoricBlock << ", finalizedBlockHeight: " << std::dec + << meta.finalizedBlockHeight << ", unfinalizedBlockHeight: " << std::dec << meta.unfinalizedBlockHeight << "}"; return os; } diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp index c43b2df9a088..332e0493ea6f 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp @@ -78,7 +78,7 @@ template struct LeafUpdateWitnessData { template struct AddIndexedDataResponse { AddDataResponse add_data_result; fr_sibling_path subtree_path; - std::shared_ptr>> sorted_leaves; + std::shared_ptr>> sorted_leaves; std::shared_ptr>> low_leaf_witness_data; AddIndexedDataResponse() = default; diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/signal.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/signal.hpp index 8116c0483df0..844440244103 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/signal.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/signal.hpp @@ -17,7 +17,7 @@ namespace bb::crypto::merkle_tree { class Signal { public: Signal(uint32_t initial_level = 1) - : signal_(initial_level){}; + : signal_(initial_level) {}; ~Signal() = default; Signal(const Signal& other) : signal_(other.signal_.load()) diff --git a/barretenberg/cpp/src/barretenberg/crypto/poseidon2/poseidon2.cpp b/barretenberg/cpp/src/barretenberg/crypto/poseidon2/poseidon2.cpp index 3d15f750402a..8c4bc25d713e 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/poseidon2/poseidon2.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/poseidon2/poseidon2.cpp @@ -16,38 +16,5 @@ typename Poseidon2::FF Poseidon2::hash(const std::vector -typename Poseidon2::FF Poseidon2::hash_buffer(const std::vector& input) -{ - const size_t num_bytes = input.size(); - const size_t 
bytes_per_element = 31; - size_t num_elements = static_cast(num_bytes % bytes_per_element != 0) + (num_bytes / bytes_per_element); - - const auto slice = [](const std::vector& data, const size_t start, const size_t slice_size) { - uint256_t result(0); - for (size_t i = 0; i < slice_size; ++i) { - result = (result << uint256_t(8)); - result += uint256_t(data[i + start]); - } - return FF(result); - }; - - std::vector converted; - for (size_t i = 0; i < num_elements - 1; ++i) { - size_t bytes_to_slice = bytes_per_element; - FF element = slice(input, i * bytes_per_element, bytes_to_slice); - converted.emplace_back(element); - } - size_t bytes_to_slice = num_bytes - ((num_elements - 1) * bytes_per_element); - FF element = slice(input, (num_elements - 1) * bytes_per_element, bytes_to_slice); - converted.emplace_back(element); - - return hash(converted); -} - template class Poseidon2; } // namespace bb::crypto diff --git a/barretenberg/cpp/src/barretenberg/crypto/poseidon2/poseidon2.hpp b/barretenberg/cpp/src/barretenberg/crypto/poseidon2/poseidon2.hpp index 4c72e7b06057..892f38871e8c 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/poseidon2/poseidon2.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/poseidon2/poseidon2.hpp @@ -23,11 +23,6 @@ template class Poseidon2 { * @brief Hashes a vector of field elements */ static FF hash(const std::vector& input); - /** - * @brief Hashes vector of bytes by chunking it into 31 byte field elements and calling hash() - * @details Slice function cuts out the required number of bytes from the byte vector - */ - static FF hash_buffer(const std::vector& input); }; extern template class Poseidon2; diff --git a/barretenberg/cpp/src/barretenberg/crypto/poseidon2/poseidon2.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/poseidon2/poseidon2.test.cpp index c18da87b739c..da113aec2d76 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/poseidon2/poseidon2.test.cpp +++ 
b/barretenberg/cpp/src/barretenberg/crypto/poseidon2/poseidon2.test.cpp @@ -45,22 +45,3 @@ TEST(Poseidon2, HashConsistencyCheck) EXPECT_EQ(result, expected); } - -TEST(Poseidon2, HashBufferConsistencyCheck) -{ - // 31 byte inputs because hash_buffer slicing is only injective with 31 bytes, as it slices 31 bytes for each field - // element - fr a(std::string("00000b615c4d3e2fa0b1c2d3e4f56789fedcba9876543210abcdef0123456789")); - - // takes field element and converts it to 32 bytes - auto input_vec = to_buffer(a); - bb::fr result1 = crypto::Poseidon2::hash_buffer(input_vec); - input_vec.erase(input_vec.begin()); // erase first byte since we want 31 bytes - fr result2 = crypto::Poseidon2::hash_buffer(input_vec); - - std::vector input{ a }; - auto expected = crypto::Poseidon2::hash(input); - - EXPECT_NE(result1, expected); - EXPECT_EQ(result2, expected); -} diff --git a/barretenberg/cpp/src/barretenberg/dsl/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/dsl/CMakeLists.txt index ed9596c1f75b..f8731ad1c5ea 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/dsl/CMakeLists.txt @@ -6,7 +6,8 @@ set(DSL_DEPENDENCIES stdlib_keccak stdlib_poseidon2 stdlib_schnorr - stdlib_honk_verifier) + stdlib_honk_verifier + stdlib_client_ivc_verifier) if (NOT DISABLE_AZTEC_VM) list(APPEND DSL_DEPENDENCIES vm2) diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.cpp index 1a056a592e41..8ca8bf7d1da0 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.cpp @@ -7,16 +7,20 @@ #include "acir_format.hpp" #include "barretenberg/common/assert.hpp" +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/common/log.hpp" -#include "barretenberg/common/op_count.hpp" #include "barretenberg/common/throw_or_abort.hpp" +#include 
"barretenberg/dsl/acir_format/civc_recursion_constraints.hpp" +#include "barretenberg/dsl/acir_format/ecdsa_constraints.hpp" #include "barretenberg/dsl/acir_format/honk_recursion_constraint.hpp" -#include "barretenberg/dsl/acir_format/ivc_recursion_constraint.hpp" +#include "barretenberg/dsl/acir_format/pg_recursion_constraint.hpp" #include "barretenberg/dsl/acir_format/proof_surgeon.hpp" #include "barretenberg/flavor/flavor.hpp" -#include "barretenberg/honk/proving_key_inspector.hpp" +#include "barretenberg/honk/prover_instance_inspector.hpp" #include "barretenberg/stdlib/eccvm_verifier/verifier_commitment_key.hpp" #include "barretenberg/stdlib/primitives/curves/grumpkin.hpp" +#include "barretenberg/stdlib/primitives/curves/secp256k1.hpp" +#include "barretenberg/stdlib/primitives/curves/secp256r1.hpp" #include "barretenberg/stdlib/primitives/field/field_conversion.hpp" #include "barretenberg/stdlib/primitives/pairing_points.hpp" #include "barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp" @@ -31,14 +35,16 @@ namespace acir_format { using namespace bb; -template class DSLBigInts; -template class DSLBigInts; +template +void perform_full_IPA_verification(Builder& builder, + const std::vector>>& nested_ipa_claims, + const std::vector>& nested_ipa_proofs); template -void handle_IPA_accumulation(Builder& builder, - const std::vector>>& nested_ipa_claims, - const std::vector>& nested_ipa_proofs, - bool is_root_rollup); +std::pair>, HonkProof> handle_IPA_accumulation( + Builder& builder, + const std::vector>>& nested_ipa_claims, + const std::vector>& nested_ipa_proofs); template struct HonkRecursionConstraintsOutput { using PairingPoints = stdlib::recursion::PairingPoints; @@ -46,6 +52,33 @@ template struct HonkRecursionConstraintsOutput { std::vector>> nested_ipa_claims; std::vector> nested_ipa_proofs; bool is_root_rollup = false; + + template + void update(T& other, bool update_ipa_data) + requires(std::is_same_v> || + std::is_same_v>) + { + // Update points 
accumulator + if (this->points_accumulator.has_data) { + this->points_accumulator.aggregate(other.points_accumulator); + } else { + this->points_accumulator = other.points_accumulator; + } + + if (update_ipa_data) { + if constexpr (std::is_same_v>) { + // Update ipa proofs and claims + this->nested_ipa_proofs.push_back(other.ipa_proof); + this->nested_ipa_claims.push_back(other.ipa_claim); + } else { + // Update ipa proofs and claims (if other has no proofs/claims, we are not appending anything) + this->nested_ipa_proofs.insert( + this->nested_ipa_proofs.end(), other.nested_ipa_proofs.begin(), other.nested_ipa_proofs.end()); + this->nested_ipa_claims.insert( + this->nested_ipa_claims.end(), other.nested_ipa_claims.begin(), other.nested_ipa_claims.end()); + } + } + } }; template @@ -152,7 +185,7 @@ void build_constraints(Builder& builder, AcirProgram& program, const ProgramMeta // Add ECDSA k1 constraints for (size_t i = 0; i < constraint_system.ecdsa_k1_constraints.size(); ++i) { const auto& constraint = constraint_system.ecdsa_k1_constraints.at(i); - create_ecdsa_k1_verify_constraints(builder, constraint, has_valid_witness_assignments); + create_ecdsa_verify_constraints>(builder, constraint, has_valid_witness_assignments); gate_counter.track_diff(constraint_system.gates_per_opcode, constraint_system.original_opcode_indices.ecdsa_k1_constraints.at(i)); } @@ -160,7 +193,7 @@ void build_constraints(Builder& builder, AcirProgram& program, const ProgramMeta // Add ECDSA r1 constraints for (size_t i = 0; i < constraint_system.ecdsa_r1_constraints.size(); ++i) { const auto& constraint = constraint_system.ecdsa_r1_constraints.at(i); - create_ecdsa_r1_verify_constraints(builder, constraint, has_valid_witness_assignments); + create_ecdsa_verify_constraints>(builder, constraint, has_valid_witness_assignments); gate_counter.track_diff(constraint_system.gates_per_opcode, constraint_system.original_opcode_indices.ecdsa_r1_constraints.at(i)); } @@ -225,30 +258,6 @@ void 
build_constraints(Builder& builder, AcirProgram& program, const ProgramMeta } } - // Add big_int constraints - DSLBigInts dsl_bigints; - dsl_bigints.set_builder(&builder); - for (size_t i = 0; i < constraint_system.bigint_from_le_bytes_constraints.size(); ++i) { - const auto& constraint = constraint_system.bigint_from_le_bytes_constraints.at(i); - create_bigint_from_le_bytes_constraint(builder, constraint, dsl_bigints); - gate_counter.track_diff(constraint_system.gates_per_opcode, - constraint_system.original_opcode_indices.bigint_from_le_bytes_constraints.at(i)); - } - - for (size_t i = 0; i < constraint_system.bigint_operations.size(); ++i) { - const auto& constraint = constraint_system.bigint_operations[i]; - create_bigint_operations_constraint(constraint, dsl_bigints, has_valid_witness_assignments); - gate_counter.track_diff(constraint_system.gates_per_opcode, - constraint_system.original_opcode_indices.bigint_operations[i]); - } - - for (size_t i = 0; i < constraint_system.bigint_to_le_bytes_constraints.size(); ++i) { - const auto& constraint = constraint_system.bigint_to_le_bytes_constraints.at(i); - create_bigint_to_le_bytes_constraint(builder, constraint, dsl_bigints); - gate_counter.track_diff(constraint_system.gates_per_opcode, - constraint_system.original_opcode_indices.bigint_to_le_bytes_constraints.at(i)); - } - // assert equals for (size_t i = 0; i < constraint_system.assert_equalities.size(); ++i) { const auto& constraint = constraint_system.assert_equalities.at(i); @@ -258,79 +267,166 @@ void build_constraints(Builder& builder, AcirProgram& program, const ProgramMeta } // RecursionConstraints + bool has_honk_recursion_constraints = !constraint_system.honk_recursion_constraints.empty(); + bool has_avm_recursion_constraints = !constraint_system.avm_recursion_constraints.empty(); + bool has_pg_recursion_constraints = !constraint_system.pg_recursion_constraints.empty(); + bool has_civc_recursion_constraints = 
!constraint_system.civc_recursion_constraints.empty(); + if constexpr (IsMegaBuilder) { - if (!constraint_system.honk_recursion_constraints.empty()) { + // We shouldn't have both honk recursion constraints and pg recursion constraints. + BB_ASSERT_EQ(!has_honk_recursion_constraints || !has_pg_recursion_constraints, + true, + "Invalid circuit: both honk and ivc recursion constraints present."); + + // AVM constraints are not handled when using MegaBuilder + if (has_avm_recursion_constraints) { + info("WARNING: this circuit contains unhandled avm_recursion_constraints!"); + } + + if (has_honk_recursion_constraints) { HonkRecursionConstraintsOutput output = process_honk_recursion_constraints( builder, constraint_system, has_valid_witness_assignments, gate_counter); - output.points_accumulator.set_public(); + + // Propagate pairing points + stdlib::recursion::honk::DefaultIO inputs; + inputs.pairing_inputs = output.points_accumulator; + inputs.set_public(); + } else if (has_pg_recursion_constraints) { + process_pg_recursion_constraints( + builder, constraint_system, metadata.ivc, has_valid_witness_assignments, gate_counter); + } else { + // If its an app circuit that has no recursion constraints, add default pairing points to public inputs. 
+ stdlib::recursion::honk::AppIO::add_default(builder); } - if (!constraint_system.avm_recursion_constraints.empty()) { - info("WARNING: this circuit contains unhandled avm_recursion_constraints!"); + } else { + bool is_recursive_circuit = metadata.honk_recursion != 0; + bool has_pairing_points = + has_honk_recursion_constraints || has_civc_recursion_constraints || has_avm_recursion_constraints; + + // We only handle: + // - CIVC recursion constraints (Private Base Rollup) + // - HONK + AVM recursion constraints (Public Base Rollup) + // - HONK recursion constraints + // - AVM recursion constraints + // However, as mock protocol circuits use CIVC + AVM (mock Public Base Rollup), instead of throwing an assert we + // return a vinfo for the case of CIVC + AVM + BB_ASSERT_EQ(has_pg_recursion_constraints, + false, + "Invalid circuit: pg recursion constraints are present with UltraBuilder."); + BB_ASSERT_EQ(!(has_civc_recursion_constraints && has_honk_recursion_constraints), + true, + "Invalid circuit: both honk and civc recursion constraints are present."); + BB_ASSERT_EQ( + !(has_honk_recursion_constraints || has_civc_recursion_constraints || has_avm_recursion_constraints) || + is_recursive_circuit, + true, + "Invalid circuit: honk, civc, or avm recursion constraints present but the circuit is not recursive."); + if (has_civc_recursion_constraints && has_avm_recursion_constraints) { + vinfo("WARNING: both civc and avm recursion constraints are present. 
While we support this combination, we " + "expect to see it only in a mock " + "circuit."); } - if (!constraint_system.ivc_recursion_constraints.empty()) { - process_ivc_recursion_constraints( - builder, constraint_system, metadata.ivc, has_valid_witness_assignments, gate_counter); + + // Container for data to be propagated + HonkRecursionConstraintsOutput honk_output; + + if (has_honk_recursion_constraints) { + honk_output = process_honk_recursion_constraints( + builder, constraint_system, has_valid_witness_assignments, gate_counter); } - // We shouldn't have both honk recursion constraints and ivc recursion constraints. - ASSERT(constraint_system.honk_recursion_constraints.empty() || - constraint_system.ivc_recursion_constraints.empty(), - "Invalid circuit: both honk and ivc recursion constraints present."); - // If its an app circuit that has no recursion constraints, add default pairing points to public inputs. - if (constraint_system.honk_recursion_constraints.empty() && - constraint_system.ivc_recursion_constraints.empty()) { - stdlib::recursion::honk::AppIO::add_default(builder); + if (has_civc_recursion_constraints) { + honk_output = process_civc_recursion_constraints( + builder, constraint_system, has_valid_witness_assignments, gate_counter); } - } else { - HonkRecursionConstraintsOutput honk_output = - process_honk_recursion_constraints(builder, constraint_system, has_valid_witness_assignments, gate_counter); #ifndef DISABLE_AZTEC_VM - HonkRecursionConstraintsOutput avm_output = - process_avm_recursion_constraints(builder, constraint_system, has_valid_witness_assignments, gate_counter); - - // This is a little annoying, but we should probably be explicit about these things so that its obvious how the - // pairing points are being aggregated. 
- if (honk_output.points_accumulator.has_data) { - if (avm_output.points_accumulator.has_data) { - honk_output.points_accumulator.aggregate(avm_output.points_accumulator); - } - } else { - if (avm_output.points_accumulator.has_data) { - honk_output.points_accumulator = avm_output.points_accumulator; - } + if (has_avm_recursion_constraints) { + HonkRecursionConstraintsOutput avm_output = process_avm_recursion_constraints( + builder, constraint_system, has_valid_witness_assignments, gate_counter); + + // Update honk_output: append (potentially 0) ipa claims and proofs. + // If honk output has points accumulator, aggregate it with the one coming from the avm. Otherwise, override + // it with the avm's one. + honk_output.update(avm_output, /*update_ipa_data=*/!avm_output.nested_ipa_claims.empty()); } - // Append the (potentially 0) ipa claims and proofs to honk_output - honk_output.nested_ipa_claims.insert(honk_output.nested_ipa_claims.end(), - avm_output.nested_ipa_claims.begin(), - avm_output.nested_ipa_claims.end()); - honk_output.nested_ipa_proofs.insert(honk_output.nested_ipa_proofs.end(), - avm_output.nested_ipa_proofs.begin(), - avm_output.nested_ipa_proofs.end()); #endif - // If the circuit has either honk or avm recursion constraints, add the aggregation object. Otherwise, add a - // default one if the circuit is recursive and honk_recursion is true. - if (!constraint_system.honk_recursion_constraints.empty() || - !constraint_system.avm_recursion_constraints.empty()) { - ASSERT(metadata.honk_recursion != 0); - honk_output.points_accumulator.set_public(); - } else if (metadata.honk_recursion != 0) { - // Make sure the verification key records the public input indices of the - // final recursion output. - PairingPoints::add_default_to_public_inputs(builder); - } - // Accumulate the IPA claims and set it to be public inputs - // Either we're proving with RollupHonk (honk_recursion=2) or its the root rollup. 
- if (metadata.honk_recursion == 2 || honk_output.is_root_rollup) { - handle_IPA_accumulation( - builder, honk_output.nested_ipa_claims, honk_output.nested_ipa_proofs, honk_output.is_root_rollup); + if (metadata.honk_recursion == 2) { + // Proving with UltraRollupFlavor + + // Propagate pairing points + if (has_pairing_points) { + honk_output.points_accumulator.set_public(); + } else { + PairingPoints::add_default_to_public_inputs(builder); + } + + // Handle IPA + auto [ipa_claim, ipa_proof] = + handle_IPA_accumulation(builder, honk_output.nested_ipa_claims, honk_output.nested_ipa_proofs); + + // Set proof + builder.ipa_proof = ipa_proof; + + // Propagate IPA claim + ipa_claim.set_public(); } else { - // We shouldn't accidentally have IPA proofs otherwise. - BB_ASSERT_EQ( - honk_output.nested_ipa_proofs.size(), static_cast(0), "IPA proofs present when not expected."); + // If it is a recursive circuit, propagate pairing points + if (metadata.honk_recursion == 1) { + using IO = bb::stdlib::recursion::honk::DefaultIO; + + if (has_pairing_points) { + IO inputs; + inputs.pairing_inputs = honk_output.points_accumulator; + inputs.set_public(); + } else { + IO::add_default(builder); + } + } + + // Handle IPA + if (honk_output.is_root_rollup) { + perform_full_IPA_verification(builder, honk_output.nested_ipa_claims, honk_output.nested_ipa_proofs); + } else { + // We shouldn't accidentally have IPA proofs otherwise. 
+ BB_ASSERT_EQ(honk_output.nested_ipa_proofs.size(), + static_cast(0), + "IPA proofs present when not expected."); + } } } +} // namespace acir_format + +/** + * @brief Perform full recursive IPA verification + * + * @tparam Builder + * @param builder + * @param nested_ipa_claims + * @param nested_ipa_proofs + */ +template +void perform_full_IPA_verification(Builder& builder, + const std::vector>>& nested_ipa_claims, + const std::vector>& nested_ipa_proofs) +{ + using StdlibTranscript = bb::stdlib::recursion::honk::UltraStdlibTranscript; + + BB_ASSERT_EQ( + nested_ipa_claims.size(), nested_ipa_proofs.size(), "Mismatched number of nested IPA claims and proofs."); + BB_ASSERT_EQ(nested_ipa_claims.size(), 2U, "Root rollup must have two nested IPA claims."); + + auto [ipa_claim, ipa_proof] = handle_IPA_accumulation(builder, nested_ipa_claims, nested_ipa_proofs); + + // IPA verification + VerifierCommitmentKey> verifier_commitment_key( + &builder, 1 << CONST_ECCVM_LOG_N, VerifierCommitmentKey(1 << CONST_ECCVM_LOG_N)); + + auto accumulated_ipa_transcript = std::make_shared(); + accumulated_ipa_transcript->load_proof(stdlib::Proof(builder, ipa_proof)); + IPA>::full_verify_recursive( + verifier_commitment_key, ipa_claim, accumulated_ipa_transcript); } /** @@ -340,21 +436,19 @@ void build_constraints(Builder& builder, AcirProgram& program, const ProgramMeta * @param builder * @param nested_ipa_claims * @param nested_ipa_proofs - * @param is_root_rollup */ template -void handle_IPA_accumulation(Builder& builder, - const std::vector>>& nested_ipa_claims, - const std::vector>& nested_ipa_proofs, - bool is_root_rollup) +std::pair>, HonkProof> handle_IPA_accumulation( + Builder& builder, + const std::vector>>& nested_ipa_claims, + const std::vector>& nested_ipa_proofs) { BB_ASSERT_EQ( nested_ipa_claims.size(), nested_ipa_proofs.size(), "Mismatched number of nested IPA claims and proofs."); + OpeningClaim> final_ipa_claim; HonkProof final_ipa_proof; - if (is_root_rollup) { - 
BB_ASSERT_EQ(nested_ipa_claims.size(), 2U, "Root rollup must have two nested IPA claims."); - } + if (nested_ipa_claims.size() == 2) { // If we have two claims, accumulate. CommitmentKey commitment_key(1 << CONST_ECCVM_LOG_N); @@ -366,31 +460,21 @@ void handle_IPA_accumulation(Builder& builder, ipa_transcript_2->load_proof(nested_ipa_proofs[1]); auto [ipa_claim, ipa_proof] = IPA>::accumulate( commitment_key, ipa_transcript_1, nested_ipa_claims[0], ipa_transcript_2, nested_ipa_claims[1]); - // If this is the root rollup, do full IPA verification - if (is_root_rollup) { - VerifierCommitmentKey> verifier_commitment_key( - &builder, 1 << CONST_ECCVM_LOG_N, VerifierCommitmentKey(1 << CONST_ECCVM_LOG_N)); - // do full IPA verification - auto accumulated_ipa_transcript = std::make_shared(); - accumulated_ipa_transcript->load_proof(stdlib::Proof(builder, ipa_proof)); - IPA>::full_verify_recursive( - verifier_commitment_key, ipa_claim, accumulated_ipa_transcript); - } else { - final_ipa_claim = ipa_claim; - final_ipa_proof = ipa_proof; - } + + final_ipa_claim = ipa_claim; + final_ipa_proof = ipa_proof; } else if (nested_ipa_claims.size() == 1) { // If we have one claim, just forward it along. final_ipa_claim = nested_ipa_claims[0]; // This conversion looks suspicious but there's no need to make this an output of the circuit since // its a proof that will be checked anyway. final_ipa_proof = nested_ipa_proofs[0].get_value(); - } else if (nested_ipa_claims.size() == 0) { + } else if (nested_ipa_claims.empty()) { // If we don't have any claims, we may need to inject a fake one if we're proving with // UltraRollupHonk, indicated by the manual setting of the honk_recursion metadata to 2. 
info("Proving with UltraRollupHonk but no IPA claims exist."); auto [stdlib_opening_claim, ipa_proof] = - IPA>::create_fake_ipa_claim_and_proof(builder); + IPA>::create_random_valid_ipa_claim_and_proof(builder); final_ipa_claim = stdlib_opening_claim; final_ipa_proof = ipa_proof; @@ -398,15 +482,11 @@ void handle_IPA_accumulation(Builder& builder, // We don't support and shouldn't expect to support circuits with 3+ IPA recursive verifiers. throw_or_abort("Too many nested IPA claims to accumulate"); } - // If we aren't in the root rollup, we should have an output IPA proof. - if (!is_root_rollup) { - BB_ASSERT_EQ(final_ipa_proof.size(), IPA_PROOF_LENGTH); - // Propagate the IPA claim via the public inputs of the outer circuit - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1306): Determine the right - // location/entity to handle this IPA data propagation. - final_ipa_claim.set_public(); - builder.ipa_proof = final_ipa_proof; - } + + BB_ASSERT_EQ(final_ipa_proof.size(), IPA_PROOF_LENGTH); + + // Return the IPA claim and proof + return { final_ipa_claim, final_ipa_proof }; } template @@ -420,46 +500,31 @@ process_honk_recursion_constraints(Builder& builder, // Add recursion constraints size_t idx = 0; for (auto& constraint : constraint_system.honk_recursion_constraints) { + HonkRecursionConstraintOutput honk_recursion_constraint; + if (constraint.proof_type == HONK_ZK) { - auto honk_recursion_constraint = create_honk_recursion_constraints>( + honk_recursion_constraint = create_honk_recursion_constraints>( builder, constraint, has_valid_witness_assignments); - - if (output.points_accumulator.has_data) { - output.points_accumulator.aggregate(honk_recursion_constraint.points_accumulator); - } else { - output.points_accumulator = honk_recursion_constraint.points_accumulator; - } - } else if (constraint.proof_type == HONK) { - auto honk_recursion_constraint = create_honk_recursion_constraints>( + honk_recursion_constraint = 
create_honk_recursion_constraints>( builder, constraint, has_valid_witness_assignments); - if (output.points_accumulator.has_data) { - output.points_accumulator.aggregate(honk_recursion_constraint.points_accumulator); - } else { - output.points_accumulator = honk_recursion_constraint.points_accumulator; - } } else if (constraint.proof_type == ROLLUP_HONK || constraint.proof_type == ROOT_ROLLUP_HONK) { if constexpr (!IsUltraBuilder) { throw_or_abort("Rollup Honk proof type not supported on MegaBuilder"); } else { - if (constraint.proof_type == ROOT_ROLLUP_HONK) { - output.is_root_rollup = true; - } - auto honk_recursion_constraint = - create_honk_recursion_constraints>( - builder, constraint, has_valid_witness_assignments); - if (output.points_accumulator.has_data) { - output.points_accumulator.aggregate(honk_recursion_constraint.points_accumulator); - } else { - output.points_accumulator = honk_recursion_constraint.points_accumulator; - } - output.nested_ipa_claims.push_back(honk_recursion_constraint.ipa_claim); - output.nested_ipa_proofs.push_back(honk_recursion_constraint.ipa_proof); + honk_recursion_constraint = create_honk_recursion_constraints>( + builder, constraint, has_valid_witness_assignments); } } else { throw_or_abort("Invalid Honk proof type"); } + // Update output + output.update(honk_recursion_constraint, + /*update_ipa_data=*/constraint.proof_type == ROLLUP_HONK || + constraint.proof_type == ROOT_ROLLUP_HONK); + output.is_root_rollup = constraint.proof_type == ROOT_ROLLUP_HONK; + gate_counter.track_diff(constraint_system.gates_per_opcode, constraint_system.original_opcode_indices.honk_recursion_constraints.at(idx++)); } @@ -468,11 +533,11 @@ process_honk_recursion_constraints(Builder& builder, return output; } -void process_ivc_recursion_constraints(MegaCircuitBuilder& builder, - AcirFormat& constraints, - std::shared_ptr ivc, - bool has_valid_witness_assignments, - GateCounter& gate_counter) +void 
process_pg_recursion_constraints(MegaCircuitBuilder& builder, + AcirFormat& constraints, + std::shared_ptr ivc, + bool has_valid_witness_assignments, + GateCounter& gate_counter) { using StdlibVerificationKey = ClientIVC::RecursiveVerificationKey; using StdlibVKAndHash = ClientIVC::RecursiveVKAndHash; @@ -481,11 +546,11 @@ void process_ivc_recursion_constraints(MegaCircuitBuilder& builder, // If an ivc instance is not provided, we mock one with the state required to construct the recursion // constraints present in the program. This is for when we write_vk. if (ivc == nullptr) { - ivc = create_mock_ivc_from_constraints(constraints.ivc_recursion_constraints, { AZTEC_TRACE_STRUCTURE }); + ivc = create_mock_ivc_from_constraints(constraints.pg_recursion_constraints, { AZTEC_TRACE_STRUCTURE }); } // We expect the length of the internal verification queue to match the number of ivc recursion constraints - BB_ASSERT_EQ(constraints.ivc_recursion_constraints.size(), + BB_ASSERT_EQ(constraints.pg_recursion_constraints.size(), ivc->verification_queue.size(), "WARNING: Mismatch in number of recursive verifications during kernel creation!"); @@ -493,8 +558,7 @@ void process_ivc_recursion_constraints(MegaCircuitBuilder& builder, // that the present kernel circuit is constructed correctly. (Used for constructing VKs without witnesses). 
if (!has_valid_witness_assignments) { // Create stdlib representations of each {proof, vkey} pair to be recursively verified - for (auto [constraint, queue_entry] : - zip_view(constraints.ivc_recursion_constraints, ivc->verification_queue)) { + for (auto [constraint, queue_entry] : zip_view(constraints.pg_recursion_constraints, ivc->verification_queue)) { populate_dummy_vk_in_constraint(builder, queue_entry.honk_vk, constraint.key); builder.set_variable(constraint.key_hash, queue_entry.honk_vk->hash()); } @@ -502,8 +566,8 @@ void process_ivc_recursion_constraints(MegaCircuitBuilder& builder, // Construct a stdlib verification key for each constraint based on the verification key witness indices therein std::vector> stdlib_vk_and_hashs; - stdlib_vk_and_hashs.reserve(constraints.ivc_recursion_constraints.size()); - for (const auto& constraint : constraints.ivc_recursion_constraints) { + stdlib_vk_and_hashs.reserve(constraints.pg_recursion_constraints.size()); + for (const auto& constraint : constraints.pg_recursion_constraints) { stdlib_vk_and_hashs.push_back( std::make_shared(std::make_shared( StdlibVerificationKey::from_witness_indices(builder, constraint.key)), @@ -516,11 +580,12 @@ void process_ivc_recursion_constraints(MegaCircuitBuilder& builder, // internal verification queue. This ensures that the witnesses utilized in constraints generated based on acir // are properly connected to the constraints generated herein via the ivc scheme (e.g. recursive verifications). 
for (auto [constraint, queue_entry] : - zip_view(constraints.ivc_recursion_constraints, ivc->stdlib_verification_queue)) { + zip_view(constraints.pg_recursion_constraints, ivc->stdlib_verification_queue)) { // Get the witness indices for the public inputs contained within the proof in the verification queue - std::vector public_input_indices = ProofSurgeon::get_public_inputs_witness_indices_from_proof( - queue_entry.proof, constraint.public_inputs.size()); + std::vector public_input_indices = + ProofSurgeon::get_public_inputs_witness_indices_from_proof(queue_entry.proof, + constraint.public_inputs.size()); // Assert equality between the internal public input witness indices and those in the acir constraint for (auto [witness_idx, constraint_witness_idx] : zip_view(public_input_indices, constraint.public_inputs)) { @@ -531,10 +596,33 @@ void process_ivc_recursion_constraints(MegaCircuitBuilder& builder, // Complete the kernel circuit with all required recursive verifications, databus consistency checks etc. 
ivc->complete_kernel_circuit_logic(builder); - // Note: we can't easily track the gate contribution from each individual ivc_recursion_constraint since they + // Note: we can't easily track the gate contribution from each individual pg_recursion_constraint since they // are handled simultaneously in the above function call; instead we track the total contribution gate_counter.track_diff(constraints.gates_per_opcode, - constraints.original_opcode_indices.ivc_recursion_constraints.at(0)); + constraints.original_opcode_indices.pg_recursion_constraints.at(0)); +} + +[[nodiscard("IPA claim and Pairing points should be accumulated")]] HonkRecursionConstraintsOutput +process_civc_recursion_constraints(Builder& builder, + AcirFormat& constraint_system, + bool has_valid_witness_assignments, + GateCounter& gate_counter) +{ + HonkRecursionConstraintsOutput output; + // Add recursion constraints + size_t idx = 0; + for (auto& constraint : constraint_system.civc_recursion_constraints) { + HonkRecursionConstraintOutput honk_output = + create_civc_recursion_constraints(builder, constraint, has_valid_witness_assignments); + + // Update the output + output.update(honk_output, /*update_ipa_data=*/true); + + gate_counter.track_diff(constraint_system.gates_per_opcode, + constraint_system.original_opcode_indices.civc_recursion_constraints.at(idx++)); + } + + return output; } #ifndef DISABLE_AZTEC_VM @@ -548,17 +636,11 @@ process_avm_recursion_constraints(Builder& builder, // Add recursion constraints size_t idx = 0; for (auto& constraint : constraint_system.avm_recursion_constraints) { - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1303): Utilize the version of this method that - // employs the Goblinized AVM recursive verifier. 
HonkRecursionConstraintOutput avm2_recursion_output = create_avm2_recursion_constraints_goblin(builder, constraint, has_valid_witness_assignments); - if (output.points_accumulator.has_data) { - output.points_accumulator.aggregate(avm2_recursion_output.points_accumulator); - } else { - output.points_accumulator = avm2_recursion_output.points_accumulator; - } - output.nested_ipa_claims.push_back(avm2_recursion_output.ipa_claim); - output.nested_ipa_proofs.push_back(avm2_recursion_output.ipa_proof); + + // Update the output + output.update(avm2_recursion_output, /*update_ipa_data=*/true); gate_counter.track_diff(constraint_system.gates_per_opcode, constraint_system.original_opcode_indices.avm_recursion_constraints.at(idx++)); @@ -575,11 +657,11 @@ process_avm_recursion_constraints(Builder& builder, */ template <> UltraCircuitBuilder create_circuit(AcirProgram& program, const ProgramMetadata& metadata) { - PROFILE_THIS(); + BB_BENCH(); AcirFormat& constraints = program.constraints; WitnessVector& witness = program.witness; - Builder builder{ metadata.size_hint, witness, constraints.public_inputs, constraints.varnum, metadata.recursive }; + Builder builder{ metadata.size_hint, witness, constraints.public_inputs, constraints.varnum }; build_constraints(builder, program, metadata); @@ -596,7 +678,7 @@ template <> UltraCircuitBuilder create_circuit(AcirProgram& program, const Progr */ template <> MegaCircuitBuilder create_circuit(AcirProgram& program, const ProgramMetadata& metadata) { - PROFILE_THIS(); + BB_BENCH(); AcirFormat& constraints = program.constraints; WitnessVector& witness = program.witness; diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.hpp index 810a679a750f..fd42bf57d8cf 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.hpp @@ -14,13 +14,11 @@ #include 
"barretenberg/client_ivc/client_ivc.hpp" #include "barretenberg/common/slab_allocator.hpp" #include "barretenberg/serialize/msgpack.hpp" -#include "bigint_constraint.hpp" #include "blake2s_constraint.hpp" #include "blake3_constraint.hpp" #include "block_constraint.hpp" #include "ec_operations.hpp" -#include "ecdsa_secp256k1.hpp" -#include "ecdsa_secp256r1.hpp" +#include "ecdsa_constraints.hpp" #include "honk_recursion_constraint.hpp" #include "keccak_constraint.hpp" #include "logic_constraint.hpp" @@ -56,10 +54,8 @@ struct AcirFormatOriginalOpcodeIndices { std::vector ec_add_constraints; std::vector honk_recursion_constraints; std::vector avm_recursion_constraints; - std::vector ivc_recursion_constraints; - std::vector bigint_from_le_bytes_constraints; - std::vector bigint_to_le_bytes_constraints; - std::vector bigint_operations; + std::vector pg_recursion_constraints; + std::vector civc_recursion_constraints; std::vector assert_equalities; std::vector poly_triple_constraints; std::vector quad_constraints; @@ -88,8 +84,8 @@ struct AcirFormat { std::vector range_constraints; std::vector aes128_constraints; std::vector sha256_compression; - std::vector ecdsa_k1_constraints; - std::vector ecdsa_r1_constraints; + std::vector ecdsa_k1_constraints; + std::vector ecdsa_r1_constraints; std::vector blake2s_constraints; std::vector blake3_constraints; std::vector keccak_permutations; @@ -98,10 +94,8 @@ struct AcirFormat { std::vector ec_add_constraints; std::vector honk_recursion_constraints; std::vector avm_recursion_constraints; - std::vector ivc_recursion_constraints; - std::vector bigint_from_le_bytes_constraints; - std::vector bigint_to_le_bytes_constraints; - std::vector bigint_operations; + std::vector pg_recursion_constraints; + std::vector civc_recursion_constraints; std::vector> assert_equalities; // A standard plonk arithmetic constraint, as defined in the poly_triple struct, consists of selector values @@ -119,14 +113,14 @@ struct AcirFormat { // Number of gates 
added to the circuit per original opcode. // Has length equal to num_acir_opcodes. - std::vector gates_per_opcode = {}; + std::vector gates_per_opcode; // Set of constrained witnesses - std::set constrained_witness = {}; + std::set constrained_witness; // map witness with their minimal bit-range - std::map minimal_range = {}; + std::map minimal_range; // map witness with their minimal bit-range implied by array operations - std::map index_range = {}; + std::map index_range; // Indices of the original opcode that originated each constraint in AcirFormat. AcirFormatOriginalOpcodeIndices original_opcode_indices; @@ -148,14 +142,12 @@ struct AcirFormat { ec_add_constraints, honk_recursion_constraints, avm_recursion_constraints, - ivc_recursion_constraints, + pg_recursion_constraints, + civc_recursion_constraints, poly_triple_constraints, quad_constraints, big_quad_constraints, block_constraints, - bigint_from_le_bytes_constraints, - bigint_to_le_bytes_constraints, - bigint_operations, assert_equalities); friend bool operator==(AcirFormat const& lhs, AcirFormat const& rhs) = default; diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.test.cpp index 5bb4b1f9785b..c97cb61ce74e 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.test.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.test.cpp @@ -8,7 +8,6 @@ #include "barretenberg/op_queue/ecc_op_queue.hpp" #include "barretenberg/serialize/test_helper.hpp" -#include "ecdsa_secp256k1.hpp" using namespace bb; using namespace bb::crypto; @@ -36,29 +35,7 @@ TEST_F(AcirFormatTests, TestASingleConstraintNoPubInputs) .varnum = 4, .num_acir_opcodes = 1, .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = 
{}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, .poly_triple_constraints = { constraint }, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, .original_opcode_indices = create_empty_original_opcode_indices(), }; mock_opcode_indices(constraint_system); @@ -151,27 +128,7 @@ TEST_F(AcirFormatTests, TestLogicGateFromNoirCircuit) .public_inputs = { 1 }, .logic_constraints = { logic_constraint }, .range_constraints = { range_a, range_b }, - .aes128_constraints = {}, - .sha256_compression = {}, - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, .poly_triple_constraints = { expr_a, expr_b, expr_c, expr_d }, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, .original_opcode_indices = create_empty_original_opcode_indices(), }; mock_opcode_indices(constraint_system); @@ -225,29 +182,7 @@ TEST_F(AcirFormatTests, TestKeccakPermutation) .varnum = 51, .num_acir_opcodes = 1, .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, .keccak_permutations = { keccak_permutation }, - 
.poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, - .poly_triple_constraints = {}, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, .original_opcode_indices = create_empty_original_opcode_indices(), }; mock_opcode_indices(constraint_system); @@ -293,29 +228,7 @@ TEST_F(AcirFormatTests, TestCollectsGateCounts) .varnum = 4, .num_acir_opcodes = 2, .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, .poly_triple_constraints = { first_gate, second_gate }, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, .original_opcode_indices = create_empty_original_opcode_indices(), }; mock_opcode_indices(constraint_system); @@ -417,29 +330,8 @@ TEST_F(AcirFormatTests, TestBigAdd) .varnum = static_cast(num_variables + 1), .num_acir_opcodes = 1, .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = 
{}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, .poly_triple_constraints = { assert_equal }, - .quad_constraints = {}, .big_quad_constraints = { quad_constraint }, - .block_constraints = {}, .original_opcode_indices = create_empty_original_opcode_indices(), }; mock_opcode_indices(constraint_system); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format_mocks.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format_mocks.cpp index 98ee7d7b9537..d45163d3ad08 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format_mocks.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format_mocks.cpp @@ -23,10 +23,8 @@ acir_format::AcirFormatOriginalOpcodeIndices create_empty_original_opcode_indice .ec_add_constraints = {}, .honk_recursion_constraints = {}, .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, + .pg_recursion_constraints = {}, + .civc_recursion_constraints = {}, .assert_equalities = {}, .poly_triple_constraints = {}, .quad_constraints = {}, @@ -79,17 +77,11 @@ void mock_opcode_indices(acir_format::AcirFormat& constraint_system) for (size_t i = 0; i < constraint_system.avm_recursion_constraints.size(); i++) { constraint_system.original_opcode_indices.avm_recursion_constraints.push_back(current_opcode++); } - for (size_t i = 0; i < constraint_system.ivc_recursion_constraints.size(); i++) { - constraint_system.original_opcode_indices.ivc_recursion_constraints.push_back(current_opcode++); + for (size_t i = 0; i < constraint_system.pg_recursion_constraints.size(); i++) { + 
constraint_system.original_opcode_indices.pg_recursion_constraints.push_back(current_opcode++); } - for (size_t i = 0; i < constraint_system.bigint_from_le_bytes_constraints.size(); i++) { - constraint_system.original_opcode_indices.bigint_from_le_bytes_constraints.push_back(current_opcode++); - } - for (size_t i = 0; i < constraint_system.bigint_to_le_bytes_constraints.size(); i++) { - constraint_system.original_opcode_indices.bigint_to_le_bytes_constraints.push_back(current_opcode++); - } - for (size_t i = 0; i < constraint_system.bigint_operations.size(); i++) { - constraint_system.original_opcode_indices.bigint_operations.push_back(current_opcode++); + for (size_t i = 0; i < constraint_system.civc_recursion_constraints.size(); i++) { + constraint_system.original_opcode_indices.civc_recursion_constraints.push_back(current_opcode++); } for (size_t i = 0; i < constraint_system.assert_equalities.size(); i++) { constraint_system.original_opcode_indices.assert_equalities.push_back(current_opcode++); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_integration.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_integration.test.cpp index 421dffed8aa0..afe621936650 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_integration.test.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_integration.test.cpp @@ -1,12 +1,11 @@ #include "barretenberg/client_ivc/client_ivc.hpp" #ifndef __wasm__ -#include "barretenberg/api/exec_pipe.hpp" #include "barretenberg/circuit_checker/circuit_checker.hpp" #include "barretenberg/client_ivc/private_execution_steps.hpp" #include "barretenberg/common/streams.hpp" #include "barretenberg/dsl/acir_format/acir_to_constraint_buf.hpp" -#include "barretenberg/dsl/acir_format/ivc_recursion_constraint.hpp" -#include "barretenberg/honk/proving_key_inspector.hpp" +#include "barretenberg/dsl/acir_format/pg_recursion_constraint.hpp" +#include "barretenberg/honk/prover_instance_inspector.hpp" 
#include #include @@ -16,20 +15,6 @@ using namespace bb; class AcirIntegrationTest : public ::testing::Test { public: - static std::vector get_bytecode(const std::string& bytecodePath) - { - std::filesystem::path filePath = bytecodePath; - if (filePath.extension() == ".json") { - // Try reading json files as if they are a Nargo build artifact - std::string command = "jq -r '.bytecode' \"" + bytecodePath + "\" | base64 -d | gunzip -c"; - return exec_pipe(command); - } - - // For other extensions, assume file is a raw ACIR program - std::string command = "gunzip -c \"" + bytecodePath + "\""; - return exec_pipe(command); - } - // Function to check if a file exists static bool file_exists(const std::string& path) { @@ -61,15 +46,15 @@ class AcirIntegrationTest : public ::testing::Test { using Verifier = UltraVerifier_; using VerificationKey = Flavor::VerificationKey; - auto proving_key = std::make_shared>(builder); - auto verification_key = std::make_shared(proving_key->get_precomputed()); - Prover prover{ proving_key, verification_key }; + auto prover_instance = std::make_shared>(builder); + auto verification_key = std::make_shared(prover_instance->get_precomputed()); + Prover prover{ prover_instance, verification_key }; #ifdef LOG_SIZES builder.blocks.summarize(); info("num gates = ", builder.get_estimated_num_finalized_gates()); info("total circuit size = ", builder.get_estimated_total_circuit_size()); - info("circuit size = ", prover.proving_key->dyadic_size()); - info("log circuit size = ", prover.proving_key->log_dyadic_size()); + info("circuit size = ", prover.prover_instance->dyadic_size()); + info("log circuit size = ", prover.prover_instance->log_dyadic_size()); #endif auto proof = prover.construct_proof(); @@ -100,7 +85,7 @@ class AcirIntegrationTest : public ::testing::Test { val_idx_1, val_idx_2, val_idx_3, - circuit.zero_idx, + circuit.zero_idx(), 1, 1, 1, @@ -511,7 +496,7 @@ TEST_F(AcirIntegrationTest, DISABLED_ClientIVCMsgpackInputs) std::shared_ptr ivc 
= steps.accumulate(); ClientIVC::Proof proof = ivc->prove(); - EXPECT_TRUE(ivc->verify(proof)); + EXPECT_TRUE(ivc->verify(proof, ivc->get_vk())); } /** @@ -538,27 +523,27 @@ TEST_F(AcirIntegrationTest, DISABLED_DummyWitnessVkConsistency) { auto program = program_in; program.witness = {}; // erase the witness to mimmic the "dummy witness" case - auto& ivc_constraints = program.constraints.ivc_recursion_constraints; + auto& ivc_constraints = program.constraints.pg_recursion_constraints; const acir_format::ProgramMetadata metadata{ .ivc = ivc_constraints.empty() ? nullptr : create_mock_ivc_from_constraints(ivc_constraints, trace_settings) }; auto circuit = acir_format::create_circuit(program, metadata); - recomputed_vk_hash = proving_key_inspector::compute_vk_hash(circuit); + recomputed_vk_hash = prover_instance_inspector::compute_vk_hash(circuit); } // Compute the verification key using the genuine witness { auto program = program_in; - auto& ivc_constraints = program.constraints.ivc_recursion_constraints; + auto& ivc_constraints = program.constraints.pg_recursion_constraints; const acir_format::ProgramMetadata metadata{ .ivc = ivc_constraints.empty() ? 
nullptr : create_mock_ivc_from_constraints(ivc_constraints, trace_settings) }; auto circuit = acir_format::create_circuit(program, metadata); - computed_vk_hash = proving_key_inspector::compute_vk_hash(circuit); + computed_vk_hash = prover_instance_inspector::compute_vk_hash(circuit); } // Check that the hashes computed from the dummy witness VK and the genuine witness VK are equal diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_to_constraint_buf.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_to_constraint_buf.cpp index c34abea2c94a..5b4493f111f5 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_to_constraint_buf.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_to_constraint_buf.cpp @@ -12,11 +12,12 @@ #include #include -#include "barretenberg/api/get_bytecode.hpp" #include "barretenberg/common/assert.hpp" #include "barretenberg/common/container.hpp" +#include "barretenberg/common/get_bytecode.hpp" #include "barretenberg/common/map.hpp" #include "barretenberg/common/throw_or_abort.hpp" +#include "barretenberg/dsl/acir_format/ecdsa_constraints.hpp" #include "barretenberg/dsl/acir_format/recursion_constraint.hpp" #include "barretenberg/honk/execution_trace/gate_data.hpp" #include "barretenberg/numeric/uint256/uint256.hpp" @@ -126,6 +127,18 @@ Witnesses::WitnessStack deserialize_witness_stack(std::vector&& buf) &Witnesses::WitnessStack::bincodeDeserialize); } +// TODO(tom): clean this up. 
+uint256_t from_be_bytes(std::vector const& bytes) +{ + BB_ASSERT_EQ(bytes.size(), 32U, "uint256 constructed from bytes array with invalid length"); + uint256_t result = 0; + for (uint8_t byte : bytes) { + result <<= 8; + result |= byte; + } + return result; +} + /** * @brief Construct a poly_tuple for a standard width-3 arithmetic gate from its acir representation * @@ -157,7 +170,7 @@ poly_triple serialize_arithmetic_gate(Acir::Expression const& arg) // Note: mul_terms are tuples of the form {selector_value, witness_idx_1, witness_idx_2} if (!arg.mul_terms.empty()) { const auto& mul_term = arg.mul_terms[0]; - pt.q_m = uint256_t(std::get<0>(mul_term)); + pt.q_m = from_be_bytes(std::get<0>(mul_term)); pt.a = std::get<1>(mul_term).value; pt.b = std::get<2>(mul_term).value; a_set = true; @@ -167,7 +180,7 @@ poly_triple serialize_arithmetic_gate(Acir::Expression const& arg) // If necessary, set values for linears terms q_l * w_l, q_r * w_r and q_o * w_o BB_ASSERT_LTE(arg.linear_combinations.size(), 3U, "We can only accommodate 3 linear terms"); for (const auto& linear_term : arg.linear_combinations) { - fr selector_value(uint256_t(std::get<0>(linear_term))); + fr selector_value(from_be_bytes(std::get<0>(linear_term))); uint32_t witness_idx = std::get<1>(linear_term).value; // If the witness index has not yet been set or if the corresponding linear term is active, set the witness @@ -199,7 +212,7 @@ poly_triple serialize_arithmetic_gate(Acir::Expression const& arg) } // Set constant value q_c - pt.q_c = uint256_t(arg.q_c); + pt.q_c = from_be_bytes(arg.q_c); return pt; } @@ -259,7 +272,7 @@ std::vector> split_into_mul_quad_gates(Acir::Expression const& arg .b_scaling = fr::zero(), .c_scaling = fr::zero(), .d_scaling = fr::zero(), - .const_scaling = fr(uint256_t(arg.q_c)) }; + .const_scaling = fr(from_be_bytes(arg.q_c)) }; // list of witnesses that are part of mul terms std::set all_mul_terms; @@ -275,7 +288,7 @@ std::vector> split_into_mul_quad_gates(Acir::Expression 
const& arg // we add a mul term (if there are some) to every intermediate gate if (current_mul_term != arg.mul_terms.end()) { - mul_gate.mul_scaling = fr(uint256_t(std::get<0>(*current_mul_term))); + mul_gate.mul_scaling = fr(from_be_bytes(std::get<0>(*current_mul_term))); mul_gate.a = std::get<1>(*current_mul_term).value; mul_gate.b = std::get<2>(*current_mul_term).value; mul_gate.a_scaling = fr::zero(); @@ -286,7 +299,7 @@ std::vector> split_into_mul_quad_gates(Acir::Expression const& arg auto w = std::get<1>(lin_term).value; if (w == mul_gate.a) { if (!processed_mul_terms.contains(mul_gate.a)) { - mul_gate.a_scaling = fr(uint256_t(std::get<0>(lin_term))); + mul_gate.a_scaling = fr(from_be_bytes(std::get<0>(lin_term))); processed_mul_terms.insert(w); } if (mul_gate.a == mul_gate.b) { @@ -294,7 +307,7 @@ std::vector> split_into_mul_quad_gates(Acir::Expression const& arg } } else if (w == mul_gate.b) { if (!processed_mul_terms.contains(mul_gate.b)) { - mul_gate.b_scaling = fr(uint256_t(std::get<0>(lin_term))); + mul_gate.b_scaling = fr(from_be_bytes(std::get<0>(lin_term))); processed_mul_terms.insert(w); } break; @@ -312,7 +325,8 @@ std::vector> split_into_mul_quad_gates(Acir::Expression const& arg auto w = std::get<1>(*current_linear_term).value; if (!all_mul_terms.contains(w)) { if (i < max_size) { - assign_linear_term(mul_gate, i, w, fr(uint256_t(std::get<0>(*current_linear_term)))); // * fr(-1))); + assign_linear_term( + mul_gate, i, w, fr(from_be_bytes(std::get<0>(*current_linear_term)))); // * fr(-1))); ++i; } else { // No more available wire, but there is still some linear terms; we need another mul_gate @@ -363,7 +377,7 @@ mul_quad_ serialize_mul_quad_gate(Acir::Expression const& arg) // Note: mul_terms are tuples of the form {selector_value, witness_idx_1, witness_idx_2} if (!arg.mul_terms.empty()) { const auto& mul_term = arg.mul_terms[0]; - quad.mul_scaling = uint256_t(std::get<0>(mul_term)); + quad.mul_scaling = from_be_bytes(std::get<0>(mul_term)); 
quad.a = std::get<1>(mul_term).value; quad.b = std::get<2>(mul_term).value; a_set = true; @@ -371,7 +385,7 @@ mul_quad_ serialize_mul_quad_gate(Acir::Expression const& arg) } // If necessary, set values for linears terms q_l * w_l, q_r * w_r and q_o * w_o for (const auto& linear_term : arg.linear_combinations) { - fr selector_value(uint256_t(std::get<0>(linear_term))); + fr selector_value(from_be_bytes(std::get<0>(linear_term))); uint32_t witness_idx = std::get<1>(linear_term).value; // If the witness index has not yet been set or if the corresponding linear term is active, set the witness @@ -408,7 +422,7 @@ mul_quad_ serialize_mul_quad_gate(Acir::Expression const& arg) } // Set constant value q_c - quad.const_scaling = uint256_t(arg.q_c); + quad.const_scaling = from_be_bytes(arg.q_c); return quad; } @@ -522,7 +536,7 @@ void handle_arithmetic(Acir::Opcode::AssertZero const& arg, AcirFormat& af, size } uint32_t get_witness_from_function_input(Acir::FunctionInput input) { - auto input_witness = std::get(input.input.value); + auto input_witness = std::get(input.value); return input_witness.value.value; } @@ -531,16 +545,16 @@ WitnessOrConstant parse_input(Acir::FunctionInput input) WitnessOrConstant result = std::visit( [&](auto&& e) { using T = std::decay_t; - if constexpr (std::is_same_v) { + if constexpr (std::is_same_v) { return WitnessOrConstant{ .index = e.value.value, .value = bb::fr::zero(), .is_constant = false, }; - } else if constexpr (std::is_same_v) { + } else if constexpr (std::is_same_v) { return WitnessOrConstant{ .index = 0, - .value = uint256_t(e.value), + .value = from_be_bytes(e.value), .is_constant = true, }; } else { @@ -552,7 +566,7 @@ WitnessOrConstant parse_input(Acir::FunctionInput input) .is_constant = true, }; }, - input.input.value); + input.value); return result; } @@ -568,7 +582,7 @@ void handle_blackbox_func_call(Acir::Opcode::BlackBoxFuncCall const& arg, AcirFo .a = lhs_input, .b = rhs_input, .result = arg.output.value, - .num_bits = 
arg.lhs.num_bits, + .num_bits = arg.num_bits, .is_xor_gate = false, }); af.constrained_witness.insert(af.logic_constraints.back().result); @@ -580,7 +594,7 @@ void handle_blackbox_func_call(Acir::Opcode::BlackBoxFuncCall const& arg, AcirFo .a = lhs_input, .b = rhs_input, .result = arg.output.value, - .num_bits = arg.lhs.num_bits, + .num_bits = arg.num_bits, .is_xor_gate = true, }); af.constrained_witness.insert(af.logic_constraints.back().result); @@ -589,15 +603,15 @@ void handle_blackbox_func_call(Acir::Opcode::BlackBoxFuncCall const& arg, AcirFo auto witness_input = get_witness_from_function_input(arg.input); af.range_constraints.push_back(RangeConstraint{ .witness = witness_input, - .num_bits = arg.input.num_bits, + .num_bits = arg.num_bits, }); af.original_opcode_indices.range_constraints.push_back(opcode_index); if (af.minimal_range.contains(witness_input)) { - if (af.minimal_range[witness_input] > arg.input.num_bits) { - af.minimal_range[witness_input] = arg.input.num_bits; + if (af.minimal_range[witness_input] > arg.num_bits) { + af.minimal_range[witness_input] = arg.num_bits; } } else { - af.minimal_range[witness_input] = arg.input.num_bits; + af.minimal_range[witness_input] = arg.num_bits; } } else if constexpr (std::is_same_v) { af.aes128_constraints.push_back(AES128Constraint{ @@ -626,7 +640,7 @@ void handle_blackbox_func_call(Acir::Opcode::BlackBoxFuncCall const& arg, AcirFo [](auto& e) { return Blake2sInput{ .blackbox_input = parse_input(e), - .num_bits = e.num_bits, + .num_bits = 8, }; }), .result = transform::map(*arg.outputs, [](auto& e) { return e.value; }), @@ -637,13 +651,9 @@ void handle_blackbox_func_call(Acir::Opcode::BlackBoxFuncCall const& arg, AcirFo af.original_opcode_indices.blake2s_constraints.push_back(opcode_index); } else if constexpr (std::is_same_v) { af.blake3_constraints.push_back(Blake3Constraint{ - .inputs = transform::map(arg.inputs, - [](auto& e) { - return Blake3Input{ - .blackbox_input = parse_input(e), - .num_bits = 
e.num_bits, - }; - }), + .inputs = transform::map( + arg.inputs, + [](auto& e) { return Blake3Input{ .blackbox_input = parse_input(e), .num_bits = 8 }; }), .result = transform::map(*arg.outputs, [](auto& e) { return e.value; }), }); for (auto& output : af.blake3_constraints.back().result) { @@ -651,7 +661,7 @@ void handle_blackbox_func_call(Acir::Opcode::BlackBoxFuncCall const& arg, AcirFo } af.original_opcode_indices.blake3_constraints.push_back(opcode_index); } else if constexpr (std::is_same_v) { - af.ecdsa_k1_constraints.push_back(EcdsaSecp256k1Constraint{ + af.ecdsa_k1_constraints.push_back(EcdsaConstraint{ .hashed_message = transform::map(*arg.hashed_message, [](auto& e) { return get_witness_from_function_input(e); }), .signature = @@ -660,21 +670,23 @@ void handle_blackbox_func_call(Acir::Opcode::BlackBoxFuncCall const& arg, AcirFo transform::map(*arg.public_key_x, [](auto& e) { return get_witness_from_function_input(e); }), .pub_y_indices = transform::map(*arg.public_key_y, [](auto& e) { return get_witness_from_function_input(e); }), + .predicate = parse_input(arg.predicate), .result = arg.output.value, }); af.constrained_witness.insert(af.ecdsa_k1_constraints.back().result); af.original_opcode_indices.ecdsa_k1_constraints.push_back(opcode_index); } else if constexpr (std::is_same_v) { - af.ecdsa_r1_constraints.push_back(EcdsaSecp256r1Constraint{ + af.ecdsa_r1_constraints.push_back(EcdsaConstraint{ .hashed_message = transform::map(*arg.hashed_message, [](auto& e) { return get_witness_from_function_input(e); }), + .signature = + transform::map(*arg.signature, [](auto& e) { return get_witness_from_function_input(e); }), .pub_x_indices = transform::map(*arg.public_key_x, [](auto& e) { return get_witness_from_function_input(e); }), .pub_y_indices = transform::map(*arg.public_key_y, [](auto& e) { return get_witness_from_function_input(e); }), + .predicate = parse_input(arg.predicate), .result = arg.output.value, - .signature = - transform::map(*arg.signature, 
[](auto& e) { return get_witness_from_function_input(e); }), }); af.constrained_witness.insert(af.ecdsa_r1_constraints.back().result); af.original_opcode_indices.ecdsa_r1_constraints.push_back(opcode_index); @@ -727,7 +739,11 @@ void handle_blackbox_func_call(Acir::Opcode::BlackBoxFuncCall const& arg, AcirFo auto input_key = get_witness_from_function_input(arg.key_hash); auto proof_type_in = arg.proof_type; - + auto predicate = parse_input(arg.predicate); + if (predicate.is_constant && predicate.value.is_zero()) { + // No constraint if the recursion is disabled + return; + } auto c = RecursionConstraint{ .key = transform::map(arg.verification_key, [](auto& e) { return get_witness_from_function_input(e); }), @@ -736,6 +752,7 @@ void handle_blackbox_func_call(Acir::Opcode::BlackBoxFuncCall const& arg, AcirFo transform::map(arg.public_inputs, [](auto& e) { return get_witness_from_function_input(e); }), .key_hash = input_key, .proof_type = proof_type_in, + .predicate = predicate, }; // Add the recursion constraint to the appropriate container based on proof type @@ -751,69 +768,24 @@ void handle_blackbox_func_call(Acir::Opcode::BlackBoxFuncCall const& arg, AcirFo case PG: case PG_TAIL: case PG_FINAL: - af.ivc_recursion_constraints.push_back(c); - af.original_opcode_indices.ivc_recursion_constraints.push_back(opcode_index); + af.pg_recursion_constraints.push_back(c); + af.original_opcode_indices.pg_recursion_constraints.push_back(opcode_index); break; case AVM: af.avm_recursion_constraints.push_back(c); af.original_opcode_indices.avm_recursion_constraints.push_back(opcode_index); break; + case CIVC: + af.civc_recursion_constraints.push_back(c); + af.original_opcode_indices.civc_recursion_constraints.push_back(opcode_index); + break; default: throw_or_abort("Invalid PROOF_TYPE in RecursionConstraint!"); } - } else if constexpr (std::is_same_v) { - af.bigint_from_le_bytes_constraints.push_back(BigIntFromLeBytes{ - .inputs = transform::map(arg.inputs, [](auto& e) { return 
get_witness_from_function_input(e); }), - .modulus = transform::map(arg.modulus, [](auto& e) -> uint32_t { return e; }), - .result = arg.output, - }); - af.original_opcode_indices.bigint_from_le_bytes_constraints.push_back(opcode_index); - } else if constexpr (std::is_same_v) { - af.bigint_to_le_bytes_constraints.push_back(BigIntToLeBytes{ - .input = arg.input, - .result = transform::map(arg.outputs, [](auto& e) { return e.value; }), - }); - for (auto& output : af.bigint_to_le_bytes_constraints.back().result) { - af.constrained_witness.insert(output); - } - af.original_opcode_indices.bigint_to_le_bytes_constraints.push_back(opcode_index); - } else if constexpr (std::is_same_v) { - af.bigint_operations.push_back(BigIntOperation{ - .lhs = arg.lhs, - .rhs = arg.rhs, - .result = arg.output, - .opcode = BigIntOperationType::Add, - }); - af.original_opcode_indices.bigint_operations.push_back(opcode_index); - } else if constexpr (std::is_same_v) { - af.bigint_operations.push_back(BigIntOperation{ - .lhs = arg.lhs, - .rhs = arg.rhs, - .result = arg.output, - .opcode = BigIntOperationType::Sub, - }); - af.original_opcode_indices.bigint_operations.push_back(opcode_index); - } else if constexpr (std::is_same_v) { - af.bigint_operations.push_back(BigIntOperation{ - .lhs = arg.lhs, - .rhs = arg.rhs, - .result = arg.output, - .opcode = BigIntOperationType::Mul, - }); - af.original_opcode_indices.bigint_operations.push_back(opcode_index); - } else if constexpr (std::is_same_v) { - af.bigint_operations.push_back(BigIntOperation{ - .lhs = arg.lhs, - .rhs = arg.rhs, - .result = arg.output, - .opcode = BigIntOperationType::Div, - }); - af.original_opcode_indices.bigint_operations.push_back(opcode_index); } else if constexpr (std::is_same_v) { af.poseidon2_constraints.push_back(Poseidon2Constraint{ .state = transform::map(arg.inputs, [](auto& e) { return parse_input(e); }), .result = transform::map(arg.outputs, [](auto& e) { return e.value; }), - .len = arg.len, }); for (auto& output 
: af.poseidon2_constraints.back().result) { af.constrained_witness.insert(output); @@ -859,7 +831,7 @@ BlockConstraint handle_memory_init(Acir::Opcode::MemoryInit const& mem_init) bool is_rom(Acir::MemOp const& mem_op) { return mem_op.operation.mul_terms.empty() && mem_op.operation.linear_combinations.empty() && - uint256_t(mem_op.operation.q_c) == 0; + from_be_bytes(mem_op.operation.q_c) == 0; } uint32_t poly_to_witness(const poly_triple poly) @@ -990,7 +962,7 @@ WitnessVector witness_map_to_witness_vector(Witnesses::WitnessMap const& witness wv.emplace_back(0); index++; } - wv.emplace_back(uint256_t(e.second)); + wv.emplace_back(from_be_bytes(e.second)); index++; } return wv; diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/avm2_recursion_constraint.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/avm2_recursion_constraint.cpp index 907ef89aab3e..8f9b61729ea3 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/avm2_recursion_constraint.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/avm2_recursion_constraint.cpp @@ -46,98 +46,67 @@ namespace { */ void create_dummy_vkey_and_proof(Builder& builder, [[maybe_unused]] size_t proof_size, - size_t public_inputs_size, const std::vector& key_fields, const std::vector& proof_fields) { + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1514): restructure this function to use functions from + // mock_verifier_inputs using Flavor = avm2::AvmFlavor; - // Relevant source for proof layout: AvmFlavor::Transcript::serialize_full_transcript() - // TODO(#13390): Revive this assertion (and remove the >= 0 one) once we freeze the number of colums in AVM. 
- // assert((proof_size - Flavor::NUM_WITNESS_ENTITIES * Flavor::NUM_FRS_COM - - // (Flavor::NUM_ALL_ENTITIES + 1) * Flavor::NUM_FRS_FR - Flavor::NUM_FRS_COM) % - // (Flavor::NUM_FRS_COM + Flavor::NUM_FRS_FR * (Flavor::BATCHED_RELATION_PARTIAL_LENGTH + 1)) == - // 0); - - // Derivation of circuit size based on the proof - // TODO#13390): Revive the following code once we freeze the number of colums in AVM. - // const auto log_circuit_size = - // (proof_size - Flavor::NUM_WITNESS_ENTITIES * Flavor::NUM_FRS_COM - - // (Flavor::NUM_ALL_ENTITIES + 1) * Flavor::NUM_FRS_FR - Flavor::NUM_FRS_COM) / - // (Flavor::NUM_FRS_COM + Flavor::NUM_FRS_FR * (Flavor::BATCHED_RELATION_PARTIAL_LENGTH + 1)); - const auto log_circuit_size = numeric::get_msb(avm2::CIRCUIT_SUBGROUP_SIZE); - - // First key field is log circuit size - builder.set_variable(key_fields[0].witness_index, log_circuit_size); - // Second key field is number of public inputs - builder.set_variable(key_fields[1].witness_index, public_inputs_size); - - size_t offset = 2; - for (size_t i = 0; i < Flavor::NUM_PRECOMPUTED_ENTITIES; ++i) { + // a lambda that sets dummy commitments + auto set_dummy_commitment = [&builder](const std::vector>& fields, size_t& offset) { auto comm = curve::BN254::AffineElement::one() * fr::random_element(); auto frs = field_conversion::convert_to_bn254_frs(comm); - builder.set_variable(key_fields[offset].witness_index, frs[0]); - builder.set_variable(key_fields[offset + 1].witness_index, frs[1]); - builder.set_variable(key_fields[offset + 2].witness_index, frs[2]); - builder.set_variable(key_fields[offset + 3].witness_index, frs[3]); + builder.set_variable(fields[offset].witness_index, frs[0]); + builder.set_variable(fields[offset + 1].witness_index, frs[1]); + builder.set_variable(fields[offset + 2].witness_index, frs[2]); + builder.set_variable(fields[offset + 3].witness_index, frs[3]); offset += 4; + }; + // a lambda that sets dummy evaluation in proof fields vector + auto 
set_dummy_evaluation_in_proof_fields = [&](size_t& offset) { + builder.set_variable(proof_fields[offset].witness_index, fr::random_element()); + offset++; + }; + + size_t offset = 0; + for (size_t i = 0; i < Flavor::NUM_PRECOMPUTED_ENTITIES; ++i) { + set_dummy_commitment(key_fields, offset); } // This routine is adding some placeholders for avm proof and avm vk in the case where witnesses are not present. // TODO(#14234)[Unconditional PIs validation]: Remove next line and use offset == 0 for subsequent line. builder.set_variable(proof_fields[0].witness_index, 1); - builder.set_variable(proof_fields[1].witness_index, 1 << log_circuit_size); - offset = 2; // TODO(#14234)[Unconditional PIs validation]: reset offset = 1 + offset = 1; // TODO(#14234)[Unconditional PIs validation]: reset offset = 1 // Witness Commitments for (size_t i = 0; i < Flavor::NUM_WITNESS_ENTITIES; i++) { - auto comm = curve::BN254::AffineElement::one() * fr::random_element(); - auto frs = field_conversion::convert_to_bn254_frs(comm); - builder.set_variable(proof_fields[offset].witness_index, frs[0]); - builder.set_variable(proof_fields[offset + 1].witness_index, frs[1]); - builder.set_variable(proof_fields[offset + 2].witness_index, frs[2]); - builder.set_variable(proof_fields[offset + 3].witness_index, frs[3]); - offset += 4; + set_dummy_commitment(proof_fields, offset); } // now the univariates - for (size_t i = 0; i < CONST_PROOF_SIZE_LOG_N * Flavor::BATCHED_RELATION_PARTIAL_LENGTH; i++) { - builder.set_variable(proof_fields[offset].witness_index, fr::random_element()); - offset++; + for (size_t i = 0; i < avm2::MAX_AVM_TRACE_LOG_SIZE * Flavor::BATCHED_RELATION_PARTIAL_LENGTH; i++) { + set_dummy_evaluation_in_proof_fields(offset); } // now the sumcheck evaluations for (size_t i = 0; i < Flavor::NUM_ALL_ENTITIES; i++) { - builder.set_variable(proof_fields[offset].witness_index, fr::random_element()); - offset++; + set_dummy_evaluation_in_proof_fields(offset); } // now the gemini fold 
commitments which are CONST_PROOF_SIZE_LOG_N - 1 - for (size_t i = 1; i < CONST_PROOF_SIZE_LOG_N; i++) { - auto comm = curve::BN254::AffineElement::one() * fr::random_element(); - auto frs = field_conversion::convert_to_bn254_frs(comm); - builder.set_variable(proof_fields[offset].witness_index, frs[0]); - builder.set_variable(proof_fields[offset + 1].witness_index, frs[1]); - builder.set_variable(proof_fields[offset + 2].witness_index, frs[2]); - builder.set_variable(proof_fields[offset + 3].witness_index, frs[3]); - offset += 4; + for (size_t i = 1; i < avm2::MAX_AVM_TRACE_LOG_SIZE; i++) { + set_dummy_commitment(proof_fields, offset); } // the gemini fold evaluations which are CONST_PROOF_SIZE_LOG_N - for (size_t i = 0; i < CONST_PROOF_SIZE_LOG_N; i++) { - builder.set_variable(proof_fields[offset].witness_index, fr::random_element()); - offset++; + for (size_t i = 0; i < avm2::MAX_AVM_TRACE_LOG_SIZE; i++) { + set_dummy_evaluation_in_proof_fields(offset); } // lastly the shplonk batched quotient commitment and kzg quotient commitment for (size_t i = 0; i < 2; i++) { - auto comm = curve::BN254::AffineElement::one() * fr::random_element(); - auto frs = field_conversion::convert_to_bn254_frs(comm); - builder.set_variable(proof_fields[offset].witness_index, frs[0]); - builder.set_variable(proof_fields[offset + 1].witness_index, frs[1]); - builder.set_variable(proof_fields[offset + 2].witness_index, frs[2]); - builder.set_variable(proof_fields[offset + 3].witness_index, frs[3]); - offset += 4; + set_dummy_commitment(proof_fields, offset); } // TODO(#13390): Revive the following assertion once we freeze the number of colums in AVM. 
@@ -163,23 +132,14 @@ HonkRecursionConstraintOutput create_avm2_recursion_constraints_goblin( BB_ASSERT_EQ(input.proof_type, AVM); - auto fields_from_witnesses = [&](const std::vector& input) { - std::vector result; - result.reserve(input.size()); - for (const auto& idx : input) { - result.emplace_back(field_ct::from_witness_index(&builder, idx)); - } - return result; - }; - // Construct in-circuit representations of the verification key, proof and public inputs - const auto key_fields = fields_from_witnesses(input.key); - const auto proof_fields = fields_from_witnesses(input.proof); - const auto public_inputs_flattened = fields_from_witnesses(input.public_inputs); + const auto key_fields = RecursionConstraint::fields_from_witnesses(builder, input.key); + const auto proof_fields = RecursionConstraint::fields_from_witnesses(builder, input.proof); + const auto public_inputs_flattened = RecursionConstraint::fields_from_witnesses(builder, input.public_inputs); // Populate the key fields and proof fields with dummy values to prevent issues (e.g. points must be on curve). 
if (!has_valid_witness_assignments) { - create_dummy_vkey_and_proof(builder, input.proof.size(), input.public_inputs.size(), key_fields, proof_fields); + create_dummy_vkey_and_proof(builder, input.proof.size(), key_fields, proof_fields); } // Execute the Goblin AVM2 recursive verifier diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/avm2_recursion_constraint.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/avm2_recursion_constraint.test.cpp index 3c4777157d76..3f4844fe0eb0 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/avm2_recursion_constraint.test.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/avm2_recursion_constraint.test.cpp @@ -5,8 +5,8 @@ #include "barretenberg/dsl/acir_format/acir_format_mocks.hpp" #include "barretenberg/dsl/acir_format/avm2_recursion_constraint.hpp" #include "barretenberg/dsl/acir_format/proof_surgeon.hpp" +#include "barretenberg/dsl/acir_format/utils.hpp" #include "barretenberg/stdlib/primitives/circuit_builders/circuit_builders_fwd.hpp" -#include "barretenberg/ultra_honk/decider_keys.hpp" #include "barretenberg/ultra_honk/ultra_prover.hpp" #include "barretenberg/ultra_honk/ultra_verifier.hpp" #include "barretenberg/vm2/common/avm_inputs.hpp" @@ -39,7 +39,7 @@ class AcirAvm2RecursionConstraint : public ::testing::Test { using OuterFlavor = UltraRollupFlavor; using OuterProver = UltraProver_; using OuterVerifier = UltraVerifier_; - using OuterDeciderProvingKey = DeciderProvingKey_; + using OuterProverInstance = ProverInstance_; using OuterVerificationKey = OuterFlavor::VerificationKey; using OuterBuilder = UltraCircuitBuilder; @@ -80,23 +80,10 @@ class AcirAvm2RecursionConstraint : public ::testing::Test { const std::vector proof_witnesses = inner_circuit_data.proof; const std::vector public_inputs_witnesses = inner_circuit_data.public_inputs_flat; - // Helper to append some values to the witness vector and return their corresponding indices - auto add_to_witness_and_track_indices = - 
[&witness](const std::vector& input) -> std::vector { - std::vector indices; - indices.reserve(input.size()); - auto witness_idx = static_cast(witness.size()); - for (const auto& value : input) { - witness.push_back(value); - indices.push_back(witness_idx++); - } - return indices; - }; - RecursionConstraint avm_recursion_constraint{ - .key = add_to_witness_and_track_indices(key_witnesses), - .proof = add_to_witness_and_track_indices(proof_witnesses), - .public_inputs = add_to_witness_and_track_indices(public_inputs_witnesses), + .key = add_to_witness_and_track_indices(witness, key_witnesses), + .proof = add_to_witness_and_track_indices(witness, proof_witnesses), + .public_inputs = add_to_witness_and_track_indices(witness, public_inputs_witnesses), .key_hash = 0, // not used .proof_type = AVM, }; @@ -133,14 +120,14 @@ TEST_F(AcirAvm2RecursionConstraint, TestBasicSingleAvm2RecursionConstraint) info("circuit gates = ", layer_2_circuit.get_estimated_num_finalized_gates()); - auto proving_key = std::make_shared(layer_2_circuit); - auto verification_key = std::make_shared(proving_key->get_precomputed()); - OuterProver prover(proving_key, verification_key); - info("prover gates = ", proving_key->dyadic_size()); + auto prover_instance = std::make_shared(layer_2_circuit); + auto verification_key = std::make_shared(prover_instance->get_precomputed()); + OuterProver prover(prover_instance, verification_key); + info("prover gates = ", prover_instance->dyadic_size()); auto proof = prover.construct_proof(); VerifierCommitmentKey ipa_verification_key(1 << CONST_ECCVM_LOG_N); OuterVerifier verifier(verification_key, ipa_verification_key); - bool result = verifier.template verify_proof(proof, proving_key->ipa_proof).result; + bool result = verifier.template verify_proof(proof, prover_instance->ipa_proof).result; EXPECT_TRUE(result); } @@ -165,17 +152,17 @@ TEST_F(AcirAvm2RecursionConstraint, TestGenerateVKFromConstraintsWithoutWitness) info("circuit gates = ", 
layer_2_circuit.get_estimated_num_finalized_gates()); - auto proving_key = std::make_shared(layer_2_circuit); - expected_vk = std::make_shared(proving_key->get_precomputed()); - OuterProver prover(proving_key, expected_vk); - info("prover gates = ", proving_key->dyadic_size()); + auto prover_instance = std::make_shared(layer_2_circuit); + expected_vk = std::make_shared(prover_instance->get_precomputed()); + OuterProver prover(prover_instance, expected_vk); + info("prover gates = ", prover_instance->dyadic_size()); // Construct and verify a proof of the outer AVM verifier circuits auto proof = prover.construct_proof(); VerifierCommitmentKey ipa_verification_key(1 << CONST_ECCVM_LOG_N); OuterVerifier verifier(expected_vk, ipa_verification_key); - bool result = verifier.template verify_proof(proof, proving_key->ipa_proof).result; + bool result = verifier.template verify_proof(proof, prover_instance->ipa_proof).result; EXPECT_TRUE(result); } @@ -191,10 +178,10 @@ TEST_F(AcirAvm2RecursionConstraint, TestGenerateVKFromConstraintsWithoutWitness) info("circuit gates = ", layer_2_circuit.get_estimated_num_finalized_gates()); - auto proving_key = std::make_shared(layer_2_circuit); - actual_vk = std::make_shared(proving_key->get_precomputed()); - OuterProver prover(proving_key, actual_vk); - info("prover gates = ", proving_key->dyadic_size()); + auto prover_instance = std::make_shared(layer_2_circuit); + actual_vk = std::make_shared(prover_instance->get_precomputed()); + OuterProver prover(prover_instance, actual_vk); + info("prover gates = ", prover_instance->dyadic_size()); } // Compare the VK constructed via running the IVC with the one constructed via mocking diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/bigint_constraint.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/bigint_constraint.cpp deleted file mode 100644 index 13bb2ab2aedc..000000000000 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/bigint_constraint.cpp +++ /dev/null @@ -1,477 
+0,0 @@ -// === AUDIT STATUS === -// internal: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } -// ===================== - -#include "bigint_constraint.hpp" -#include "barretenberg/common/assert.hpp" -#include "barretenberg/numeric/uint256/uint256.hpp" -#include "barretenberg/numeric/uintx/uintx.hpp" -#include "barretenberg/stdlib/primitives/bigfield/bigfield.hpp" -#include -#include - -namespace acir_format { - -using namespace bb; - -ModulusId modulus_param_to_id(ModulusParam param) -{ - if (Bn254FqParams::modulus_0 == param.modulus_0 && Bn254FqParams::modulus_1 == param.modulus_1 && - Bn254FqParams::modulus_2 == param.modulus_2 && Bn254FqParams::modulus_3 == param.modulus_3) { - return ModulusId::BN254_FQ; - } - if (Bn254FrParams::modulus_0 == param.modulus_0 && Bn254FrParams::modulus_1 == param.modulus_1 && - Bn254FrParams::modulus_2 == param.modulus_2 && Bn254FrParams::modulus_3 == param.modulus_3) { - return ModulusId::BN254_FR; - } - if (secp256k1::FqParams::modulus_0 == param.modulus_0 && secp256k1::FqParams::modulus_1 == param.modulus_1 && - secp256k1::FqParams::modulus_2 == param.modulus_2 && secp256k1::FqParams::modulus_3 == param.modulus_3) { - return ModulusId::SECP256K1_FQ; - } - if (secp256k1::FrParams::modulus_0 == param.modulus_0 && secp256k1::FrParams::modulus_1 == param.modulus_1 && - secp256k1::FrParams::modulus_2 == param.modulus_2 && secp256k1::FrParams::modulus_3 == param.modulus_3) { - return ModulusId::SECP256K1_FR; - } - if (secp256r1::FqParams::modulus_0 == param.modulus_0 && secp256r1::FqParams::modulus_1 == param.modulus_1 && - secp256r1::FqParams::modulus_2 == param.modulus_2 && secp256r1::FqParams::modulus_3 == param.modulus_3) { - return ModulusId::SECP256R1_FQ; - } - if (secp256r1::FrParams::modulus_0 == param.modulus_0 && secp256r1::FrParams::modulus_1 == param.modulus_1 && - 
secp256r1::FrParams::modulus_2 == param.modulus_2 && secp256r1::FrParams::modulus_3 == param.modulus_3) { - return ModulusId::SECP256R1_FR; - } - return ModulusId::UNKNOWN; -} - -template void create_bigint_operations_constraint(const BigIntOperation& input, - DSLBigInts& dsl_bigint, - bool has_valid_witness_assignments); -template void create_bigint_operations_constraint(const BigIntOperation& input, - DSLBigInts& dsl_bigint, - bool has_valid_witness_assignments); -template void create_bigint_addition_constraint(const BigIntOperation& input, - DSLBigInts& dsl_bigint); -template void create_bigint_addition_constraint(const BigIntOperation& input, - DSLBigInts& dsl_bigint); -template void create_bigint_sub_constraint(const BigIntOperation& input, - DSLBigInts& dsl_bigint); -template void create_bigint_sub_constraint(const BigIntOperation& input, - DSLBigInts& dsl_bigint); -template void create_bigint_mul_constraint(const BigIntOperation& input, - DSLBigInts& dsl_bigint); -template void create_bigint_mul_constraint(const BigIntOperation& input, - DSLBigInts& dsl_bigint); -template void create_bigint_div_constraint(const BigIntOperation& input, - DSLBigInts& dsl_bigint, - bool has_valid_witness_assignments); -template void create_bigint_div_constraint(const BigIntOperation& input, - DSLBigInts& dsl_bigint, - bool has_valid_witness_assignments); - -template -void create_bigint_addition_constraint(const BigIntOperation& input, DSLBigInts& dsl_bigint) -{ - switch (dsl_bigint.get_modulus_id(input.lhs)) { - case ModulusId::BN254_FR: { - auto lhs = dsl_bigint.bn254_fr(input.lhs); - auto rhs = dsl_bigint.bn254_fr(input.rhs); - dsl_bigint.set_bn254_fr(lhs + rhs, input.result); - break; - } - case ModulusId::BN254_FQ: { - auto lhs = dsl_bigint.bn254_fq(input.lhs); - auto rhs = dsl_bigint.bn254_fq(input.rhs); - dsl_bigint.set_bn254_fq(lhs + rhs, input.result); - break; - } - case ModulusId::SECP256K1_FQ: { - auto lhs = dsl_bigint.secp256k1_fq(input.lhs); - auto rhs = 
dsl_bigint.secp256k1_fq(input.rhs); - dsl_bigint.set_secp256k1_fq(lhs + rhs, input.result); - break; - } - case ModulusId::SECP256K1_FR: { - auto lhs = dsl_bigint.secp256k1_fr(input.lhs); - auto rhs = dsl_bigint.secp256k1_fr(input.rhs); - dsl_bigint.set_secp256k1_fr(lhs + rhs, input.result); - break; - } - case ModulusId::SECP256R1_FQ: { - auto lhs = dsl_bigint.secp256r1_fq(input.lhs); - auto rhs = dsl_bigint.secp256r1_fq(input.rhs); - dsl_bigint.set_secp256r1_fq(lhs + rhs, input.result); - break; - } - case ModulusId::SECP256R1_FR: { - auto lhs = dsl_bigint.secp256r1_fr(input.lhs); - auto rhs = dsl_bigint.secp256r1_fr(input.rhs); - dsl_bigint.set_secp256r1_fr(lhs + rhs, input.result); - break; - } - default: { - throw_or_abort("Unexpected Modulus ID"); - } - } -} - -template -void create_bigint_sub_constraint(const BigIntOperation& input, DSLBigInts& dsl_bigint) -{ - switch (dsl_bigint.get_modulus_id(input.lhs)) { - case ModulusId::BN254_FR: { - auto lhs = dsl_bigint.bn254_fr(input.lhs); - auto rhs = dsl_bigint.bn254_fr(input.rhs); - dsl_bigint.set_bn254_fr(lhs - rhs, input.result); - break; - } - case ModulusId::BN254_FQ: { - auto lhs = dsl_bigint.bn254_fq(input.lhs); - auto rhs = dsl_bigint.bn254_fq(input.rhs); - dsl_bigint.set_bn254_fq(lhs - rhs, input.result); - break; - } - case ModulusId::SECP256K1_FQ: { - auto lhs = dsl_bigint.secp256k1_fq(input.lhs); - auto rhs = dsl_bigint.secp256k1_fq(input.rhs); - dsl_bigint.set_secp256k1_fq(lhs - rhs, input.result); - break; - } - case ModulusId::SECP256K1_FR: { - auto lhs = dsl_bigint.secp256k1_fr(input.lhs); - auto rhs = dsl_bigint.secp256k1_fr(input.rhs); - dsl_bigint.set_secp256k1_fr(lhs - rhs, input.result); - break; - } - case ModulusId::SECP256R1_FQ: { - auto lhs = dsl_bigint.secp256r1_fq(input.lhs); - auto rhs = dsl_bigint.secp256r1_fq(input.rhs); - dsl_bigint.set_secp256r1_fq(lhs - rhs, input.result); - break; - } - case ModulusId::SECP256R1_FR: { - auto lhs = dsl_bigint.secp256r1_fr(input.lhs); - auto rhs = 
dsl_bigint.secp256r1_fr(input.rhs); - dsl_bigint.set_secp256r1_fr(lhs - rhs, input.result); - break; - } - default: { - throw_or_abort("Unexpected Modulus ID"); - } - } -} - -template -void create_bigint_mul_constraint(const BigIntOperation& input, DSLBigInts& dsl_bigint) -{ - switch (dsl_bigint.get_modulus_id(input.lhs)) { - case ModulusId::BN254_FR: { - auto lhs = dsl_bigint.bn254_fr(input.lhs); - auto rhs = dsl_bigint.bn254_fr(input.rhs); - dsl_bigint.set_bn254_fr(lhs * rhs, input.result); - break; - } - case ModulusId::BN254_FQ: { - auto lhs = dsl_bigint.bn254_fq(input.lhs); - auto rhs = dsl_bigint.bn254_fq(input.rhs); - dsl_bigint.set_bn254_fq(lhs * rhs, input.result); - break; - } - case ModulusId::SECP256K1_FQ: { - auto lhs = dsl_bigint.secp256k1_fq(input.lhs); - auto rhs = dsl_bigint.secp256k1_fq(input.rhs); - dsl_bigint.set_secp256k1_fq(lhs * rhs, input.result); - break; - } - case ModulusId::SECP256K1_FR: { - auto lhs = dsl_bigint.secp256k1_fr(input.lhs); - auto rhs = dsl_bigint.secp256k1_fr(input.rhs); - dsl_bigint.set_secp256k1_fr(lhs * rhs, input.result); - break; - } - case ModulusId::SECP256R1_FQ: { - auto lhs = dsl_bigint.secp256r1_fq(input.lhs); - auto rhs = dsl_bigint.secp256r1_fq(input.rhs); - dsl_bigint.set_secp256r1_fq(lhs * rhs, input.result); - break; - } - case ModulusId::SECP256R1_FR: { - auto lhs = dsl_bigint.secp256r1_fr(input.lhs); - auto rhs = dsl_bigint.secp256r1_fr(input.rhs); - dsl_bigint.set_secp256r1_fr(lhs * rhs, input.result); - break; - } - default: { - throw_or_abort("Unexpected Modulus ID"); - } - } -} - -template -void create_bigint_div_constraint(const BigIntOperation& input, - DSLBigInts& dsl_bigint, - bool has_valid_witness_assignments) -{ - if (!has_valid_witness_assignments) { - // Asserts catch the case where the divisor is zero, so we need to provide a different value (1) to avoid the - // assert - std::array limbs_idx; - dsl_bigint.get_witness_idx_of_limbs(input.rhs, limbs_idx); - dsl_bigint.set_value(1, limbs_idx); - 
} - - switch (dsl_bigint.get_modulus_id(input.lhs)) { - case ModulusId::BN254_FR: { - auto lhs = dsl_bigint.bn254_fr(input.lhs); - auto rhs = dsl_bigint.bn254_fr(input.rhs); - dsl_bigint.set_bn254_fr(lhs / rhs, input.result); - break; - } - case ModulusId::BN254_FQ: { - auto lhs = dsl_bigint.bn254_fq(input.lhs); - auto rhs = dsl_bigint.bn254_fq(input.rhs); - dsl_bigint.set_bn254_fq(lhs / rhs, input.result); - break; - } - case ModulusId::SECP256K1_FQ: { - auto lhs = dsl_bigint.secp256k1_fq(input.lhs); - auto rhs = dsl_bigint.secp256k1_fq(input.rhs); - dsl_bigint.set_secp256k1_fq(lhs / rhs, input.result); - break; - } - case ModulusId::SECP256K1_FR: { - auto lhs = dsl_bigint.secp256k1_fr(input.lhs); - auto rhs = dsl_bigint.secp256k1_fr(input.rhs); - dsl_bigint.set_secp256k1_fr(lhs / rhs, input.result); - break; - } - case ModulusId::SECP256R1_FQ: { - auto lhs = dsl_bigint.secp256r1_fq(input.lhs); - auto rhs = dsl_bigint.secp256r1_fq(input.rhs); - dsl_bigint.set_secp256r1_fq(lhs / rhs, input.result); - break; - } - case ModulusId::SECP256R1_FR: { - auto lhs = dsl_bigint.secp256r1_fr(input.lhs); - auto rhs = dsl_bigint.secp256r1_fr(input.rhs); - dsl_bigint.set_secp256r1_fr(lhs / rhs, input.result); - break; - } - default: { - throw_or_abort("Unexpected Modulus ID"); - } - } -} - -template -void create_bigint_operations_constraint(const BigIntOperation& input, - DSLBigInts& dsl_bigint, - bool has_valid_witness_assignments) -{ - switch (input.opcode) { - case BigIntOperationType::Add: { - create_bigint_addition_constraint(input, dsl_bigint); - break; - } - case BigIntOperationType::Sub: { - create_bigint_sub_constraint(input, dsl_bigint); - break; - } - case BigIntOperationType::Mul: { - create_bigint_mul_constraint(input, dsl_bigint); - break; - } - case BigIntOperationType::Div: { - create_bigint_div_constraint(input, dsl_bigint, has_valid_witness_assignments); - break; - } - default: { - throw_or_abort("Unexpected BigIntOperationType"); - } - } -} - -template -void 
create_bigint_from_le_bytes_constraint(Builder& builder, - const BigIntFromLeBytes& input, - DSLBigInts& dsl_bigints) -{ - using big_bn254_fq = bb::stdlib::bigfield; - using big_bn254_fr = bb::stdlib::bigfield; - using big_secp256k1_fq = bb::stdlib::bigfield; - using big_secp256k1_fr = bb::stdlib::bigfield; - using big_secp256r1_fq = bb::stdlib::bigfield; - using big_secp256r1_fr = bb::stdlib::bigfield; - using field_ct = bb::stdlib::field_t; - using byte_array_ct = bb::stdlib::byte_array; - - // Construct the modulus from its bytes - uint64_t modulus_64 = 0; - uint64_t base = 1; - std::vector modulus_limbs; - for (std::size_t i = 0; i < 32; ++i) { - if (i < input.modulus.size()) { - modulus_64 += input.modulus[i] * base; - base = base * 256; - if ((i + 1) % 8 == 0) { - modulus_limbs.push_back(modulus_64); - modulus_64 = 0; - base = 1; - } - } - } - auto modulus = ModulusParam{ .modulus_0 = modulus_limbs[0], - .modulus_1 = modulus_limbs[1], - .modulus_2 = modulus_limbs[2], - .modulus_3 = modulus_limbs[3] }; - bb::stdlib::byte_array rev_bytes = bb::stdlib::byte_array(&builder, 32); - for (size_t i = 0; i < 32; ++i) { - if (i < input.inputs.size()) { - field_ct element = field_ct::from_witness_index(&builder, input.inputs[i]); - byte_array_ct element_bytes(element, 1); - rev_bytes.write_at(element_bytes, i); - } else { - rev_bytes[i] = 0; - } - } - bb::stdlib::byte_array bytes = rev_bytes.reverse(); - - auto modulus_id = modulus_param_to_id(modulus); - - switch (modulus_id) { - case BN254_FQ: { - auto big = big_bn254_fq(bytes); - dsl_bigints.set_bn254_fq(big, input.result); - break; - } - case BN254_FR: { - auto big = big_bn254_fr(bytes); - dsl_bigints.set_bn254_fr(big, input.result); - break; - } - case SECP256K1_FQ: { - auto big = big_secp256k1_fq(bytes); - dsl_bigints.set_secp256k1_fq(big, input.result); - break; - } - case SECP256K1_FR: { - auto big = big_secp256k1_fr(bytes); - dsl_bigints.set_secp256k1_fr(big, input.result); - break; - } - case SECP256R1_FQ: { - 
auto big = big_secp256r1_fq(bytes); - dsl_bigints.set_secp256r1_fq(big, input.result); - break; - } - case SECP256R1_FR: { - auto big = big_secp256r1_fr(bytes); - dsl_bigints.set_secp256r1_fr(big, input.result); - break; - } - case UNKNOWN: - default: - throw_or_abort("Unexpected Modulus ID"); - break; - } -} - -template -void create_bigint_to_le_bytes_constraint(Builder& builder, - const BigIntToLeBytes& input, - DSLBigInts& dsl_bigints) -{ - using big_bn254_fq = bb::stdlib::bigfield; - using big_bn254_fr = bb::stdlib::bigfield; - using big_secp256k1_fq = bb::stdlib::bigfield; - using big_secp256k1_fr = bb::stdlib::bigfield; - using big_secp256r1_fq = bb::stdlib::bigfield; - using big_secp256r1_fr = bb::stdlib::bigfield; - - auto modulus_id = dsl_bigints.get_modulus_id(input.input); - bb::stdlib::byte_array byte_array; - switch (modulus_id) { - case BN254_FQ: { - big_bn254_fq big = dsl_bigints.bn254_fq(input.input); - big.self_reduce(); - byte_array = big.to_byte_array(); - - break; - } - case BN254_FR: { - big_bn254_fr big = dsl_bigints.bn254_fr(input.input); - big.self_reduce(); - byte_array = big.to_byte_array(); - break; - } - case SECP256K1_FQ: { - big_secp256k1_fq big = dsl_bigints.secp256k1_fq(input.input); - big.self_reduce(); - byte_array = big.to_byte_array(); - break; - } - case SECP256K1_FR: { - big_secp256k1_fr big = dsl_bigints.secp256k1_fr(input.input); - big.self_reduce(); - byte_array = big.to_byte_array(); - break; - } - case SECP256R1_FQ: { - big_secp256r1_fq big = dsl_bigints.secp256r1_fq(input.input); - big.self_reduce(); - byte_array = big.to_byte_array(); - break; - } - case SECP256R1_FR: { - big_secp256r1_fr big = dsl_bigints.secp256r1_fr(input.input); - big.self_reduce(); - byte_array = big.to_byte_array(); - break; - } - case UNKNOWN: - default: - throw_or_abort("Unexpected Modulus ID"); - break; - } - byte_array = byte_array.reverse(); - BB_ASSERT_LTE(input.result.size(), byte_array.size()); - for (size_t i = 0; i < byte_array.size(); 
++i) { - if (i < input.result.size()) { - - // This should instead use assert_equal: builder.assert_equal(byte_array[i].normalize().witness_index, - // input.result[i]); but unit tests require this because they do not constraint the witness, and then if we - // use assert_equal in that case, we can generate a proof for non matching values (cf test_assert_equal in - // field.test.cpp). We should check that Noir always constraint the results of to_bytes - poly_triple assert_equal{ - .a = byte_array[i].normalize().witness_index, - .b = input.result[i], - .c = 0, - .q_m = 0, - .q_l = 1, - .q_r = -1, - .q_o = 0, - .q_c = 0, - }; - builder.create_poly_gate(assert_equal); - } else { - byte_array[i].normalize().is_zero(); - } - } -} - -template void create_bigint_from_le_bytes_constraint(UltraCircuitBuilder& builder, - const BigIntFromLeBytes& input, - DSLBigInts& dsl_bigints); -template void create_bigint_from_le_bytes_constraint(MegaCircuitBuilder& builder, - const BigIntFromLeBytes& input, - DSLBigInts& dsl_bigints); -template void create_bigint_to_le_bytes_constraint(UltraCircuitBuilder& builder, - const BigIntToLeBytes& input, - DSLBigInts& dsl_bigints); - -template void create_bigint_to_le_bytes_constraint(MegaCircuitBuilder& builder, - const BigIntToLeBytes& input, - DSLBigInts& dsl_bigints); - -} // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/bigint_constraint.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/bigint_constraint.hpp deleted file mode 100644 index ed52dd27b5aa..000000000000 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/bigint_constraint.hpp +++ /dev/null @@ -1,261 +0,0 @@ -// === AUDIT STATUS === -// internal: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } -// ===================== - -#pragma once -#include "barretenberg/common/assert.hpp" 
-#include "barretenberg/ecc/curves/secp256k1/secp256k1.hpp" -#include "barretenberg/ecc/curves/secp256r1/secp256r1.hpp" -#include "barretenberg/serialize/msgpack.hpp" -#include "barretenberg/stdlib/primitives/bigfield/bigfield.hpp" - -#include -#include -#include - -namespace acir_format { - -struct BigIntFromLeBytes { - std::vector inputs; - std::vector modulus; - uint32_t result; - - // For serialization, update with any new fields - MSGPACK_FIELDS(inputs, result); - friend bool operator==(BigIntFromLeBytes const& lhs, BigIntFromLeBytes const& rhs) = default; -}; - -enum BigIntOperationType { Add, Sub, Mul, Div }; - -struct BigIntOperation { - uint32_t lhs; - uint32_t rhs; - uint32_t result; - BigIntOperationType opcode; - - // For serialization, update with any new fields - MSGPACK_FIELDS(lhs, rhs, opcode, result); - friend bool operator==(BigIntOperation const& lhs, BigIntOperation const& rhs) = default; -}; - -struct BigIntToLeBytes { - uint32_t input; - std::vector result; - - // For serialization, update with any new fields - MSGPACK_FIELDS(input, result); - friend bool operator==(BigIntToLeBytes const& lhs, BigIntToLeBytes const& rhs) = default; -}; - -/// Enumerates the supported modulus types for big integer operations. -/// Specifies whether a bigint refers to a BN254/SECP256K1/SECP256R1 Fq or Fr modulus. -enum ModulusId { - BN254_FQ = 0, - BN254_FR, - SECP256K1_FQ, - SECP256K1_FR, - SECP256R1_FQ, - SECP256R1_FR, - UNKNOWN, -}; - -/// 256-bit modulus value for a field element -/// The modulus is represented by 4 64-bits limbs -/// Used to define the modulus for big integer operations. 
-class ModulusParam { - public: - uint64_t modulus_0; - uint64_t modulus_1; - uint64_t modulus_2; - uint64_t modulus_3; -}; - -template class DSLBigInts { - using big_bn254_fq = bb::stdlib::bigfield; - using big_bn254_fr = bb::stdlib::bigfield; - using big_secp256k1_fq = bb::stdlib::bigfield; - using big_secp256k1_fr = bb::stdlib::bigfield; - using big_secp256r1_fq = bb::stdlib::bigfield; - using big_secp256r1_fr = bb::stdlib::bigfield; - - private: - std::map m_bn254_fq; - std::map m_bn254_fr; - std::map m_secp256k1_fq; - std::map m_secp256k1_fr; - std::map m_secp256r1_fq; - std::map m_secp256r1_fr; - - Builder* builder; - - public: - DSLBigInts() = default; - - void set_builder(Builder* ctx) { builder = ctx; } - - ModulusId get_modulus_id(uint32_t bigint_id) - { - if (this->m_bn254_fq.contains(bigint_id)) { - return ModulusId::BN254_FQ; - } - if (this->m_bn254_fr.contains(bigint_id)) { - return ModulusId::BN254_FR; - } - if (this->m_secp256k1_fq.contains(bigint_id)) { - return ModulusId::SECP256K1_FQ; - } - if (this->m_secp256k1_fr.contains(bigint_id)) { - return ModulusId::SECP256K1_FR; - } - if (this->m_secp256r1_fq.contains(bigint_id)) { - return ModulusId::SECP256R1_FQ; - } - if (this->m_secp256r1_fr.contains(bigint_id)) { - return ModulusId::SECP256R1_FR; - } - - return ModulusId::UNKNOWN; - } - - /// Set value of the witnesses representing the bigfield element - /// so that the bigfield value is the input value. 
- /// The input value is decomposed into the binary basis for the binary limbs - /// The input array must be: - /// the 4 witness index of the binary limbs, and the index of the prime limb - void set_value(uint256_t value, const std::array limbs_idx) - { - uint256_t limb_modulus = uint256_t(1) << big_bn254_fq::NUM_LIMB_BITS; - builder->set_variable(limbs_idx[4], value); - for (uint32_t i = 0; i < 4; i++) { - uint256_t limb = value % limb_modulus; - value = (value - limb) / limb_modulus; - builder->set_variable(limbs_idx[i], limb); - } - } - - /// Utility function that retrieve the witness indexes of a bigfield element - /// for use in set_value() - void get_witness_idx_of_limbs(uint32_t bigint_id, std::array& limbs_idx) - { - if (m_bn254_fr.contains(bigint_id)) { - for (uint32_t i = 0; i < 4; i++) { - limbs_idx[i] = m_bn254_fr[bigint_id].binary_basis_limbs[i].element.witness_index; - } - limbs_idx[4] = m_bn254_fr[bigint_id].prime_basis_limb.witness_index; - } else if (m_bn254_fq.contains(bigint_id)) { - for (uint32_t i = 0; i < 4; i++) { - limbs_idx[i] = m_bn254_fq[bigint_id].binary_basis_limbs[i].element.witness_index; - } - limbs_idx[4] = m_bn254_fq[bigint_id].prime_basis_limb.witness_index; - } else if (m_secp256k1_fq.contains(bigint_id)) { - auto big_field = m_secp256k1_fq[bigint_id]; - for (uint32_t i = 0; i < 4; i++) { - limbs_idx[i] = big_field.binary_basis_limbs[i].element.witness_index; - } - limbs_idx[4] = big_field.prime_basis_limb.witness_index; - } else if (m_secp256k1_fr.contains(bigint_id)) { - auto big_field = m_secp256k1_fr[bigint_id]; - for (uint32_t i = 0; i < 4; i++) { - limbs_idx[i] = big_field.binary_basis_limbs[i].element.witness_index; - } - limbs_idx[4] = big_field.prime_basis_limb.witness_index; - } else if (m_secp256r1_fr.contains(bigint_id)) { - auto big_field = m_secp256r1_fr[bigint_id]; - for (uint32_t i = 0; i < 4; i++) { - limbs_idx[i] = big_field.binary_basis_limbs[i].element.witness_index; - } - limbs_idx[4] = 
big_field.prime_basis_limb.witness_index; - } else if (m_secp256r1_fq.contains(bigint_id)) { - auto big_field = m_secp256r1_fq[bigint_id]; - for (uint32_t i = 0; i < 4; i++) { - limbs_idx[i] = big_field.binary_basis_limbs[i].element.witness_index; - } - limbs_idx[4] = big_field.prime_basis_limb.witness_index; - } - } - big_bn254_fr bn254_fr(uint32_t bigint_id) - { - ASSERT(this->m_bn254_fr.contains(bigint_id)); - return this->m_bn254_fr[bigint_id]; - } - - void set_bn254_fr(const big_bn254_fr& bigint, uint32_t bigint_id) { this->m_bn254_fr[bigint_id] = bigint; } - - big_bn254_fq bn254_fq(uint32_t bigint_id) - { - ASSERT(this->m_bn254_fq.contains(bigint_id)); - return this->m_bn254_fq[bigint_id]; - } - - void set_bn254_fq(const big_bn254_fq& bigint, uint32_t bigint_id) { this->m_bn254_fq[bigint_id] = bigint; } - - big_secp256r1_fq secp256r1_fq(uint32_t bigint_id) - { - ASSERT(this->m_secp256r1_fq.contains(bigint_id)); - return this->m_secp256r1_fq[bigint_id]; - } - - void set_secp256r1_fq(const big_secp256r1_fq& bigint, uint32_t bigint_id) - { - this->m_secp256r1_fq[bigint_id] = bigint; - } - - big_secp256r1_fr secp256r1_fr(uint32_t bigint_id) - { - ASSERT(this->m_secp256r1_fr.contains(bigint_id)); - return this->m_secp256r1_fr[bigint_id]; - } - - void set_secp256r1_fr(const big_secp256r1_fr& bigint, uint32_t bigint_id) - { - this->m_secp256r1_fr[bigint_id] = bigint; - } - - big_secp256k1_fq secp256k1_fq(uint32_t bigint_id) - { - ASSERT(this->m_secp256k1_fq.contains(bigint_id)); - return this->m_secp256k1_fq[bigint_id]; - } - - void set_secp256k1_fq(const big_secp256k1_fq& bigint, uint32_t bigint_id) - { - this->m_secp256k1_fq[bigint_id] = bigint; - } - - big_secp256k1_fr secp256k1_fr(uint32_t bigint_id) - { - if (this->m_secp256k1_fr.contains(bigint_id)) { - return this->m_secp256k1_fr[bigint_id]; - } - return { 0 }; - } - - void set_secp256k1_fr(const big_secp256k1_fr& bigint, uint32_t bigint_id) - { - this->m_secp256k1_fr[bigint_id] = bigint; - } -}; - -template 
-void create_bigint_from_le_bytes_constraint(Builder& builder, - const BigIntFromLeBytes& input, - DSLBigInts& dsl_bigints); -template -void create_bigint_to_le_bytes_constraint(Builder& builder, - const BigIntToLeBytes& input, - DSLBigInts& dsl_bigints); - -template -void create_bigint_operations_constraint(const BigIntOperation& input, DSLBigInts& dsl_bigints, bool); -template -void create_bigint_addition_constraint(const BigIntOperation& input, DSLBigInts& dsl_bigints); -template -void create_bigint_sub_constraint(const BigIntOperation& input, DSLBigInts& dsl_bigints); -template -void create_bigint_mul_constraint(const BigIntOperation& input, DSLBigInts& dsl_bigints); -template -void create_bigint_div_constraint(const BigIntOperation& input, DSLBigInts& dsl_bigints, bool); - -} // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/bigint_constraint.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/bigint_constraint.test.cpp deleted file mode 100644 index 73e9fe855341..000000000000 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/bigint_constraint.test.cpp +++ /dev/null @@ -1,456 +0,0 @@ -#include "bigint_constraint.hpp" -#include "acir_format.hpp" -#include "acir_format_mocks.hpp" -#include "barretenberg/circuit_checker/circuit_checker.hpp" -#include "barretenberg/common/throw_or_abort.hpp" -#include "barretenberg/numeric/uint256/uint256.hpp" - -#include -#include -#include - -namespace acir_format::tests { - -class BigIntTests : public ::testing::Test { - protected: - static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } -}; -using fr = field; - -std::tuple -generate_big_int_op_constraint_with_modulus( - BigIntOperationType op, fr lhs, fr rhs, WitnessVector& witness_values, const std::vector& modulus) -{ - // CAUTION We assume here the operands and the result fit into one byte! - // So trying to divide 7/2 won't work, but 8/2 will do. 
- auto lhs_id = static_cast(witness_values.size()); - witness_values.push_back(lhs); - auto rhs_id = static_cast(witness_values.size()); - witness_values.push_back(rhs); - BigIntFromLeBytes from_le_bytes_constraint_bigint_lhs{ - .inputs = { lhs_id }, - .modulus = modulus, - .result = lhs_id, - }; - BigIntFromLeBytes from_le_bytes_constraint_bigint_rhs{ - .inputs = { rhs_id }, - .modulus = modulus, - .result = rhs_id, - }; - - auto result = static_cast(witness_values.size()); - BigIntOperation constraint{ - .lhs = lhs_id, - .rhs = rhs_id, - .result = result, - .opcode = op, - }; - // Expecting the result to be just one byte long - BigIntToLeBytes to_bytes{ - .input = result, - .result = { static_cast(witness_values.size()) }, - }; - // overflow is NOT supported, you have to make sure there is no overflow/underflow. - fr value = 0; - switch (op) { - case Add: - value = witness_values[lhs_id] + witness_values[rhs_id]; - break; - case Sub: - value = witness_values[lhs_id] - witness_values[rhs_id]; - break; - case Mul: - value = witness_values[lhs_id] * witness_values[rhs_id]; - break; - case Div: - value = witness_values[lhs_id] / witness_values[rhs_id]; - break; - default: - throw_or_abort("Unexpected BigIntOperationType."); - break; - } - - witness_values.push_back(value); - return { from_le_bytes_constraint_bigint_lhs, from_le_bytes_constraint_bigint_rhs, constraint, to_bytes }; -} - -std::tuple generate_big_int_op_constraint( - BigIntOperationType op, fr lhs, fr rhs, WitnessVector& witness_values) -{ - // modulus is bn254/fq - return generate_big_int_op_constraint_with_modulus( - op, - lhs, - rhs, - witness_values, - { - 0x47, 0xFD, 0x7C, 0xD8, 0x16, 0x8C, 0x20, 0x3C, 0x8d, 0xca, 0x71, 0x68, 0x91, 0x6a, 0x81, 0x97, - 0x5d, 0x58, 0x81, 0x81, 0xb6, 0x45, 0x50, 0xb8, 0x29, 0xa0, 0x31, 0xe1, 0x72, 0x4e, 0x64, 0x30, - }); -} - -std::tuple -generate_big_int_op_constraint_secpk1_fr(BigIntOperationType op, fr lhs, fr rhs, WitnessVector& witness_values) -{ - return 
generate_big_int_op_constraint_with_modulus( - op, lhs, rhs, witness_values, { 0x41, 0x41, 0x36, 0xD0, 0x8C, 0x5E, 0xD2, 0xBF, 0x3B, 0xA0, 0x48, - 0xAF, 0xE6, 0xDC, 0xAE, 0xBA, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }); -} - -std::tuple -generate_big_int_op_constraint_secpk1_fq(BigIntOperationType op, fr lhs, fr rhs, WitnessVector& witness_values) -{ - return generate_big_int_op_constraint_with_modulus( - op, lhs, rhs, witness_values, { 0x2F, 0xFC, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }); -} -void apply_constraints(AcirFormat& constraint_system, - std::tuple constraints) -{ - constraint_system.bigint_from_le_bytes_constraints.push_back(get<0>(constraints)); - constraint_system.bigint_from_le_bytes_constraints.push_back(get<1>(constraints)); - constraint_system.bigint_to_le_bytes_constraints.push_back(get<3>(constraints)); - constraint_system.bigint_operations.push_back(get<2>(constraints)); -} - -std::tuple generate_big_int_op_constraint_with_id(BigIntOperationType op, - uint32_t lhs_id, - uint32_t rhs_id, - WitnessVector& witness_values) -{ - // lhs_id, rhs_id are big int it, so we can generate the operation directly - auto result = static_cast(witness_values.size()); - BigIntOperation constraint{ - .lhs = lhs_id, - .rhs = rhs_id, - .result = result, - .opcode = op, - }; - // Expecting the result to be just one byte long - BigIntToLeBytes to_bytes{ - .input = result, - .result = { static_cast(witness_values.size()) }, - }; - // overflow is NOT supported, you have to make sure there is no overflow/underflow. 
- fr value = 0; - switch (op) { - case Add: - value = witness_values[lhs_id] + witness_values[rhs_id]; - break; - case Sub: - value = witness_values[lhs_id] - witness_values[rhs_id]; - break; - case Mul: - value = witness_values[lhs_id] * witness_values[rhs_id]; - break; - case Div: - value = witness_values[lhs_id] / witness_values[rhs_id]; - break; - default: - throw_or_abort("Unexpected BigIntOperationType."); - break; - } - - witness_values.push_back(value); - return { constraint, to_bytes }; -} - -// Based on TestBigIntConstraintSimple, we generate constraints for multiple operations at the same time. -TEST_F(BigIntTests, TestBigIntConstraintMultiple) -{ - WitnessVector witness; - auto contraints = generate_big_int_op_constraint(BigIntOperationType::Add, fr(3), fr(1), witness); - auto contraints2 = generate_big_int_op_constraint(BigIntOperationType::Add, fr(3), fr(1), witness); - auto contraints3 = generate_big_int_op_constraint(BigIntOperationType::Sub, fr(5), fr(2), witness); - auto contraints4 = generate_big_int_op_constraint(BigIntOperationType::Mul, fr(5), fr(3), witness); - auto contraints5 = generate_big_int_op_constraint(BigIntOperationType::Div, fr(8), fr(2), witness); - AcirFormat constraint_system{ - .varnum = static_cast(witness.size() + 1), - .num_acir_opcodes = 5, - .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, - .poly_triple_constraints = {}, - .quad_constraints = {}, - 
.big_quad_constraints = {}, - .block_constraints = {}, - .original_opcode_indices = create_empty_original_opcode_indices(), - }; - apply_constraints(constraint_system, contraints); - apply_constraints(constraint_system, contraints2); - apply_constraints(constraint_system, contraints3); - apply_constraints(constraint_system, contraints4); - apply_constraints(constraint_system, contraints5); - mock_opcode_indices(constraint_system); - constraint_system.varnum = static_cast(witness.size() + 1); - - AcirProgram program{ constraint_system, witness }; - auto builder = create_circuit(program); - - EXPECT_TRUE(CircuitChecker::check(builder)); -} - -TEST_F(BigIntTests, TestBigIntConstraintSimple) -{ - // 3 + 3 = 6 - // 3 = bigint(1) = from_bytes(w(1)) - // 6 = bigint(2) = to_bytes(w(2)) - BigIntOperation add_constraint{ - .lhs = 1, - .rhs = 1, - .result = 2, - .opcode = BigIntOperationType::Add, - }; - - BigIntFromLeBytes from_le_bytes_constraint_bigint1{ - .inputs = { 1 }, - .modulus = { 0x47, 0xFD, 0x7C, 0xD8, 0x16, 0x8C, 0x20, 0x3C, 0x8d, 0xca, 0x71, 0x68, 0x91, 0x6a, 0x81, 0x97, - 0x5d, 0x58, 0x81, 0x81, 0xb6, 0x45, 0x50, 0xb8, 0x29, 0xa0, 0x31, 0xe1, 0x72, 0x4e, 0x64, 0x30, }, - .result = 1, - }; - - BigIntToLeBytes result2_to_le_bytes{ - .input = 2, .result = { 2 }, // 3+3=6 - }; - - AcirFormat constraint_system{ - .varnum = 5, - .num_acir_opcodes = 3, - .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = { from_le_bytes_constraint_bigint1 }, - .bigint_to_le_bytes_constraints = { result2_to_le_bytes }, - 
.bigint_operations = { add_constraint }, - .assert_equalities = {}, - .poly_triple_constraints = {}, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, - .original_opcode_indices = create_empty_original_opcode_indices(), - }; - mock_opcode_indices(constraint_system); - - WitnessVector witness{ - 0, 3, 6, 3, 0, - }; - AcirProgram program{ constraint_system, witness }; - auto builder = create_circuit(program); - - EXPECT_TRUE(CircuitChecker::check(builder)); -} - -// Based on TestBigIntConstraintMultiple, we generate constraints re-using the bigfields created by the first two -// operations -TEST_F(BigIntTests, TestBigIntConstraintReuse) -{ - WitnessVector witness; - auto contraints = generate_big_int_op_constraint_secpk1_fr(BigIntOperationType::Add, fr(3), fr(1), witness); - auto contraints2 = generate_big_int_op_constraint_secpk1_fr(BigIntOperationType::Sub, fr(5), fr(2), witness); - auto contraints3 = generate_big_int_op_constraint_with_id(BigIntOperationType::Mul, 0, 5, witness); - auto contraints4 = generate_big_int_op_constraint_with_id(BigIntOperationType::Div, 0, 1, witness); - auto contraints5 = generate_big_int_op_constraint_with_id(BigIntOperationType::Sub, 7, 1, witness); - - AcirFormat constraint_system{ - .varnum = static_cast(witness.size() + 1), - .num_acir_opcodes = 5, - .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, - .poly_triple_constraints = {}, - 
.quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, - .original_opcode_indices = create_empty_original_opcode_indices(), - }; - apply_constraints(constraint_system, contraints); - apply_constraints(constraint_system, contraints2); - constraint_system.bigint_to_le_bytes_constraints.push_back(get<1>(contraints3)); - constraint_system.bigint_operations.push_back(get<0>(contraints3)); - constraint_system.bigint_to_le_bytes_constraints.push_back(get<1>(contraints4)); - constraint_system.bigint_operations.push_back(get<0>(contraints4)); - constraint_system.bigint_to_le_bytes_constraints.push_back(get<1>(contraints5)); - constraint_system.bigint_operations.push_back(get<0>(contraints5)); - constraint_system.varnum = static_cast(witness.size() + 1); - mock_opcode_indices(constraint_system); - - AcirProgram program{ constraint_system, witness }; - auto builder = create_circuit(program); - - EXPECT_TRUE(CircuitChecker::check(builder)); -} - -TEST_F(BigIntTests, TestBigIntConstraintReuse2) -{ - WitnessVector witness; - auto contraints = generate_big_int_op_constraint_secpk1_fq(BigIntOperationType::Add, fr(3), fr(1), witness); - auto contraints2 = generate_big_int_op_constraint_secpk1_fq(BigIntOperationType::Sub, fr(5), fr(2), witness); - auto contraints3 = generate_big_int_op_constraint_with_id(BigIntOperationType::Add, 0, 5, witness); - auto contraints4 = generate_big_int_op_constraint_with_id(BigIntOperationType::Sub, 0, 1, witness); - auto contraints5 = generate_big_int_op_constraint_with_id(BigIntOperationType::Sub, 7, 1, witness); - - AcirFormat constraint_system{ - .varnum = static_cast(witness.size() + 1), - .num_acir_opcodes = 5, - .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - 
.multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, - .poly_triple_constraints = {}, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, - .original_opcode_indices = create_empty_original_opcode_indices(), - }; - apply_constraints(constraint_system, contraints); - apply_constraints(constraint_system, contraints2); - constraint_system.bigint_to_le_bytes_constraints.push_back(get<1>(contraints3)); - constraint_system.bigint_operations.push_back(get<0>(contraints3)); - constraint_system.bigint_to_le_bytes_constraints.push_back(get<1>(contraints4)); - constraint_system.bigint_operations.push_back(get<0>(contraints4)); - constraint_system.bigint_to_le_bytes_constraints.push_back(get<1>(contraints5)); - constraint_system.bigint_operations.push_back(get<0>(contraints5)); - constraint_system.varnum = static_cast(witness.size() + 1); - mock_opcode_indices(constraint_system); - - AcirProgram program{ constraint_system, witness }; - auto builder = create_circuit(program); - - EXPECT_TRUE(CircuitChecker::check(builder)); -} - -TEST_F(BigIntTests, TestBigIntDIV) -{ - // 6 / 3 = 2 - // 6 = bigint(1) = from_bytes(w(1)) - // 3 = bigint(2) = from_bytes(w(2)) - // 2 = bigint(3) = to_bytes(w(3)) - BigIntOperation div_constraint{ - .lhs = 1, - .rhs = 2, - .result = 3, - .opcode = BigIntOperationType::Div, - }; - - BigIntFromLeBytes from_le_bytes_constraint_bigint1{ - .inputs = { 1 }, - .modulus = { 0x41, 0x41, 0x36, 0xD0, 0x8C, 0x5E, 0xD2, 0xBF, 0x3B, 0xA0, 0x48, 0xAF, 0xE6, 0xDC, 0xAE, 0xBA, - 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, - .result = 1, - }; - BigIntFromLeBytes from_le_bytes_constraint_bigint2{ - .inputs = { 2 }, - .modulus = { 0x41, 
0x41, 0x36, 0xD0, 0x8C, 0x5E, 0xD2, 0xBF, 0x3B, 0xA0, 0x48, 0xAF, 0xE6, 0xDC, 0xAE, 0xBA, - 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, - .result = 2, - }; - - BigIntToLeBytes result3_to_le_bytes{ - .input = 3, .result = { 3 }, // - }; - - AcirFormat constraint_system{ - .varnum = 5, - .num_acir_opcodes = 4, - .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = { from_le_bytes_constraint_bigint1, from_le_bytes_constraint_bigint2 }, - .bigint_to_le_bytes_constraints = { result3_to_le_bytes }, - .bigint_operations = { div_constraint }, - .assert_equalities = {}, - .poly_triple_constraints = {}, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, - .original_opcode_indices = create_empty_original_opcode_indices(), - }; - mock_opcode_indices(constraint_system); - - WitnessVector witness{ - 0, 6, 3, 2, 0, - }; - AcirProgram program{ constraint_system, witness }; - auto builder = create_circuit(program); - - EXPECT_TRUE(CircuitChecker::check(builder)); -} -} // namespace acir_format::tests diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.cpp index fc069279eafc..05d45d6d21bb 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.cpp @@ -118,16 +118,13 @@ void process_ROM_operations(Builder& builder, // For a ROM table, constant 
read should be optimized out: // The rom_table won't work with a constant read because the table may not be initialized ASSERT(op.index.q_l != 0); - // We create a new witness w to avoid issues with non-valid witness assignements: - // if witness are not assigned, then w will be zero and table[w] will work - fr w_value = 0; - if (has_valid_witness_assignments) { - // If witness are assigned, we use the correct value for w - w_value = index.get_value(); + + // In case of invalid witness assignment, we set the value of index value to zero to not hit out of bound in + // ROM table + if (!has_valid_witness_assignments) { + builder.set_variable(index.witness_index, 0); } - field_ct w = field_ct::from_witness(&builder, w_value); - value.assert_equal(table[w]); - w.assert_equal(index); + value.assert_equal(table[index]); } } @@ -144,12 +141,11 @@ void process_RAM_operations(Builder& builder, for (auto& op : constraint.trace) { field_ct value = poly_to_field_ct(op.value, builder); field_ct index = poly_to_field_ct(op.index, builder); - - // We create a new witness w to avoid issues with non-valid witness assignements. - // If witness are not assigned, then index will be zero and table[index] won't hit bounds check. - fr index_value = has_valid_witness_assignments ? index.get_value() : 0; - // Create new witness and ensure equal to index. 
- field_ct::from_witness(&builder, index_value).assert_equal(index); + // In case of invalid witness assignment, we set the value of index value to zero to not hit out of bound in + // RAM table + if (!has_valid_witness_assignments) { + builder.set_variable(index.witness_index, 0); + } if (op.access_type == 0) { value.assert_equal(table.read(index)); @@ -179,14 +175,12 @@ void process_call_data_operations(Builder& builder, BB_ASSERT_EQ(op.access_type, 0); field_ct value = poly_to_field_ct(op.value, builder); field_ct index = poly_to_field_ct(op.index, builder); - fr w_value = 0; - if (has_valid_witness_assignments) { - // If witness are assigned, we use the correct value for w - w_value = index.get_value(); + // In case of invalid witness assignment, we set the value of index value to zero to not hit out of bound in + // calldata-array + if (!has_valid_witness_assignments) { + builder.set_variable(index.witness_index, 0); } - field_ct w = field_ct::from_witness(&builder, w_value); - value.assert_equal(calldata_array[w]); - w.assert_equal(index); + value.assert_equal(calldata_array[index]); } }; diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.test.cpp index 22e118cbcce0..9ffda6bba17c 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.test.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.test.cpp @@ -27,9 +27,9 @@ class MegaHonk : public ::testing::Test { // Construct and verify an MegaHonk proof for the provided circuit static bool prove_and_verify(Builder& circuit) { - auto proving_key = std::make_shared>(circuit); - auto verification_key = std::make_shared(proving_key->get_precomputed()); - Prover prover{ proving_key, verification_key }; + auto prover_instance = std::make_shared>(circuit); + auto verification_key = std::make_shared(prover_instance->get_precomputed()); + Prover prover{ 
prover_instance, verification_key }; auto proof = prover.construct_proof(); Verifier verifier{ verification_key }; @@ -141,29 +141,6 @@ TEST_F(UltraPlonkRAM, TestBlockConstraint) .varnum = static_cast(num_variables), .num_acir_opcodes = 7, .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, - .poly_triple_constraints = {}, - .quad_constraints = {}, - .big_quad_constraints = {}, .block_constraints = { block }, .original_opcode_indices = create_empty_original_opcode_indices(), }; @@ -185,32 +162,10 @@ TEST_F(MegaHonk, Databus) .varnum = static_cast(num_variables), .num_acir_opcodes = 1, .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, - .poly_triple_constraints = {}, - .quad_constraints = {}, - .big_quad_constraints = {}, .block_constraints = { block }, .original_opcode_indices = create_empty_original_opcode_indices(), }; + 
mock_opcode_indices(program.constraints); // Construct a bberg circuit from the acir representation @@ -289,29 +244,7 @@ TEST_F(MegaHonk, DatabusReturn) .varnum = static_cast(num_variables), .num_acir_opcodes = 2, .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, .poly_triple_constraints = { assert_equal }, - .quad_constraints = {}, - .big_quad_constraints = {}, .block_constraints = { block }, .original_opcode_indices = create_empty_original_opcode_indices(), }; diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/civc_recursion_constraints.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/civc_recursion_constraints.cpp new file mode 100644 index 000000000000..7907f68d444a --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/civc_recursion_constraints.cpp @@ -0,0 +1,130 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } +// ===================== +#include "barretenberg/dsl/acir_format/mock_verifier_inputs.hpp" +#include "barretenberg/dsl/acir_format/recursion_constraint.hpp" +#include "barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.hpp" +#include "barretenberg/stdlib/primitives/bigfield/bigfield.hpp" +#include "proof_surgeon.hpp" + +namespace acir_format { + +using 
namespace bb; + +using Builder = bb::UltraCircuitBuilder; // Builder is always Ultra +using field_ct = stdlib::field_t; + +template +using HonkRecursionConstraintOutput = bb::stdlib::recursion::honk::UltraRecursiveVerifierOutput; + +using namespace bb; + +/** + * @brief Creates a dummy vkey and proof object. + * @details Populates the key and proof vectors with dummy values in the write_vk case when we don't have a valid + * witness. The bulk of the logic is setting up certain values correctly like the circuit size, number of public inputs, + * aggregation object, and commitments. + * + * @param builder + * @param proof_size Size of proof with NO public inputs + * @param public_inputs_size Total size of public inputs including aggregation object + * @param key_fields + * @param proof_fields + */ +void create_dummy_vkey_and_proof(Builder& builder, + size_t proof_size, + size_t public_inputs_size, + const std::vector& key_fields, + const std::vector& proof_fields) +{ + using ClientIVCRecursiveVerifier = stdlib::recursion::honk::ClientIVCRecursiveVerifier; + using IO = stdlib::recursion::honk::HidingKernelIO; + + BB_ASSERT_EQ(proof_size, ClientIVCRecursiveVerifier::StdlibProof::PROOF_LENGTH_WITHOUT_PUB_INPUTS()); + + size_t num_inner_public_inputs = public_inputs_size - IO::PUBLIC_INPUTS_SIZE; + uint32_t pub_inputs_offset = MegaZKFlavor::has_zero_row ? 
1 : 0; + + // Generate mock honk vk + // Note: log_circuit_size = VIRTUAL_LOG_N + auto honk_vk = create_mock_honk_vk( + 1 << MegaZKFlavor::VIRTUAL_LOG_N, pub_inputs_offset, num_inner_public_inputs); + + // Set honk vk in builder + size_t offset = 0; + for (auto& vk_element : honk_vk->to_field_elements()) { + builder.set_variable(key_fields[offset].witness_index, vk_element); + offset++; + } + + // Generate dummy CIVC proof + bb::HonkProof civc_proof = create_mock_civc_proof(num_inner_public_inputs); + + // Set CIVC proof in builder + offset = 0; + for (auto& proof_element : civc_proof) { + builder.set_variable(proof_fields[offset].witness_index, proof_element); + offset++; + } + + BB_ASSERT_EQ(offset, proof_size + public_inputs_size); +} + +/** + * @brief Add constraints associated with recursive verification of an CIVC proof + * + * @param builder + * @param input + * @param input_points_accumulator_indices + * @param has_valid_witness_assignments + * @return HonkRecursionConstraintOutput {pairing agg object, ipa claim, ipa proof} + */ +[[nodiscard("IPA claim and Pairing points should be accumulated")]] HonkRecursionConstraintOutput +create_civc_recursion_constraints(Builder& builder, + const RecursionConstraint& input, + bool has_valid_witness_assignments) +{ + using ClientIVCRecursiveVerifier = stdlib::recursion::honk::ClientIVCRecursiveVerifier; + using RecursiveVKAndHash = ClientIVCRecursiveVerifier::RecursiveVKAndHash; + using VerificationKey = ClientIVCRecursiveVerifier::RecursiveVK; + using IO = stdlib::recursion::honk::HidingKernelIO; + + BB_ASSERT_EQ(input.proof_type, PROOF_TYPE::CIVC); + + // Reconstruct proof indices from proof and public inputs + std::vector proof_indices = + ProofSurgeon::create_indices_for_reconstructed_proof(input.proof, input.public_inputs); + + // Construct field elements from witness indices + std::vector key_fields = RecursionConstraint::fields_from_witnesses(builder, input.key); + std::vector proof_fields = 
RecursionConstraint::fields_from_witnesses(builder, proof_indices); + field_ct vk_hash = field_ct::from_witness_index(&builder, input.key_hash); + + if (!has_valid_witness_assignments) { + size_t total_pub_inputs_size = input.public_inputs.size() + IO::PUBLIC_INPUTS_SIZE; + size_t proof_size_without_pub_inputs = input.proof.size() - IO::PUBLIC_INPUTS_SIZE; + + create_dummy_vkey_and_proof( + builder, proof_size_without_pub_inputs, total_pub_inputs_size, key_fields, proof_fields); + } + + // Recursively verify CIVC proof + auto mega_vk = std::make_shared(key_fields); + auto mega_vk_and_hash = std::make_shared(mega_vk, vk_hash); + ClientIVCRecursiveVerifier::StdlibProof stdlib_proof(proof_fields, input.public_inputs.size()); + + ClientIVCRecursiveVerifier verifier(&builder, mega_vk_and_hash); + ClientIVCRecursiveVerifier::Output verification_output = verifier.verify(stdlib_proof); + + // Construct output + HonkRecursionConstraintOutput output; + output.points_accumulator = verification_output.points_accumulator; + output.ipa_claim = verification_output.opening_claim; + output.ipa_proof = verification_output.ipa_proof; + + return output; +} + +} // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/civc_recursion_constraints.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/civc_recursion_constraints.hpp new file mode 100644 index 000000000000..48ff729098b8 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/civc_recursion_constraints.hpp @@ -0,0 +1,24 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } +// ===================== + +#pragma once + +#include "barretenberg/dsl/acir_format/honk_recursion_constraint.hpp" +#include "barretenberg/dsl/acir_format/recursion_constraint.hpp" +#include 
"barretenberg/stdlib/primitives/bigfield/bigfield.hpp" + +namespace acir_format { + +using Builder = bb::UltraCircuitBuilder; // Builder is always Ultra + +using namespace bb; + +[[nodiscard("IPA claim and Pairing points should be accumulated")]] HonkRecursionConstraintOutput +create_civc_recursion_constraints(Builder& builder, + const RecursionConstraint& input, + bool has_valid_witness_assignments); + +} // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/civc_recursion_constraints.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/civc_recursion_constraints.test.cpp new file mode 100644 index 000000000000..6cd7d22c61ed --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/civc_recursion_constraints.test.cpp @@ -0,0 +1,141 @@ +#include "barretenberg/client_ivc/mock_circuit_producer.hpp" +#include "barretenberg/dsl/acir_format/acir_format.hpp" +#include "barretenberg/dsl/acir_format/acir_format_mocks.hpp" +#include "barretenberg/dsl/acir_format/proof_surgeon.hpp" +#include "barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.hpp" + +#include + +using namespace acir_format; +using namespace bb; +using namespace bb::stdlib::recursion::honk; + +class CivcRecursionConstraintTest : public ::testing::Test { + public: + using Builder = UltraCircuitBuilder; + + // Types for ClientIVC recursive verifier + using Flavor = UltraRollupFlavor; + using ProverInstance = ProverInstance_; + using VerificationKey = Flavor::VerificationKey; + using ClientIVCRecursiveVerifier = stdlib::recursion::honk::ClientIVCRecursiveVerifier; + + // Types for ClientIVC + using DeciderZKProvingKey = ProverInstance_; + using MegaZKVerificationKey = MegaZKFlavor::VerificationKey; + + // Public inputs added by bb to a ClientIVC proof + static constexpr size_t PUBLIC_INPUTS_SIZE = bb::HidingKernelIO::PUBLIC_INPUTS_SIZE; + + struct ClientIVCData { + std::shared_ptr mega_vk; + ClientIVC::Proof proof; + }; + + static 
ClientIVCData get_civc_data(TraceSettings trace_settings) + { + static constexpr size_t NUM_APP_CIRCUITS = 2; + + PrivateFunctionExecutionMockCircuitProducer circuit_producer(NUM_APP_CIRCUITS); + + ClientIVC ivc(circuit_producer.total_num_circuits, trace_settings); + + for (size_t idx = 0; idx < circuit_producer.total_num_circuits; idx++) { + circuit_producer.construct_and_accumulate_next_circuit(ivc); + } + + ClientIVC::Proof proof = ivc.prove(); + + return { ivc.get_vk().mega, proof }; + } + + static AcirProgram create_acir_program(const ClientIVCData& civc_data) + { + AcirProgram program; + + // Extract the witnesses from the provided data + auto key_witnesses = civc_data.mega_vk->to_field_elements(); + auto key_hash_witness = civc_data.mega_vk->hash(); + std::vector proof_witnesses = civc_data.proof.to_field_elements(); + + // Construct witness indices for each component in the constraint; populate the witness array + auto [key_indices, key_hash_index, proof_indices, public_inputs_indices] = + ProofSurgeon::populate_recursion_witness_data( + program.witness, + proof_witnesses, + key_witnesses, + key_hash_witness, + /*num_public_inputs_to_extract=*/civc_data.mega_vk->num_public_inputs - PUBLIC_INPUTS_SIZE); + + auto constraint = RecursionConstraint{ .key = key_indices, + .proof = proof_indices, + .public_inputs = public_inputs_indices, + .key_hash = key_hash_index, + .proof_type = PROOF_TYPE::CIVC }; + + // Construct a constraint system + program.constraints.varnum = static_cast(program.witness.size()); + program.constraints.num_acir_opcodes = static_cast(1); + program.constraints.civc_recursion_constraints = { constraint }; + program.constraints.original_opcode_indices = create_empty_original_opcode_indices(); + mock_opcode_indices(program.constraints); + + return program; + } + + static std::shared_ptr get_civc_recursive_verifier_pk(AcirProgram& program) + { + // Build constraints + Builder builder = create_circuit(program, { .honk_recursion = 2 }); + + 
info("Estimate finalized number of gates: ", builder.get_estimated_num_finalized_gates()); + + // Construct vk + auto prover_instance = std::make_shared(builder); + + return prover_instance; + } + + protected: + static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } +}; + +TEST_F(CivcRecursionConstraintTest, GenerateRecursiveCivcVerifierVKFromConstraints) +{ + using VerificationKey = CivcRecursionConstraintTest::VerificationKey; + using ClientIVCData = CivcRecursionConstraintTest::ClientIVCData; + + ClientIVCData civc_data = CivcRecursionConstraintTest::get_civc_data(TraceSettings()); + + std::shared_ptr vk_from_valid_witness; + { + AcirProgram program = create_acir_program(civc_data); + auto prover_instance = get_civc_recursive_verifier_pk(program); + vk_from_valid_witness = std::make_shared(prover_instance->get_precomputed()); + + // Prove and verify + UltraProver_ prover(prover_instance, vk_from_valid_witness); + HonkProof proof = prover.prove(); + + VerifierCommitmentKey ipa_verification_key(1 << CONST_ECCVM_LOG_N); + UltraVerifier_ verifier(vk_from_valid_witness, ipa_verification_key); + + // Split the proof + auto ultra_proof = + HonkProof(proof.begin(), proof.begin() + static_cast(proof.size() - IPA_PROOF_LENGTH)); + auto ipa_proof = + HonkProof(proof.begin() + static_cast(proof.size() - IPA_PROOF_LENGTH), proof.end()); + + EXPECT_TRUE(verifier.verify_proof(proof, ipa_proof)); + } + + std::shared_ptr vk_from_constraints; + { + AcirProgram program = create_acir_program(civc_data); + program.witness.clear(); + auto prover_instance = get_civc_recursive_verifier_pk(program); + vk_from_constraints = std::make_shared(prover_instance->get_precomputed()); + } + + EXPECT_EQ(*vk_from_valid_witness, *vk_from_constraints); +} diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ec_operations.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ec_operations.cpp index cf95b59b6c51..355235c9879e 100644 --- 
a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ec_operations.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ec_operations.cpp @@ -44,7 +44,7 @@ void create_ec_add_constraint(Builder& builder, const EcAdd& input, bool has_val if (infinite.is_constant()) { builder.fix_witness(input.result_infinite, infinite.get_value()); } else { - builder.assert_equal(infinite.witness_index, input.result_infinite); + builder.assert_equal(infinite.get_normalized_witness_index(), input.result_infinite); } } diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ec_operations.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ec_operations.hpp index 8263b040b225..aba45206c1a3 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ec_operations.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ec_operations.hpp @@ -18,13 +18,25 @@ struct EcAdd { WitnessOrConstant input2_x; WitnessOrConstant input2_y; WitnessOrConstant input2_infinite; + // Predicate indicating whether the constraint should be disabled: + // - true: the constraint is valid + // - false: the constraint is disabled, i.e it must not fail and can return whatever. 
+ WitnessOrConstant predicate; uint32_t result_x; uint32_t result_y; uint32_t result_infinite; // for serialization, update with any new fields - MSGPACK_FIELDS( - input1_x, input1_y, input1_infinite, input2_x, input2_y, input2_infinite, result_x, result_y, result_infinite); + MSGPACK_FIELDS(input1_x, + input1_y, + input1_infinite, + input2_x, + input2_y, + input2_infinite, + predicate, + result_x, + result_y, + result_infinite); friend bool operator==(EcAdd const& lhs, EcAdd const& rhs) = default; }; diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ec_operations.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ec_operations.test.cpp index 975016cd3012..69e2ac756e09 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ec_operations.test.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ec_operations.test.cpp @@ -60,30 +60,7 @@ TEST_F(EcOperations, TestECOperations) .varnum = static_cast(num_variables + 1), .num_acir_opcodes = 1, .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, .ec_add_constraints = { ec_add_constraint }, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, - .poly_triple_constraints = {}, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, .original_opcode_indices = create_empty_original_opcode_indices(), }; mock_opcode_indices(constraint_system); @@ -188,30 +165,8 @@ TEST_F(EcOperations, TestECMultiScalarMul) .varnum = static_cast(num_variables + 1), .num_acir_opcodes = 1, .public_inputs = {}, 
- .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, .multi_scalar_mul_constraints = { msm_constrain }, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, .poly_triple_constraints = { assert_equal }, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, .original_opcode_indices = create_empty_original_opcode_indices(), }; mock_opcode_indices(constraint_system); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.cpp new file mode 100644 index 000000000000..66a90fbf1981 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.cpp @@ -0,0 +1,191 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } +// ===================== + +#include "barretenberg/dsl/acir_format/ecdsa_constraints.hpp" +#include "barretenberg/dsl/acir_format/utils.hpp" +#include "barretenberg/stdlib/encryption/ecdsa/ecdsa.hpp" +#include "barretenberg/stdlib/primitives/curves/secp256k1.hpp" +#include "barretenberg/stdlib/primitives/curves/secp256r1.hpp" + +namespace acir_format { + +using namespace bb; + +/** + * @brief Create constraints to verify an ECDSA signature + * + * @details Given and ECDSA constraint system, add to the builder constraints that verify the ECDSA signature. 
We + * perform the following operations: + * 1. Reconstruct byte arrays from builder variables (we enforce that each variable fits in one byte and stack them in + * a vector) and the boolean result from the corresponding builder variable + * 2. Reconstruct the public key from the byte representations (big-endian, 32-byte numbers) of the \f$x\f$ and \f$y\f$ + * coordinates. + * 3. Enforce uniqueness of the representation of the public key by asserting \f$x < q\f$ and \f$y < q\f$, where + * \f$q\f$ is the modulus of the base field of the elliptic curve we are working with. + * 4. Verify the signature against the public key and the hash of the message. We return a bool_t bearing witness to + * whether the signature verification was successfull or not. + * 5. Enforce that the result of the signature verification matches the expected result. + * + * @tparam Curve + * @param builder + * @param input + * @param has_valid_witness_assignments + */ +template +void create_ecdsa_verify_constraints(typename Curve::Builder& builder, + const EcdsaConstraint& input, + bool has_valid_witness_assignments) +{ + using Builder = Curve::Builder; + + using Fq = Curve::fq_ct; + using Fr = Curve::bigfr_ct; + using G1 = Curve::g1_bigfr_ct; + + using field_ct = bb::stdlib::field_t; + using bool_ct = bb::stdlib::bool_t; + using byte_array_ct = bb::stdlib::byte_array; + + // Lambda to convert std::vector to byte_array_ct + auto fields_to_bytes = [](Builder& builder, std::vector& fields) -> byte_array_ct { + byte_array_ct result(&builder); + for (auto& field : fields) { + // Construct byte array of length 1 from the field element + // The constructor enforces that `field` fits in one byte + byte_array_ct byte_to_append(field, /*num_bytes=*/1); + // Append the new byte to the result + result.write(byte_to_append); + } + + return result; + }; + + // Define builder variables based on the witness indices + std::vector hashed_message_fields = fields_from_witnesses(builder, input.hashed_message); + 
std::vector r_fields = fields_from_witnesses(builder, std::span(input.signature.begin(), 32)); + std::vector s_fields = fields_from_witnesses(builder, std::span(input.signature.begin() + 32, 32)); + std::vector pub_x_fields = fields_from_witnesses(builder, input.pub_x_indices); + std::vector pub_y_fields = fields_from_witnesses(builder, input.pub_y_indices); + field_ct result_field = field_ct::from_witness_index(&builder, input.result); + + if (!has_valid_witness_assignments) { + // Fill builder variables in case of empty witness assignment + create_dummy_ecdsa_constraint( + builder, hashed_message_fields, r_fields, s_fields, pub_x_fields, pub_y_fields, result_field); + } + + // Step 1. + // Construct inputs to signature verification from witness indices + byte_array_ct hashed_message = fields_to_bytes(builder, hashed_message_fields); + byte_array_ct pub_x_bytes = fields_to_bytes(builder, pub_x_fields); + byte_array_ct pub_y_bytes = fields_to_bytes(builder, pub_y_fields); + byte_array_ct r = fields_to_bytes(builder, r_fields); + byte_array_ct s = fields_to_bytes(builder, s_fields); + bool_ct result = static_cast(result_field); // Constructor enforces result_field = 0 or 1 + + // Step 2. + // Reconstruct the public key from the byte representations of its coordinates + Fq pub_x(pub_x_bytes); + Fq pub_y(pub_y_bytes); + G1 public_key(pub_x, pub_y); + + // Step 3. + // Ensure uniqueness of the public key by asserting each of its coordinates is smaller than the modulus of the base + // field + pub_x.assert_is_in_field("ECDSA input validation: the x coordinate of the public key is larger than Fq::modulus"); + pub_y.assert_is_in_field("ECDSA input validation: the y coordinate of the public key is larger than Fq::modulus"); + + // Step 4. + bool_ct signature_result = + stdlib::ecdsa_verify_signature(hashed_message, public_key, { r, s }); + + // Step 5. 
+ // Assert that signature verification returned the expected result + signature_result.assert_equal(result); +} + +/** + * @brief Generate dummy ECDSA constraints when the builder doesn't have witnesses + * + * @details To avoid firing asserts, the public key must be a point on the curve + */ +template +void create_dummy_ecdsa_constraint(typename Curve::Builder& builder, + const std::vector>& hashed_message_fields, + const std::vector>& r_fields, + const std::vector>& s_fields, + const std::vector>& pub_x_fields, + const std::vector>& pub_y_fields, + const stdlib::field_t& result_field) +{ + using Builder = Curve::Builder; + using FqNative = Curve::fq; + using G1Native = Curve::g1; + using field_ct = stdlib::field_t; + + // Lambda to populate builder variables from vector of field values + auto populate_fields = [&builder](const std::vector& fields, const std::vector& values) { + for (auto [field, value] : zip_view(fields, values)) { + builder.set_variable(field.witness_index, value); + } + }; + + // Vector of 32 copies of bb::fr::zero() + std::vector mock_zeros(32, bb::fr::zero()); + + // Hashed message + populate_fields(hashed_message_fields, mock_zeros); + + // Signature + populate_fields(r_fields, mock_zeros); + populate_fields(s_fields, mock_zeros); + + // Pub key + std::array buffer_x; + std::array buffer_y; + std::vector mock_pub_x; + std::vector mock_pub_y; + FqNative::serialize_to_buffer(G1Native::one.x, &buffer_x[0]); + FqNative::serialize_to_buffer(G1Native::one.y, &buffer_y[0]); + for (auto [byte_x, byte_y] : zip_view(buffer_x, buffer_y)) { + mock_pub_x.emplace_back(bb::fr(byte_x)); + mock_pub_y.emplace_back(bb::fr(byte_y)); + } + populate_fields(pub_x_fields, mock_pub_x); + populate_fields(pub_y_fields, mock_pub_y); + + // Result + builder.set_variable(result_field.witness_index, bb::fr::one()); +} + +template void create_ecdsa_verify_constraints>( + UltraCircuitBuilder& builder, const EcdsaConstraint& input, bool has_valid_witness_assignments); 
+template void create_ecdsa_verify_constraints>( + MegaCircuitBuilder& builder, const EcdsaConstraint& input, bool has_valid_witness_assignments); +template void create_ecdsa_verify_constraints>( + UltraCircuitBuilder& builder, const EcdsaConstraint& input, bool has_valid_witness_assignments); +template void create_ecdsa_verify_constraints>( + MegaCircuitBuilder& builder, const EcdsaConstraint& input, bool has_valid_witness_assignments); + +template void create_dummy_ecdsa_constraint>( + UltraCircuitBuilder&, + const std::vector>&, + const std::vector>&, + const std::vector>&, + const std::vector>&, + const std::vector>&, + const stdlib::field_t&); + +template void create_dummy_ecdsa_constraint>( + UltraCircuitBuilder&, + const std::vector>&, + const std::vector>&, + const std::vector>&, + const std::vector>&, + const std::vector>&, + const stdlib::field_t&); + +} // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.hpp new file mode 100644 index 000000000000..0f2733d477bb --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.hpp @@ -0,0 +1,73 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } +// ===================== + +#pragma once +#include "barretenberg/crypto/ecdsa/ecdsa.hpp" +#include "barretenberg/dsl/acir_format/witness_constant.hpp" +#include "barretenberg/serialize/msgpack.hpp" +#include "barretenberg/stdlib/primitives/byte_array/byte_array.hpp" +#include + +namespace acir_format { + +using namespace bb; + +/** + * @brief ECDSA constraints + * + * @details ECDSA constraints have five components: + * 1. 
`hashed_message`, an array of length 32 representing the witness indices of the byte representation of the hash + * of the message for which the signature must be verified + * 2. `signature`, an array of length 64 representing the witness indices of the signature \f$(r, s)\f$ which must be + * verified. The components are represented as big-endian, 32-byte numbers. + * 3. `pub_x_indices`, an array of length 32 representing the witness indices of the byte representation of the x + * coordinate of the public key against which the signature should be verified. + * 4. `pub_y_indices`, an array of length 32 representing the witness indices of the byte representation of the y + * coordinate of the public key against which the signature should be verified. + * 5. `result`, an array of length 1 representing the witness index of the expected result of the signature + * verification. + */ +struct EcdsaConstraint { + // The byte representation of the hashed message. + std::array hashed_message; + + // The signature + std::array signature; + + // The public key against which the signature must be verified. + // Since Fr does not have enough bits to represent the prime field in + // secp256k1 or secp256r1, a byte array is used. + std::array pub_x_indices; + std::array pub_y_indices; + + // Predicate indicating whether the constraint should be disabled: + // - true: the constraint is valid + // - false: the constraint is disabled, i.e. it must not fail and can return whatever. 
+ WitnessOrConstant predicate; + + // Expected result of signature verification + uint32_t result; + + // For serialization, update with any new fields + MSGPACK_FIELDS(hashed_message, signature, pub_x_indices, pub_y_indices, predicate, result); + friend bool operator==(EcdsaConstraint const& lhs, EcdsaConstraint const& rhs) = default; +}; + +template +void create_ecdsa_verify_constraints(typename Curve::Builder& builder, + const EcdsaConstraint& input, + bool has_valid_witness_assignments = true); + +template +void create_dummy_ecdsa_constraint(typename Curve::Builder& builder, + const std::vector>& hashed_message_fields, + const std::vector>& r_fields, + const std::vector>& s_fields, + const std::vector>& pub_x_fields, + const std::vector>& pub_y_fields, + const stdlib::field_t& result_field); + +} // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.test.cpp new file mode 100644 index 000000000000..c832b44909d7 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.test.cpp @@ -0,0 +1,203 @@ +#include "barretenberg/dsl/acir_format/ecdsa_constraints.hpp" +#include "acir_format.hpp" +#include "acir_format_mocks.hpp" +#include "barretenberg/crypto/ecdsa/ecdsa.hpp" +#include "barretenberg/dsl/acir_format/utils.hpp" +#include "barretenberg/dsl/acir_format/witness_constant.hpp" +#include "barretenberg/stdlib/primitives/curves/secp256k1.hpp" +#include "barretenberg/stdlib/primitives/curves/secp256r1.hpp" + +#include +#include +#include + +using namespace bb; +using namespace bb::crypto; +using namespace acir_format; + +template class EcdsaConstraintsTest : public ::testing::Test { + public: + using Builder = Curve::Builder; + using FrNative = Curve::fr; + using FqNative = Curve::fq; + using G1Native = Curve::g1; + using Flavor = std::conditional_t, UltraFlavor, MegaFlavor>; + + // Reproducible test + static 
constexpr FrNative private_key = + FrNative("0xd67abee717b3fc725adf59e2cc8cd916435c348b277dd814a34e3ceb279436c2"); + + static size_t generate_ecdsa_constraint(EcdsaConstraint& ecdsa_constraint, + WitnessVector& witness_values, + bool tweak_pub_key_x = false, + bool tweak_pub_key_y = false) + { + std::string message_string = "Instructions unclear, ask again later."; + + // Hash the message + std::vector message_buffer(message_string.begin(), message_string.end()); + std::array hashed_message = Sha256Hasher::hash(message_buffer); + + // Generate ECDSA key pair + ecdsa_key_pair account; + account.private_key = private_key; + account.public_key = G1Native::one * account.private_key; + + // Generate signature + ecdsa_signature signature = + ecdsa_construct_signature(message_string, account); + + // Serialize public key coordinates into bytes + std::array buffer_x; + std::array buffer_y; + FqNative::serialize_to_buffer(account.public_key.x, &buffer_x[0]); + FqNative::serialize_to_buffer(account.public_key.y, &buffer_y[0]); + if (tweak_pub_key_x || tweak_pub_key_y) { + std::vector modulus_plus_one = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30 }; + for (auto [byte, tweaked_byte] : zip_view(tweak_pub_key_x ? 
buffer_x : buffer_y, modulus_plus_one)) { + byte = tweaked_byte; + } + } + + // Create witness indices and witnesses + size_t num_variables = 0; + + std::array hashed_message_indices = + add_to_witness_and_track_indices(witness_values, std::span(hashed_message)); + num_variables += hashed_message_indices.size(); + + std::array pub_x_indices = + add_to_witness_and_track_indices(witness_values, std::span(buffer_x)); + num_variables += pub_x_indices.size(); + + std::array pub_y_indices = + add_to_witness_and_track_indices(witness_values, std::span(buffer_y)); + num_variables += pub_y_indices.size(); + + std::array r_indices = + add_to_witness_and_track_indices(witness_values, std::span(signature.r)); + num_variables += r_indices.size(); + + std::array s_indices = + add_to_witness_and_track_indices(witness_values, std::span(signature.s)); + num_variables += s_indices.size(); + + uint32_t result_index = static_cast(num_variables); + bb::fr result = bb::fr::one(); + witness_values.emplace_back(result); + num_variables += 1; + + // Restructure vectors into array + std::array signature_indices; + std::ranges::copy(r_indices, signature_indices.begin()); + std::ranges::copy(s_indices, signature_indices.begin() + 32); + + ecdsa_constraint = EcdsaConstraint{ .hashed_message = hashed_message_indices, + .signature = signature_indices, + .pub_x_indices = pub_x_indices, + .pub_y_indices = pub_y_indices, + .predicate = WitnessOrConstant::from_constant(bb::fr::one()), + .result = result_index }; + + return num_variables; + } + + static std::pair generate_constraint_system(bool tweak_pub_key_x = false, + bool tweak_pub_key_y = false) + { + EcdsaConstraint ecdsa_constraint; + WitnessVector witness_values; + size_t num_variables = + generate_ecdsa_constraint(ecdsa_constraint, witness_values, tweak_pub_key_x, tweak_pub_key_y); + AcirFormat constraint_system = { + .varnum = static_cast(num_variables), + .num_acir_opcodes = 1, + .public_inputs = {}, + .original_opcode_indices = 
create_empty_original_opcode_indices(), + }; + + if constexpr (Curve::type == bb::CurveType::SECP256K1) { + constraint_system.ecdsa_k1_constraints = { ecdsa_constraint }; + } else { + constraint_system.ecdsa_r1_constraints = { ecdsa_constraint }; + } + + mock_opcode_indices(constraint_system); + + return { constraint_system, witness_values }; + } + + protected: + static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } +}; + +using CurveTypes = testing::Types, + stdlib::secp256r1, + stdlib::secp256k1, + stdlib::secp256r1>; + +TYPED_TEST_SUITE(EcdsaConstraintsTest, CurveTypes); + +TYPED_TEST(EcdsaConstraintsTest, GenerateVKFromConstraints) +{ + using Flavor = TestFixture::Flavor; + using Builder = TestFixture::Builder; + using ProvingKey = ProverInstance_; + using VerificationKey = Flavor::VerificationKey; + + auto [constraint_system, witness_values] = TestFixture::generate_constraint_system(); + + std::shared_ptr vk_from_witness; + { + AcirProgram program{ constraint_system, witness_values }; + auto builder = create_circuit(program); + info("Num gates: ", builder.get_estimated_num_finalized_gates()); + + auto prover_instance = std::make_shared(builder); + vk_from_witness = std::make_shared(prover_instance->get_precomputed()); + + // Validate the builder + EXPECT_TRUE(CircuitChecker::check(builder)); + } + + std::shared_ptr vk_from_constraint; + { + AcirProgram program{ constraint_system, /*witness=*/{} }; + auto builder = create_circuit(program); + auto prover_instance = std::make_shared(builder); + vk_from_constraint = std::make_shared(prover_instance->get_precomputed()); + } + + EXPECT_EQ(*vk_from_witness, *vk_from_constraint); +} + +TYPED_TEST(EcdsaConstraintsTest, NonUniquePubKey) +{ + // Disable asserts otherwise the test fails because the public keys are not on the curve + BB_DISABLE_ASSERTS(); + + for (size_t idx = 0; idx < 2; idx++) { + bool tweak_x = idx == 0; + bool tweak_y = idx == 1; + std::string failure_msg = + idx == 0 
+ ? "ECDSA input validation: the x coordinate of the public key is larger than Fq::modulus: hi limb." + : "ECDSA input validation: the y coordinate of the public key is larger than Fq::modulus: hi limb."; + + using Builder = TestFixture::Builder; + + auto [constraint_system, witness_values] = + TestFixture::generate_constraint_system(/*tweak_pub_key_x=*/tweak_x, /*tweak_pub_key_y=*/tweak_y); + + AcirProgram program{ constraint_system, witness_values }; + auto builder = create_circuit(program); + + // Validate the builder + EXPECT_FALSE(CircuitChecker::check(builder)); + + // Check error message + EXPECT_EQ(builder.err(), failure_msg); + } +} diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256k1.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256k1.cpp deleted file mode 100644 index e3ba2a997537..000000000000 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256k1.cpp +++ /dev/null @@ -1,157 +0,0 @@ -// === AUDIT STATUS === -// internal: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } -// ===================== - -#include "ecdsa_secp256k1.hpp" -#include "barretenberg/stdlib/encryption/ecdsa/ecdsa.hpp" - -namespace acir_format { - -using namespace bb; -using secp256k1_ct = bb::stdlib::secp256k1; - -template -secp256k1_ct::g1_ct ecdsa_convert_inputs(Builder* ctx, const bb::secp256k1::g1::affine_element& input) -{ - uint256_t x_u256(input.x); - uint256_t y_u256(input.y); - secp256k1_ct::fq_ct x( - witness_ct(ctx, bb::fr(x_u256.slice(0, secp256k1_ct::fq_ct::NUM_LIMB_BITS * 2))), - witness_ct( - ctx, bb::fr(x_u256.slice(secp256k1_ct::fq_ct::NUM_LIMB_BITS * 2, secp256k1_ct::fq_ct::NUM_LIMB_BITS * 4)))); - secp256k1_ct::fq_ct y( - witness_ct(ctx, bb::fr(y_u256.slice(0, secp256k1_ct::fq_ct::NUM_LIMB_BITS * 2))), - witness_ct( - ctx, 
bb::fr(y_u256.slice(secp256k1_ct::fq_ct::NUM_LIMB_BITS * 2, secp256k1_ct::fq_ct::NUM_LIMB_BITS * 4)))); - - return { x, y }; -} - -witness_ct ecdsa_index_to_witness(Builder& builder, uint32_t index) -{ - fr value = builder.get_variable(index); - return { &builder, value }; -} - -template -void create_ecdsa_k1_verify_constraints(Builder& builder, - const EcdsaSecp256k1Constraint& input, - bool has_valid_witness_assignments) -{ - using secp256k1_ct = bb::stdlib::secp256k1; - using field_ct = bb::stdlib::field_t; - using bool_ct = bb::stdlib::bool_t; - using byte_array_ct = bb::stdlib::byte_array; - - if (has_valid_witness_assignments == false) { - dummy_ecdsa_constraint(builder, input); - } - - auto new_sig = ecdsa_convert_signature(builder, input.signature); - - byte_array_ct message = ecdsa_array_of_bytes_to_byte_array(builder, input.hashed_message); - auto pub_key_x_byte_arr = ecdsa_array_of_bytes_to_byte_array(builder, input.pub_x_indices); - auto pub_key_y_byte_arr = ecdsa_array_of_bytes_to_byte_array(builder, input.pub_y_indices); - - auto pub_key_x_fq = typename secp256k1_ct::fq_ct(pub_key_x_byte_arr); - auto pub_key_y_fq = typename secp256k1_ct::fq_ct(pub_key_y_byte_arr); - - std::vector rr(new_sig.r.begin(), new_sig.r.end()); - std::vector ss(new_sig.s.begin(), new_sig.s.end()); - std::vector vv = { new_sig.v }; - - stdlib::ecdsa_signature sig{ stdlib::byte_array(&builder, rr), - stdlib::byte_array(&builder, ss), - stdlib::byte_array(&builder, vv) }; - - pub_key_x_fq.assert_is_in_field(); - pub_key_y_fq.assert_is_in_field(); - typename secp256k1_ct::g1_bigfr_ct public_key = typename secp256k1_ct::g1_bigfr_ct(pub_key_x_fq, pub_key_y_fq); - for (size_t i = 0; i < 32; ++i) { - sig.r[i].assert_equal(field_ct::from_witness_index(&builder, input.signature[i])); - sig.s[i].assert_equal(field_ct::from_witness_index(&builder, input.signature[i + 32])); - pub_key_x_byte_arr[i].assert_equal(field_ct::from_witness_index(&builder, input.pub_x_indices[i])); - 
pub_key_y_byte_arr[i].assert_equal(field_ct::from_witness_index(&builder, input.pub_y_indices[i])); - } - for (size_t i = 0; i < input.hashed_message.size(); ++i) { - message[i].assert_equal(field_ct::from_witness_index(&builder, input.hashed_message[i])); - } - - bool_ct signature_result = - stdlib::ecdsa_verify_signature_prehashed_message_noassert( - message, public_key, sig); - bool_ct signature_result_normalized = signature_result.normalize(); - builder.assert_equal(signature_result_normalized.witness_index, input.result); -} - -// Add dummy constraints for ECDSA because when the verifier creates the -// constraint system, they usually use zeroes for witness values. -// -// This does not work for ECDSA as the signature, r, s and public key need -// to be valid. -template void dummy_ecdsa_constraint(Builder& builder, EcdsaSecp256k1Constraint const& input) -{ - - std::array pub_x_indices_; - std::array pub_y_indices_; - std::array signature_; - std::array message_indices_; - - // Create a valid signature with a valid public key - crypto::ecdsa_key_pair account; - account.private_key = 10; - account.public_key = secp256k1_ct::g1::one * account.private_key; - uint256_t pub_x_value = account.public_key.x; - uint256_t pub_y_value = account.public_key.y; - std::string message_string = "Instructions unclear, ask again later."; - crypto::ecdsa_signature signature = - crypto::ecdsa_construct_signature( - message_string, account); - - // Create new variables which will reference the valid public key and signature. - // We don't use them in a gate, so when we call assert_equal, they will be - // replaced as if they never existed. 
- for (size_t i = 0; i < 32; ++i) { - uint32_t m_wit = builder.add_variable(input.hashed_message[i]); - uint32_t x_wit = builder.add_variable(pub_x_value.slice(248 - i * 8, 256 - i * 8)); - uint32_t y_wit = builder.add_variable(pub_y_value.slice(248 - i * 8, 256 - i * 8)); - uint32_t r_wit = builder.add_variable(signature.r[i]); - uint32_t s_wit = builder.add_variable(signature.s[i]); - message_indices_[i] = m_wit; - pub_x_indices_[i] = x_wit; - pub_y_indices_[i] = y_wit; - signature_[i] = r_wit; - signature_[i + 32] = s_wit; - } - - // Call assert_equal(from, to) to replace the value in `to` by the value in `from` - for (size_t i = 0; i < input.hashed_message.size(); ++i) { - builder.assert_equal(message_indices_[i], input.hashed_message[i]); - } - for (size_t i = 0; i < input.pub_x_indices.size(); ++i) { - builder.assert_equal(pub_x_indices_[i], input.pub_x_indices[i]); - } - for (size_t i = 0; i < input.pub_y_indices.size(); ++i) { - builder.assert_equal(pub_y_indices_[i], input.pub_y_indices[i]); - } - for (size_t i = 0; i < input.signature.size(); ++i) { - builder.assert_equal(signature_[i], input.signature[i]); - } -} - -template void create_ecdsa_k1_verify_constraints(UltraCircuitBuilder& builder, - const EcdsaSecp256k1Constraint& input, - bool has_valid_witness_assignments); -template void create_ecdsa_k1_verify_constraints(MegaCircuitBuilder& builder, - const EcdsaSecp256k1Constraint& input, - bool has_valid_witness_assignments); -template void dummy_ecdsa_constraint(UltraCircuitBuilder& builder, - EcdsaSecp256k1Constraint const& input); - -} // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256k1.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256k1.hpp deleted file mode 100644 index 10c63ae2b49f..000000000000 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256k1.hpp +++ /dev/null @@ -1,115 +0,0 @@ -// === AUDIT STATUS === -// internal: { status: not started, auditors: [], 
date: YYYY-MM-DD } -// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } -// ===================== - -#pragma once -#include "barretenberg/crypto/ecdsa/ecdsa.hpp" -#include "barretenberg/serialize/msgpack.hpp" -#include "barretenberg/stdlib/primitives/byte_array/byte_array.hpp" -#include "barretenberg/stdlib/primitives/witness/witness.hpp" -#include - -namespace acir_format { - -using Builder = bb::UltraCircuitBuilder; -using witness_ct = bb::stdlib::witness_t; - -struct EcdsaSecp256k1Constraint { - // This is the byte representation of the hashed message. - std::array hashed_message; - - // This is the computed signature - // - std::array signature; - - // This is the supposed public key which signed the - // message, giving rise to the signature. - // Since Fr does not have enough bits to represent - // the prime field in secp256k1, a byte array is used. - // Can also use low and hi where lo=128 bits - std::array pub_x_indices; - std::array pub_y_indices; - - // This is the result of verifying the signature - uint32_t result; - - // for serialization, update with any new fields - MSGPACK_FIELDS(hashed_message, signature, pub_x_indices, pub_y_indices, result); - friend bool operator==(EcdsaSecp256k1Constraint const& lhs, EcdsaSecp256k1Constraint const& rhs) = default; -}; - -template -void create_ecdsa_k1_verify_constraints(Builder& builder, - const EcdsaSecp256k1Constraint& input, - bool has_valid_witness_assignments = true); - -template void dummy_ecdsa_constraint(Builder& builder, EcdsaSecp256k1Constraint const& input); - -witness_ct ecdsa_index_to_witness(Builder& builder, uint32_t index); -template -bb::stdlib::byte_array ecdsa_array_of_bytes_to_byte_array(Builder& builder, - std::array vector_of_bytes) -{ - using byte_array_ct = bb::stdlib::byte_array; - using field_ct = bb::stdlib::field_t; - - byte_array_ct arr(&builder); - - // Get the witness assignment for each 
witness index - // Write the witness assignment to the byte_array - for (const auto& witness_index : vector_of_bytes) { - - field_ct element = field_ct::from_witness_index(&builder, witness_index); - size_t num_bytes = 1; - - byte_array_ct element_bytes(element, num_bytes); - arr.write(element_bytes); - } - return arr; -} - -// We have the implementation of this template in the header as this method is used -// by other ecdsa constraints over different curves (e.g. secp256r1). -// gcc needs to be able to see the implementation order to generate code for -// all Builder specializations (e.g. bb::Goblin::Builder vs. bb::UltraCircuitBuilder) -template -bb::crypto::ecdsa_signature ecdsa_convert_signature(Builder& builder, std::array signature) -{ - - bb::crypto::ecdsa_signature signature_cr; - - // Get the witness assignment for each witness index - // Write the witness assignment to the byte_array - - for (unsigned int i = 0; i < 32; i++) { - auto witness_index = signature[i]; - - std::vector fr_bytes(sizeof(bb::fr)); - - bb::fr value = builder.get_variable(witness_index); - - bb::fr::serialize_to_buffer(value, &fr_bytes[0]); - - signature_cr.r[i] = fr_bytes.back(); - } - - for (unsigned int i = 32; i < 64; i++) { - auto witness_index = signature[i]; - - std::vector fr_bytes(sizeof(bb::fr)); - - bb::fr value = builder.get_variable(witness_index); - - bb::fr::serialize_to_buffer(value, &fr_bytes[0]); - - signature_cr.s[i - 32] = fr_bytes.back(); - } - - signature_cr.v = 27; - - return signature_cr; -} - -} // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256k1.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256k1.test.cpp deleted file mode 100644 index 7190b916efdb..000000000000 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256k1.test.cpp +++ /dev/null @@ -1,227 +0,0 @@ -#include "ecdsa_secp256k1.hpp" -#include "acir_format.hpp" -#include "acir_format_mocks.hpp" -#include 
"barretenberg/crypto/ecdsa/ecdsa.hpp" - -#include "barretenberg/stdlib/primitives/curves/secp256k1.hpp" - -#include -#include - -using namespace bb; -using namespace bb::crypto; -using namespace acir_format; -using curve_ct = stdlib::secp256k1; - -class ECDSASecp256k1 : public ::testing::Test { - protected: - static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } -}; - -size_t generate_ecdsa_constraint(EcdsaSecp256k1Constraint& ecdsa_constraint, WitnessVector& witness_values) -{ - std::string message_string = "Instructions unclear, ask again later."; - - // hash the message since the dsl ecdsa gadget uses the prehashed message - // NOTE: If the hash being used outputs more than 32 bytes, then big-field will panic - std::vector message_buffer; - std::copy(message_string.begin(), message_string.end(), std::back_inserter(message_buffer)); - auto hashed_message = sha256(message_buffer); - - ecdsa_key_pair account; - account.private_key = curve_ct::fr::random_element(); - account.public_key = curve_ct::g1::one * account.private_key; - - ecdsa_signature signature = - ecdsa_construct_signature(message_string, account); - - uint256_t pub_x_value = account.public_key.x; - uint256_t pub_y_value = account.public_key.y; - - std::array message_in; - std::array pub_x_indices_in; - std::array pub_y_indices_in; - std::array signature_in; - size_t offset = 0; - for (size_t i = 0; i < hashed_message.size(); ++i) { - message_in[i] = static_cast(i + offset); - const auto byte = static_cast(hashed_message[i]); - witness_values.emplace_back(byte); - } - offset += message_in.size(); - - for (size_t i = 0; i < 32; ++i) { - pub_x_indices_in[i] = static_cast(i + offset); - witness_values.emplace_back(pub_x_value.slice(248 - i * 8, 256 - i * 8)); - } - offset += pub_x_indices_in.size(); - for (size_t i = 0; i < 32; ++i) { - pub_y_indices_in[i] = static_cast(i + offset); - witness_values.emplace_back(pub_y_value.slice(248 - i * 8, 256 - i * 8)); - } - offset 
+= pub_y_indices_in.size(); - for (size_t i = 0; i < 32; ++i) { - signature_in[i] = static_cast(i + offset); - witness_values.emplace_back(signature.r[i]); - } - offset += signature.r.size(); - for (size_t i = 0; i < 32; ++i) { - signature_in[i + 32] = static_cast(i + offset); - witness_values.emplace_back(signature.s[i]); - } - offset += signature.s.size(); - - witness_values.emplace_back(1); - const auto result_in = static_cast(offset); - offset += 1; - witness_values.emplace_back(1); - - ecdsa_constraint = EcdsaSecp256k1Constraint{ .hashed_message = message_in, - .signature = signature_in, - .pub_x_indices = pub_x_indices_in, - .pub_y_indices = pub_y_indices_in, - .result = result_in }; - return offset; -} - -TEST_F(ECDSASecp256k1, TestECDSAConstraintSucceed) -{ - EcdsaSecp256k1Constraint ecdsa_k1_constraint; - WitnessVector witness_values; - size_t num_variables = generate_ecdsa_constraint(ecdsa_k1_constraint, witness_values); - AcirFormat constraint_system{ - .varnum = static_cast(num_variables), - .num_acir_opcodes = 1, - .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - - .ecdsa_k1_constraints = { ecdsa_k1_constraint }, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, - .poly_triple_constraints = {}, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, - .original_opcode_indices = create_empty_original_opcode_indices(), - }; - mock_opcode_indices(constraint_system); - - AcirProgram program{ constraint_system, witness_values }; - auto builder = 
create_circuit(program); - - EXPECT_EQ(builder.get_variable(ecdsa_k1_constraint.result), 1); - - EXPECT_TRUE(CircuitChecker::check(builder)); -} - -// Test that the verifier can create an ECDSA circuit. -// The ECDSA circuit requires that certain dummy data is valid -// even though we are just building the circuit. -TEST_F(ECDSASecp256k1, TestECDSACompilesForVerifier) -{ - EcdsaSecp256k1Constraint ecdsa_k1_constraint; - WitnessVector witness_values; - size_t num_variables = generate_ecdsa_constraint(ecdsa_k1_constraint, witness_values); - AcirFormat constraint_system{ - .varnum = static_cast(num_variables), - .num_acir_opcodes = 1, - .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - - .ecdsa_k1_constraints = { ecdsa_k1_constraint }, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, - .poly_triple_constraints = {}, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, - .original_opcode_indices = create_empty_original_opcode_indices(), - }; - mock_opcode_indices(constraint_system); - - AcirProgram program{ constraint_system, /*witness=*/{} }; - auto builder = create_circuit(program); - - EXPECT_TRUE(CircuitChecker::check(builder)); -} - -TEST_F(ECDSASecp256k1, TestECDSAConstraintFail) -{ - EcdsaSecp256k1Constraint ecdsa_k1_constraint; - WitnessVector witness_values; - size_t num_variables = generate_ecdsa_constraint(ecdsa_k1_constraint, witness_values); - - // set result value to be false - witness_values[witness_values.size() - 1] = 0; - - // tamper with 
signature - witness_values[witness_values.size() - 20] += 1; - - AcirFormat constraint_system{ - .varnum = static_cast(num_variables), - .num_acir_opcodes = 1, - .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - - .ecdsa_k1_constraints = { ecdsa_k1_constraint }, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, - .poly_triple_constraints = {}, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, - .original_opcode_indices = create_empty_original_opcode_indices(), - }; - mock_opcode_indices(constraint_system); - - AcirProgram program{ constraint_system, witness_values }; - auto builder = create_circuit(program); - EXPECT_EQ(builder.get_variable(ecdsa_k1_constraint.result), 0); - - EXPECT_TRUE(CircuitChecker::check(builder)); -} diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256r1.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256r1.cpp deleted file mode 100644 index 821ec9fcb409..000000000000 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256r1.cpp +++ /dev/null @@ -1,163 +0,0 @@ -// === AUDIT STATUS === -// internal: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } -// ===================== - -#include "ecdsa_secp256r1.hpp" -#include "barretenberg/crypto/ecdsa/ecdsa.hpp" -#include "barretenberg/stdlib/encryption/ecdsa/ecdsa.hpp" -#include 
"barretenberg/stdlib/primitives/curves/secp256r1.hpp" -#include "ecdsa_secp256k1.hpp" - -using namespace bb; -using namespace bb::crypto; - -namespace acir_format { - -using secp256r1_ct = stdlib::secp256r1; - -secp256r1_ct::g1_ct ecdsa_convert_inputs(Builder* ctx, const secp256r1::g1::affine_element& input) -{ - uint256_t x_u256(input.x); - uint256_t y_u256(input.y); - secp256r1_ct::fq_ct x( - witness_ct(ctx, bb::fr(x_u256.slice(0, secp256r1_ct::fq_ct::NUM_LIMB_BITS * 2))), - witness_ct( - ctx, bb::fr(x_u256.slice(secp256r1_ct::fq_ct::NUM_LIMB_BITS * 2, secp256r1_ct::fq_ct::NUM_LIMB_BITS * 4)))); - secp256r1_ct::fq_ct y( - witness_ct(ctx, bb::fr(y_u256.slice(0, secp256r1_ct::fq_ct::NUM_LIMB_BITS * 2))), - witness_ct( - ctx, bb::fr(y_u256.slice(secp256r1_ct::fq_ct::NUM_LIMB_BITS * 2, secp256r1_ct::fq_ct::NUM_LIMB_BITS * 4)))); - - return { x, y }; -} - -template -void create_ecdsa_r1_verify_constraints(Builder& builder, - const EcdsaSecp256r1Constraint& input, - bool has_valid_witness_assignments) -{ - using secp256r1_ct = bb::stdlib::secp256r1; - using bool_ct = bb::stdlib::bool_t; - using field_ct = bb::stdlib::field_t; - using byte_array_ct = bb::stdlib::byte_array; - - if (has_valid_witness_assignments == false) { - dummy_ecdsa_constraint(builder, input); - } - - auto new_sig = ecdsa_convert_signature(builder, input.signature); - - byte_array_ct message = ecdsa_array_of_bytes_to_byte_array(builder, input.hashed_message); - auto pub_key_x_byte_arr = ecdsa_array_of_bytes_to_byte_array(builder, input.pub_x_indices); - auto pub_key_y_byte_arr = ecdsa_array_of_bytes_to_byte_array(builder, input.pub_y_indices); - - auto pub_key_x_fq = typename secp256r1_ct::fq_ct(pub_key_x_byte_arr); - auto pub_key_y_fq = typename secp256r1_ct::fq_ct(pub_key_y_byte_arr); - - std::vector rr(new_sig.r.begin(), new_sig.r.end()); - std::vector ss(new_sig.s.begin(), new_sig.s.end()); - std::vector vv = { new_sig.v }; - - stdlib::ecdsa_signature sig{ stdlib::byte_array(&builder, rr), - 
stdlib::byte_array(&builder, ss), - stdlib::byte_array(&builder, vv) }; - - pub_key_x_fq.assert_is_in_field(); - pub_key_y_fq.assert_is_in_field(); - typename secp256r1_ct::g1_bigfr_ct public_key = typename secp256r1_ct::g1_bigfr_ct(pub_key_x_fq, pub_key_y_fq); - for (size_t i = 0; i < 32; ++i) { - sig.r[i].assert_equal(field_ct::from_witness_index(&builder, input.signature[i])); - sig.s[i].assert_equal(field_ct::from_witness_index(&builder, input.signature[i + 32])); - pub_key_x_byte_arr[i].assert_equal(field_ct::from_witness_index(&builder, input.pub_x_indices[i])); - pub_key_y_byte_arr[i].assert_equal(field_ct::from_witness_index(&builder, input.pub_y_indices[i])); - } - for (size_t i = 0; i < input.hashed_message.size(); ++i) { - message[i].assert_equal(field_ct::from_witness_index(&builder, input.hashed_message[i])); - } - - bool_ct signature_result = - stdlib::ecdsa_verify_signature_prehashed_message_noassert( - message, public_key, sig); - bool_ct signature_result_normalized = signature_result.normalize(); - builder.assert_equal(signature_result_normalized.witness_index, input.result); -} - -// Add dummy constraints for ECDSA because when the verifier creates the -// constraint system, they usually use zeroes for witness values. -// -// This does not work for ECDSA as the signature, r, s and public key need -// to be valid. 
-template void dummy_ecdsa_constraint(Builder& builder, EcdsaSecp256r1Constraint const& input) -{ - - std::array pub_x_indices_; - std::array pub_y_indices_; - std::array signature_; - std::array message_indices_; - - // Create a valid signature with a valid public key - std::string message_string = "Instructions unclear, ask again later."; - - // hash the message since the dsl ecdsa gadget uses the prehashed message - // NOTE: If the hash being used outputs more than 32 bytes, then big-field will panic - std::vector message_buffer; - std::copy(message_string.begin(), message_string.end(), std::back_inserter(message_buffer)); - auto hashed_message = sha256(message_buffer); - - ecdsa_key_pair account; - account.private_key = 10; - account.public_key = secp256r1::g1::one * account.private_key; - - ecdsa_signature signature = - ecdsa_construct_signature(message_string, account); - - uint256_t pub_x_value = account.public_key.x; - uint256_t pub_y_value = account.public_key.y; - - // Create new variables which will reference the valid public key and signature. - // We don't use them in a gate, so when we call assert_equal, they will be - // replaced as if they never existed. 
- for (size_t i = 0; i < 32; ++i) { - uint32_t m_wit = builder.add_variable(hashed_message[i]); - uint32_t x_wit = builder.add_variable(pub_x_value.slice(248 - i * 8, 256 - i * 8)); - uint32_t y_wit = builder.add_variable(pub_y_value.slice(248 - i * 8, 256 - i * 8)); - uint32_t r_wit = builder.add_variable(signature.r[i]); - uint32_t s_wit = builder.add_variable(signature.s[i]); - message_indices_[i] = m_wit; - pub_x_indices_[i] = x_wit; - pub_y_indices_[i] = y_wit; - signature_[i] = r_wit; - signature_[i + 32] = s_wit; - } - - // Call assert_equal(from, to) to replace the value in `to` by the value in `from` - for (size_t i = 0; i < input.hashed_message.size(); ++i) { - builder.assert_equal(message_indices_[i], input.hashed_message[i]); - } - for (size_t i = 0; i < input.pub_x_indices.size(); ++i) { - builder.assert_equal(pub_x_indices_[i], input.pub_x_indices[i]); - } - for (size_t i = 0; i < input.pub_y_indices.size(); ++i) { - builder.assert_equal(pub_y_indices_[i], input.pub_y_indices[i]); - } - for (size_t i = 0; i < input.signature.size(); ++i) { - builder.assert_equal(signature_[i], input.signature[i]); - } -} - -template void create_ecdsa_r1_verify_constraints(UltraCircuitBuilder& builder, - const EcdsaSecp256r1Constraint& input, - bool has_valid_witness_assignments); -template void create_ecdsa_r1_verify_constraints(MegaCircuitBuilder& builder, - const EcdsaSecp256r1Constraint& input, - bool has_valid_witness_assignments); -template void dummy_ecdsa_constraint(UltraCircuitBuilder& builder, - EcdsaSecp256r1Constraint const& input); - -} // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256r1.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256r1.hpp deleted file mode 100644 index 62c3061d0f23..000000000000 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256r1.hpp +++ /dev/null @@ -1,64 +0,0 @@ -// === AUDIT STATUS === -// internal: { status: not started, auditors: [], date: 
YYYY-MM-DD } -// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } -// ===================== - -#pragma once -#include "barretenberg/common/serialize.hpp" -#include -#include -#include - -namespace acir_format { - -struct EcdsaSecp256r1Constraint { - // This is the byte representation of the hashed message. - std::array hashed_message; - - // This is the supposed public key which signed the - // message, giving rise to the signature. - // Since Fr does not have enough bits to represent - // the prime field in secp256r1, a byte array is used. - // Can also use low and hi where lo=128 bits - std::array pub_x_indices; - std::array pub_y_indices; - - // This is the result of verifying the signature - uint32_t result; - - // This is the computed signature - // - std::array signature; - - friend bool operator==(EcdsaSecp256r1Constraint const& lhs, EcdsaSecp256r1Constraint const& rhs) = default; -}; - -template -void create_ecdsa_r1_verify_constraints(Builder& builder, - const EcdsaSecp256r1Constraint& input, - bool has_valid_witness_assignments = true); - -template void dummy_ecdsa_constraint(Builder& builder, EcdsaSecp256r1Constraint const& input); - -template inline void read(B& buf, EcdsaSecp256r1Constraint& constraint) -{ - using serialize::read; - read(buf, constraint.hashed_message); - read(buf, constraint.signature); - read(buf, constraint.pub_x_indices); - read(buf, constraint.pub_y_indices); - read(buf, constraint.result); -} - -template inline void write(B& buf, EcdsaSecp256r1Constraint const& constraint) -{ - using serialize::write; - write(buf, constraint.hashed_message); - write(buf, constraint.signature); - write(buf, constraint.pub_x_indices); - write(buf, constraint.pub_y_indices); - write(buf, constraint.result); -} - -} // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256r1.test.cpp 
b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256r1.test.cpp deleted file mode 100644 index 3e03751085ef..000000000000 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256r1.test.cpp +++ /dev/null @@ -1,313 +0,0 @@ -#include "ecdsa_secp256r1.hpp" -#include "acir_format.hpp" -#include "acir_format_mocks.hpp" -#include "barretenberg/crypto/ecdsa/ecdsa.hpp" - -#include "barretenberg/stdlib/primitives/curves/secp256r1.hpp" - -#include -#include - -using namespace bb; -using namespace bb::crypto; -using namespace acir_format; - -using curve_ct = stdlib::secp256r1; - -// Generate r1 constraints given pre generated pubkey, sig and message values -size_t generate_r1_constraints(EcdsaSecp256r1Constraint& ecdsa_r1_constraint, - WitnessVector& witness_values, - uint256_t pub_x_value, - uint256_t pub_y_value, - std::array hashed_message, - ecdsa_signature signature) -{ - - std::array message_in; - std::array pub_x_indices_in; - std::array pub_y_indices_in; - std::array signature_in; - size_t offset = 0; - for (size_t i = 0; i < hashed_message.size(); ++i) { - message_in[i] = static_cast(i + offset); - const auto byte = static_cast(hashed_message[i]); - witness_values.emplace_back(byte); - } - offset += message_in.size(); - - for (size_t i = 0; i < 32; ++i) { - pub_x_indices_in[i] = static_cast(i + offset); - witness_values.emplace_back(pub_x_value.slice(248 - i * 8, 256 - i * 8)); - } - offset += pub_x_indices_in.size(); - for (size_t i = 0; i < 32; ++i) { - pub_y_indices_in[i] = static_cast(i + offset); - witness_values.emplace_back(pub_y_value.slice(248 - i * 8, 256 - i * 8)); - } - offset += pub_y_indices_in.size(); - for (size_t i = 0; i < 32; ++i) { - signature_in[i] = static_cast(i + offset); - witness_values.emplace_back(signature.r[i]); - } - offset += signature.r.size(); - for (size_t i = 0; i < 32; ++i) { - signature_in[i + 32] = static_cast(i + offset); - witness_values.emplace_back(signature.s[i]); - } - offset += 
signature.s.size(); - - witness_values.emplace_back(1); - const auto result_in = static_cast(offset); - offset += 1; - witness_values.emplace_back(1); - - ecdsa_r1_constraint = EcdsaSecp256r1Constraint{ - .hashed_message = message_in, - .pub_x_indices = pub_x_indices_in, - .pub_y_indices = pub_y_indices_in, - .result = result_in, - .signature = signature_in, - }; - return offset; -} - -size_t generate_ecdsa_constraint(EcdsaSecp256r1Constraint& ecdsa_r1_constraint, WitnessVector& witness_values) -{ - - std::string message_string = "Instructions unclear, ask again later."; - - // hash the message since the dsl ecdsa gadget uses the prehashed message - // NOTE: If the hash being used outputs more than 32 bytes, then big-field will panic - std::vector message_buffer; - std::copy(message_string.begin(), message_string.end(), std::back_inserter(message_buffer)); - auto hashed_message = sha256(message_buffer); - - ecdsa_key_pair account; - account.private_key = curve_ct::fr::random_element(); - account.public_key = curve_ct::g1::one * account.private_key; - - ecdsa_signature signature = - ecdsa_construct_signature(message_string, account); - - return generate_r1_constraints( - ecdsa_r1_constraint, witness_values, account.public_key.x, account.public_key.y, hashed_message, signature); -} - -TEST(ECDSASecp256r1, test_hardcoded) -{ - bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); - EcdsaSecp256r1Constraint ecdsa_r1_constraint; - WitnessVector witness_values; - - std::string message = "ECDSA proves knowledge of a secret number in the context of a single message"; - std::array hashed_message = { - 84, 112, 91, 163, 186, 175, 219, 223, 186, 140, 95, 154, 112, 247, 168, 155, - 238, 152, 217, 6, 181, 62, 49, 7, 77, 167, 186, 236, 220, 13, 169, 173, - }; - - uint256_t pub_key_x = uint256_t("550f471003f3df97c3df506ac797f6721fb1a1fb7b8f6f83d224498a65c88e24"); - uint256_t pub_key_y = uint256_t("136093d7012e509a73715cbd0b00a3cc0ff4b5c01b3ffa196ab1fb327036b8e6"); - - // 
0x2c70a8d084b62bfc5ce03641caf9f72ad4da8c81bfe6ec9487bb5e1bef62a13218ad9ee29eaf351fdc50f1520c425e9b908a07278b43b0ec7b872778c14e0784 - ecdsa_signature signature = { .r = { 44, 112, 168, 208, 132, 182, 43, 252, 92, 224, 54, 65, 202, 249, 247, 42, - 212, 218, 140, 129, 191, 230, 236, 148, 135, 187, 94, 27, 239, 98, 161, 50 }, - .s = { 24, 173, 158, 226, 158, 175, 53, 31, 220, 80, 241, 82, 12, 66, 94, 155, - 144, 138, 7, 39, 139, 67, 176, 236, 123, 135, 39, 120, 193, 78, 7, 132 }, - .v = 0 }; - - ecdsa_key_pair account; - account.private_key = curve_ct::fr(uint256_t("0202020202020202020202020202020202020202020202020202020202020202")); - - account.public_key = curve_ct::g1::one * account.private_key; - - size_t num_variables = - generate_r1_constraints(ecdsa_r1_constraint, witness_values, pub_key_x, pub_key_y, hashed_message, signature); - - AcirFormat constraint_system{ - .varnum = static_cast(num_variables), - .num_acir_opcodes = 1, - .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = { ecdsa_r1_constraint }, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, - .poly_triple_constraints = {}, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, - .original_opcode_indices = create_empty_original_opcode_indices(), - }; - mock_opcode_indices(constraint_system); - - secp256r1::g1::affine_element pub_key = { pub_key_x, pub_key_y }; - bool we_ballin = - ecdsa_verify_signature(message, pub_key, signature); - EXPECT_EQ(we_ballin, true); - - 
AcirProgram program{ constraint_system, witness_values }; - auto builder = create_circuit(program); - - EXPECT_EQ(builder.get_variable(ecdsa_r1_constraint.result), 1); - EXPECT_TRUE(CircuitChecker::check(builder)); -} - -TEST(ECDSASecp256r1, TestECDSAConstraintSucceed) -{ - bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); - EcdsaSecp256r1Constraint ecdsa_r1_constraint; - WitnessVector witness_values; - size_t num_variables = generate_ecdsa_constraint(ecdsa_r1_constraint, witness_values); - AcirFormat constraint_system{ - .varnum = static_cast(num_variables), - .num_acir_opcodes = 1, - .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = { ecdsa_r1_constraint }, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, - .poly_triple_constraints = {}, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, - .original_opcode_indices = create_empty_original_opcode_indices(), - }; - mock_opcode_indices(constraint_system); - - AcirProgram program{ constraint_system, witness_values }; - auto builder = create_circuit(program); - - EXPECT_EQ(builder.get_variable(ecdsa_r1_constraint.result), 1); - EXPECT_TRUE(CircuitChecker::check(builder)); -} - -// Test that the verifier can create an ECDSA circuit. -// The ECDSA circuit requires that certain dummy data is valid -// even though we are just building the circuit. 
-TEST(ECDSASecp256r1, TestECDSACompilesForVerifier) -{ - bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); - EcdsaSecp256r1Constraint ecdsa_r1_constraint; - WitnessVector witness_values; - size_t num_variables = generate_ecdsa_constraint(ecdsa_r1_constraint, witness_values); - AcirFormat constraint_system{ - .varnum = static_cast(num_variables), - .num_acir_opcodes = 1, - .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = { ecdsa_r1_constraint }, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, - .poly_triple_constraints = {}, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, - .original_opcode_indices = create_empty_original_opcode_indices(), - }; - mock_opcode_indices(constraint_system); - - AcirProgram program{ constraint_system, /*witness=*/{} }; - auto builder = create_circuit(program); - - EXPECT_TRUE(CircuitChecker::check(builder)); -} - -TEST(ECDSASecp256r1, TestECDSAConstraintFail) -{ - bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); - EcdsaSecp256r1Constraint ecdsa_r1_constraint; - WitnessVector witness_values; - size_t num_variables = generate_ecdsa_constraint(ecdsa_r1_constraint, witness_values); - - // set result value to be false - witness_values[witness_values.size() - 1] = 0; - - // tamper with signature - witness_values[witness_values.size() - 20] += 1; - - AcirFormat constraint_system{ - .varnum = static_cast(num_variables), - .num_acir_opcodes = 1, - .public_inputs = {}, - .logic_constraints = 
{}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = { ecdsa_r1_constraint }, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, - .poly_triple_constraints = {}, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, - .original_opcode_indices = create_empty_original_opcode_indices(), - }; - mock_opcode_indices(constraint_system); - - AcirProgram program{ constraint_system, witness_values }; - auto builder = create_circuit(program); - - EXPECT_EQ(builder.get_variable(ecdsa_r1_constraint.result), 0); - - EXPECT_TRUE(CircuitChecker::check(builder)); -} diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/honk_recursion_constraint.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/honk_recursion_constraint.cpp index c89f27f73c7a..1a8b22d9d4ee 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/honk_recursion_constraint.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/honk_recursion_constraint.cpp @@ -7,6 +7,7 @@ #include "honk_recursion_constraint.hpp" #include "barretenberg/common/assert.hpp" #include "barretenberg/constants.hpp" +#include "barretenberg/dsl/acir_format/mock_verifier_inputs.hpp" #include "barretenberg/flavor/flavor.hpp" #include "barretenberg/flavor/ultra_recursive_flavor.hpp" #include "barretenberg/flavor/ultra_rollup_recursive_flavor.hpp" @@ -54,150 +55,36 @@ void create_dummy_vkey_and_proof(typename Flavor::CircuitBuilder& builder, { using Builder = typename Flavor::CircuitBuilder; using NativeFlavor = typename 
Flavor::NativeFlavor; - - static constexpr size_t IPA_CLAIM_SIZE = stdlib::recursion::honk::RollupIO::IpaClaim::PUBLIC_INPUTS_SIZE; + using IO = std::conditional_t, + stdlib::recursion::honk::RollupIO, + stdlib::recursion::honk::DefaultIO>; // Set vkey->circuit_size correctly based on the proof size BB_ASSERT_EQ(proof_size, NativeFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS()); - // a lambda that adds dummy commitments (libra and gemini) - auto set_dummy_commitment = [&](size_t& offset) { - auto comm = curve::BN254::AffineElement::one() * fr::random_element(); - auto frs = field_conversion::convert_to_bn254_frs(comm); - builder.set_variable(proof_fields[offset].witness_index, frs[0]); - builder.set_variable(proof_fields[offset + 1].witness_index, frs[1]); - builder.set_variable(proof_fields[offset + 2].witness_index, frs[2]); - builder.set_variable(proof_fields[offset + 3].witness_index, frs[3]); - offset += 4; - }; - - auto set_dummy_evaluation = [&](size_t& offset) { - builder.set_variable(proof_fields[offset].witness_index, fr::random_element()); - offset++; - }; - // Note: this computation should always result in log_circuit_size = CONST_PROOF_SIZE_LOG_N - auto log_circuit_size = CONST_PROOF_SIZE_LOG_N; - size_t offset = 0; - // First key field is circuit size - builder.set_variable(key_fields[offset++].witness_index, 1 << log_circuit_size); - // Second key field is number of public inputs - builder.set_variable(key_fields[offset++].witness_index, public_inputs_size); - // Third key field is the pub inputs offset + size_t num_inner_public_inputs = public_inputs_size - IO::PUBLIC_INPUTS_SIZE; uint32_t pub_inputs_offset = NativeFlavor::has_zero_row ? 1 : 0; - builder.set_variable(key_fields[offset++].witness_index, pub_inputs_offset); - size_t num_inner_public_inputs = HasIPAAccumulator ? 
public_inputs_size - bb::RollupIO::PUBLIC_INPUTS_SIZE - : public_inputs_size - bb::DefaultIO::PUBLIC_INPUTS_SIZE; - for (size_t i = 0; i < Flavor::NUM_PRECOMPUTED_ENTITIES; ++i) { - set_dummy_commitment(offset); - } + // Generate mock honk vk + auto honk_vk = create_mock_honk_vk( + 1 << Flavor::VIRTUAL_LOG_N, pub_inputs_offset, num_inner_public_inputs); - offset = 0; // Reset offset for parsing proof fields - - // the inner public inputs - for (size_t i = 0; i < num_inner_public_inputs; i++) { - set_dummy_evaluation(offset); - } + size_t offset = 0; - // Get some values for a valid aggregation object and use them here to avoid divide by 0 or other issues. - std::array::PUBLIC_INPUTS_SIZE> dummy_pairing_points_values = - PairingPoints::construct_dummy(); - for (size_t i = 0; i < PairingPoints::PUBLIC_INPUTS_SIZE; i++) { - builder.set_variable(proof_fields[offset].witness_index, dummy_pairing_points_values[i]); + // Set honk vk in builder + for (auto& vk_element : honk_vk->to_field_elements()) { + builder.set_variable(key_fields[offset].witness_index, vk_element); offset++; } - // IPA claim - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1392): Don't use random elements here. 
- if constexpr (HasIPAAccumulator) { - for (size_t i = 0; i < IPA_CLAIM_SIZE; i++) { - set_dummy_evaluation(offset); - } - } + // Generate dummy honk proof + bb::HonkProof honk_proof = create_mock_honk_proof(num_inner_public_inputs); - // first NUM_WITNESS_ENTITIES witness commitments - for (size_t i = 0; i < Flavor::NUM_WITNESS_ENTITIES; i++) { - set_dummy_commitment(offset); - } - - if constexpr (Flavor::HasZK) { - // Libra concatenation commitment - set_dummy_commitment(offset); - // libra sum - set_dummy_evaluation(offset); - } - - // now the univariates, which can just be 0s (8*CONST_PROOF_SIZE_LOG_N Frs, where 8 is the maximum relation - // degree) - for (size_t i = 0; i < CONST_PROOF_SIZE_LOG_N * Flavor::BATCHED_RELATION_PARTIAL_LENGTH; i++) { - set_dummy_evaluation(offset); - } - - // now the sumcheck evaluations, which is just 44 0s - for (size_t i = 0; i < Flavor::NUM_ALL_ENTITIES; i++) { - set_dummy_evaluation(offset); - } - - if constexpr (Flavor::HasZK) { - // Libra claimed evaluation - set_dummy_evaluation(offset); - // Libra grand sum commitment - - set_dummy_commitment(offset); - // Libra quotient commitment - set_dummy_commitment(offset); - // Gemini masking commitment - set_dummy_commitment(offset); - // Gemini masking evaluation - set_dummy_evaluation(offset); - } - - // now the gemini fold commitments which are CONST_PROOF_SIZE_LOG_N - 1 - for (size_t i = 1; i < CONST_PROOF_SIZE_LOG_N; i++) { - set_dummy_commitment(offset); - } - - // the gemini fold evaluations which are also CONST_PROOF_SIZE_LOG_N - for (size_t i = 1; i <= CONST_PROOF_SIZE_LOG_N; i++) { - set_dummy_evaluation(offset); - } - - if constexpr (Flavor::HasZK) { - // NUM_SMALL_IPA_EVALUATIONS libra evals - for (size_t i = 0; i < NUM_SMALL_IPA_EVALUATIONS; i++) { - set_dummy_evaluation(offset); - } - } - - // lastly the shplonk batched quotient commitment and kzg quotient commitment - for (size_t i = 0; i < 2; i++) { - set_dummy_commitment(offset); - } - // IPA Proof - if constexpr 
(HasIPAAccumulator) { - - // Ls and Rs - for (size_t i = 0; i < static_cast(2) * CONST_ECCVM_LOG_N; i++) { - auto comm = curve::Grumpkin::AffineElement::one() * fq::random_element(); - auto frs = field_conversion::convert_to_bn254_frs(comm); - builder.set_variable(proof_fields[offset].witness_index, frs[0]); - builder.set_variable(proof_fields[offset + 1].witness_index, frs[1]); - offset += 2; - } - - // G_zero - auto G_zero = curve::Grumpkin::AffineElement::one() * fq::random_element(); - auto G_zero_frs = field_conversion::convert_to_bn254_frs(G_zero); - builder.set_variable(proof_fields[offset].witness_index, G_zero_frs[0]); - builder.set_variable(proof_fields[offset + 1].witness_index, G_zero_frs[1]); - offset += 2; - - // a_zero - auto a_zero = fq::random_element(); - auto a_zero_frs = field_conversion::convert_to_bn254_frs(a_zero); - builder.set_variable(proof_fields[offset].witness_index, a_zero_frs[0]); - builder.set_variable(proof_fields[offset + 1].witness_index, a_zero_frs[1]); - offset += 2; + offset = 0; + // Set honk proof in builder + for (auto& proof_element : honk_proof) { + builder.set_variable(proof_fields[offset].witness_index, proof_element); + offset++; } BB_ASSERT_EQ(offset, proof_size + public_inputs_size); @@ -232,26 +119,15 @@ HonkRecursionConstraintOutput create_honk_recur // Construct an in-circuit representation of the verification key. // For now, the v-key is a circuit constant and is fixed for the circuit. // (We may need a separate recursion opcode for this to vary, or add more config witnesses to this opcode) - std::vector> key_fields; - key_fields.reserve(input.key.size()); - for (const auto& idx : input.key) { - auto field = field_ct::from_witness_index(&builder, idx); - key_fields.emplace_back(field); - } + std::vector> key_fields = RecursionConstraint::fields_from_witnesses(builder, input.key); // Create circuit type for vkey hash. 
auto vk_hash = field_ct::from_witness_index(&builder, input.key_hash); - stdlib::Proof proof_fields; - // Create witness indices for the proof with public inputs reinserted std::vector proof_indices = - ProofSurgeon::create_indices_for_reconstructed_proof(input.proof, input.public_inputs); - proof_fields.reserve(proof_indices.size()); - for (const auto& idx : proof_indices) { - auto field = field_ct::from_witness_index(&builder, idx); - proof_fields.emplace_back(field); - } + ProofSurgeon::create_indices_for_reconstructed_proof(input.proof, input.public_inputs); + stdlib::Proof proof_fields = RecursionConstraint::fields_from_witnesses(builder, proof_indices); // Populate the key fields and proof fields with dummy values to prevent issues (e.g. points must be on curve). if (!has_valid_witness_assignments) { @@ -265,7 +141,7 @@ HonkRecursionConstraintOutput create_honk_recur } // Recursively verify the proof - auto vkey = std::make_shared(builder, key_fields); + auto vkey = std::make_shared(key_fields); auto vk_and_hash = std::make_shared(vkey, vk_hash); RecursiveVerifier verifier(&builder, vk_and_hash); UltraRecursiveVerifierOutput verifier_output = verifier.template verify_proof(proof_fields); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/honk_recursion_constraint.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/honk_recursion_constraint.test.cpp index 2ce2731a07d0..1ad4a1e1a5a1 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/honk_recursion_constraint.test.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/honk_recursion_constraint.test.cpp @@ -1,8 +1,9 @@ #include "honk_recursion_constraint.hpp" #include "acir_format.hpp" #include "acir_format_mocks.hpp" +#include "barretenberg/numeric/uint256/uint256.hpp" #include "barretenberg/special_public_inputs/special_public_inputs.hpp" -#include "barretenberg/ultra_honk/decider_proving_key.hpp" +#include "barretenberg/ultra_honk/prover_instance.hpp" #include 
"barretenberg/ultra_honk/ultra_prover.hpp" #include "barretenberg/ultra_honk/ultra_verifier.hpp" #include "proof_surgeon.hpp" @@ -18,7 +19,7 @@ template class AcirHonkRecursionConstraint : public : public: using InnerFlavor = typename RecursiveFlavor::NativeFlavor; using InnerBuilder = typename InnerFlavor::CircuitBuilder; - using InnerDeciderProvingKey = DeciderProvingKey_; + using InnerProverInstance = ProverInstance_; using InnerProver = bb::UltraProver_; using InnerVerificationKey = typename InnerFlavor::VerificationKey; using InnerVerifier = bb::UltraVerifier_; @@ -27,7 +28,7 @@ template class AcirHonkRecursionConstraint : public : std::conditional_t, MegaFlavor, std::conditional_t, UltraRollupFlavor, UltraFlavor>>; - using OuterDeciderProvingKey = DeciderProvingKey_; + using OuterProverInstance = ProverInstance_; using OuterProver = bb::UltraProver_; using OuterVerificationKey = typename OuterFlavor::VerificationKey; using OuterVerifier = bb::UltraVerifier_; @@ -106,28 +107,7 @@ template class AcirHonkRecursionConstraint : public : .public_inputs = { 1, 2 }, .logic_constraints = { logic_constraint }, .range_constraints = { range_a, range_b }, - .aes128_constraints = {}, - .sha256_compression = {}, - - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, .poly_triple_constraints = { expr_a, expr_b, expr_c, expr_d }, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, .original_opcode_indices = create_empty_original_opcode_indices(), }; mock_opcode_indices(constraint_system); @@ -165,9 +145,9 @@ template 
class AcirHonkRecursionConstraint : public : for (auto& inner_circuit : inner_circuits) { - auto proving_key = std::make_shared(inner_circuit); - auto verification_key = std::make_shared(proving_key->get_precomputed()); - InnerProver prover(proving_key, verification_key); + auto prover_instance = std::make_shared(inner_circuit); + auto verification_key = std::make_shared(prover_instance->get_precomputed()); + InnerProver prover(prover_instance, verification_key); InnerVerifier verifier(verification_key); auto inner_proof = prover.construct_proof(); @@ -189,7 +169,7 @@ template class AcirHonkRecursionConstraint : public : }(); auto [key_indices, key_hash_index, proof_indices, inner_public_inputs] = - ProofSurgeon::populate_recursion_witness_data( + ProofSurgeon::populate_recursion_witness_data( witness, proof_witnesses, key_witnesses, key_hash_witness, num_public_inputs_to_extract); RecursionConstraint honk_recursion_constraint{ @@ -225,7 +205,7 @@ template class AcirHonkRecursionConstraint : public : return outer_circuit; } - bool verify_proof(const std::shared_ptr& proving_key, + bool verify_proof(const std::shared_ptr& prover_instance, const std::shared_ptr& verification_key, const HonkProof& proof) { @@ -236,7 +216,7 @@ template class AcirHonkRecursionConstraint : public : if constexpr (HasIPAAccumulator) { VerifierCommitmentKey ipa_verification_key(1 << CONST_ECCVM_LOG_N); OuterVerifier verifier(verification_key, ipa_verification_key); - result = verifier.template verify_proof(proof, proving_key->ipa_proof).result; + result = verifier.template verify_proof(proof, prover_instance->ipa_proof).result; } else { OuterVerifier verifier(verification_key); result = verifier.template verify_proof(proof).result; @@ -269,14 +249,14 @@ TYPED_TEST(AcirHonkRecursionConstraint, TestHonkRecursionConstraintVKGeneration) TestFixture::template create_outer_circuit(layer_1_circuits, /*dummy_witnesses=*/true); - auto proving_key = std::make_shared(layer_2_circuit); + auto 
prover_instance = std::make_shared(layer_2_circuit); auto verification_key = - std::make_shared(proving_key->get_precomputed()); + std::make_shared(prover_instance->get_precomputed()); - auto proving_key_dummy = - std::make_shared(layer_2_circuit_with_dummy_witnesses); + auto prover_instance_dummy = + std::make_shared(layer_2_circuit_with_dummy_witnesses); auto verification_key_dummy = - std::make_shared(proving_key_dummy->get_precomputed()); + std::make_shared(prover_instance_dummy->get_precomputed()); // Compare the two vks EXPECT_EQ(*verification_key_dummy, *verification_key); @@ -292,14 +272,14 @@ TYPED_TEST(AcirHonkRecursionConstraint, TestBasicSingleHonkRecursionConstraint) info("estimate finalized circuit gates = ", layer_2_circuit.get_estimated_num_finalized_gates()); - auto proving_key = std::make_shared(layer_2_circuit); + auto prover_instance = std::make_shared(layer_2_circuit); auto verification_key = - std::make_shared(proving_key->get_precomputed()); - typename TestFixture::OuterProver prover(proving_key, verification_key); - info("prover gates = ", proving_key->dyadic_size()); + std::make_shared(prover_instance->get_precomputed()); + typename TestFixture::OuterProver prover(prover_instance, verification_key); + info("prover gates = ", prover_instance->dyadic_size()); auto proof = prover.construct_proof(); - EXPECT_EQ(TestFixture::verify_proof(proving_key, verification_key, proof), true); + EXPECT_EQ(TestFixture::verify_proof(prover_instance, verification_key, proof), true); } TYPED_TEST(AcirHonkRecursionConstraint, TestBasicDoubleHonkRecursionConstraints) @@ -314,14 +294,14 @@ TYPED_TEST(AcirHonkRecursionConstraint, TestBasicDoubleHonkRecursionConstraints) info("circuit gates = ", layer_2_circuit.get_estimated_num_finalized_gates()); - auto proving_key = std::make_shared(layer_2_circuit); + auto prover_instance = std::make_shared(layer_2_circuit); auto verification_key = - std::make_shared(proving_key->get_precomputed()); - typename 
TestFixture::OuterProver prover(proving_key, verification_key); - info("prover gates = ", proving_key->dyadic_size()); + std::make_shared(prover_instance->get_precomputed()); + typename TestFixture::OuterProver prover(prover_instance, verification_key); + info("prover gates = ", prover_instance->dyadic_size()); auto proof = prover.construct_proof(); - EXPECT_EQ(TestFixture::verify_proof(proving_key, verification_key, proof), true); + EXPECT_EQ(TestFixture::verify_proof(prover_instance, verification_key, proof), true); } TYPED_TEST(AcirHonkRecursionConstraint, TestOneOuterRecursiveCircuit) @@ -376,14 +356,14 @@ TYPED_TEST(AcirHonkRecursionConstraint, TestOneOuterRecursiveCircuit) info("created second outer circuit"); info("number of gates in layer 3 = ", layer_3_circuit.get_estimated_num_finalized_gates()); - auto proving_key = std::make_shared(layer_3_circuit); + auto prover_instance = std::make_shared(layer_3_circuit); auto verification_key = - std::make_shared(proving_key->get_precomputed()); - typename TestFixture::OuterProver prover(proving_key, verification_key); - info("prover gates = ", proving_key->dyadic_size()); + std::make_shared(prover_instance->get_precomputed()); + typename TestFixture::OuterProver prover(prover_instance, verification_key); + info("prover gates = ", prover_instance->dyadic_size()); auto proof = prover.construct_proof(); - EXPECT_EQ(TestFixture::verify_proof(proving_key, verification_key, proof), true); + EXPECT_EQ(TestFixture::verify_proof(prover_instance, verification_key, proof), true); } /** @@ -427,12 +407,12 @@ TYPED_TEST(AcirHonkRecursionConstraint, TestFullRecursiveComposition) info("created third outer circuit"); info("number of gates in layer 3 circuit = ", layer_3_circuit.get_estimated_num_finalized_gates()); - auto proving_key = std::make_shared(layer_3_circuit); + auto prover_instance = std::make_shared(layer_3_circuit); auto verification_key = - std::make_shared(proving_key->get_precomputed()); - typename 
TestFixture::OuterProver prover(proving_key, verification_key); - info("prover gates = ", proving_key->dyadic_size()); + std::make_shared(prover_instance->get_precomputed()); + typename TestFixture::OuterProver prover(prover_instance, verification_key); + info("prover gates = ", prover_instance->dyadic_size()); auto proof = prover.construct_proof(); - EXPECT_EQ(TestFixture::verify_proof(proving_key, verification_key, proof), true); + EXPECT_EQ(TestFixture::verify_proof(prover_instance, verification_key, proof), true); } diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.cpp deleted file mode 100644 index 6907e5d6a7da..000000000000 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.cpp +++ /dev/null @@ -1,201 +0,0 @@ -// === AUDIT STATUS === -// internal: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } -// ===================== - -#include "ivc_recursion_constraint.hpp" -#include "barretenberg/common/assert.hpp" -#include "barretenberg/common/throw_or_abort.hpp" -#include "barretenberg/dsl/acir_format/mock_verifier_inputs.hpp" -#include "barretenberg/flavor/flavor.hpp" -#include "barretenberg/flavor/ultra_recursive_flavor.hpp" -#include "barretenberg/flavor/ultra_rollup_recursive_flavor.hpp" -#include "barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.hpp" -#include "barretenberg/stdlib/primitives/bigfield/constants.hpp" -#include "barretenberg/stdlib/primitives/curves/bn254.hpp" -#include "barretenberg/stdlib/primitives/pairing_points.hpp" -#include "proof_surgeon.hpp" -#include "recursion_constraint.hpp" - -namespace acir_format { - -using namespace bb; - -/** - * @brief Create an IVC object with mocked state corresponding to a set of IVC recursion constraints 
- * @details Construction of a kernel circuit requires two inputs: kernel prgram acir constraints and an IVC instance - * containing state needed to complete the kernel logic, e.g. proofs for input to recursive verifiers. To construct - * verification keys for kernel circuits without running a full IVC, we mock the IVC state corresponding to a provided - * set of IVC recurson constraints. For example, if the constraints contain a single PG recursive verification, we - * initialize an IVC with mocked data for the verifier accumulator, the folding proof, the circuit verification key, - * and a merge proof. - * @note There are only three valid combinations of IVC recursion constraints for a kernel program. See below for - * details. - * - * @param constraints IVC recursion constraints from a kernel circuit - * @param trace_settings - * @return ClientIVC - */ -std::shared_ptr create_mock_ivc_from_constraints(const std::vector& constraints, - const TraceSettings& trace_settings) -{ - auto ivc = std::make_shared(constraints.size(), trace_settings); - - uint32_t oink_type = static_cast(PROOF_TYPE::OINK); - uint32_t pg_type = static_cast(PROOF_TYPE::PG); - uint32_t pg_final_type = static_cast(PROOF_TYPE::PG_FINAL); - uint32_t pg_tail_type = static_cast(PROOF_TYPE::PG_TAIL); - - // There is a fixed set of valid combinations of IVC recursion constraints for Aztec kernel circuits: - - // Case: INIT kernel; single Oink recursive verification of an app - if (constraints.size() == 1 && constraints[0].proof_type == oink_type) { - mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::OINK, /*is_kernel=*/false); - return ivc; - } - - // Case: RESET kernel; single PG recursive verification of a kernel - if (constraints.size() == 1 && constraints[0].proof_type == pg_type) { - ivc->recursive_verifier_native_accum = create_mock_decider_vk(); - mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG, /*is_kernel=*/true); - return ivc; - } - - // Case: TAIL kernel; single PG recursive 
verification of a kernel - if (constraints.size() == 1 && constraints[0].proof_type == pg_tail_type) { - ivc->recursive_verifier_native_accum = create_mock_decider_vk(); - mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG_TAIL, /*is_kernel=*/true); - return ivc; - } - - // Case: INNER kernel; two PG recursive verifications, kernel and app in that order - if (constraints.size() == 2) { - BB_ASSERT_EQ(constraints[0].proof_type, pg_type); - BB_ASSERT_EQ(constraints[1].proof_type, pg_type); - ivc->recursive_verifier_native_accum = create_mock_decider_vk(); - mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG, /*is_kernel=*/true); - mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG, /*is_kernel=*/false); - return ivc; - } - - // Case: HIDING kernel; single PG_FINAL recursive verification of a kernel - if (constraints.size() == 1 && constraints[0].proof_type == pg_final_type) { - ivc->recursive_verifier_native_accum = create_mock_decider_vk(); - - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1283): We need to set the log circuit size here due - // to an invalid out of circuit max operation in the PG recursive verifier. Once that is resolved this should - // not be necessary. 
- ivc->recursive_verifier_native_accum->vk->log_circuit_size = 18; - mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG_FINAL, /*is_kernel=*/true); - return ivc; - } - - throw_or_abort("Invalid set of IVC recursion constraints!"); - return ivc; -} - -/** - * @brief Create a mock verification queue entry with proof and VK that have the correct structure but are not - * necessarily valid - * - */ -ClientIVC::VerifierInputs create_mock_verification_queue_entry(const ClientIVC::QUEUE_TYPE verification_type, - const TraceSettings& trace_settings, - const bool is_kernel) -{ - using FF = ClientIVC::FF; - using MegaVerificationKey = ClientIVC::MegaVerificationKey; - using Flavor = ClientIVC::Flavor; - - // Use the trace settings to determine the correct dyadic size and the public inputs offset - MegaExecutionTraceBlocks blocks; - blocks.set_fixed_block_sizes(trace_settings); - blocks.compute_offsets(/*is_structured=*/true); - size_t dyadic_size = blocks.get_structured_dyadic_size(); - size_t pub_inputs_offset = blocks.pub_inputs.trace_offset(); - - // Construct a mock Oink or PG proof and a mock MegaHonk verification key - std::vector proof; - std::shared_ptr verification_key; - - if (is_kernel) { - using KernelIO = stdlib::recursion::honk::KernelIO; - switch (verification_type) { - case ClientIVC::QUEUE_TYPE::OINK: - proof = create_mock_oink_proof(); - break; - case ClientIVC::QUEUE_TYPE::PG: - case ClientIVC::QUEUE_TYPE::PG_FINAL: - case ClientIVC::QUEUE_TYPE::PG_TAIL: - proof = create_mock_pg_proof(); - break; - default: - throw_or_abort("Invalid verification type! 
Only OINK, PG and PG_FINAL are supported"); - } - verification_key = create_mock_honk_vk(dyadic_size, pub_inputs_offset); - } else { - using AppIO = stdlib::recursion::honk::AppIO; - switch (verification_type) { - case ClientIVC::QUEUE_TYPE::OINK: - proof = create_mock_oink_proof(); - break; - case ClientIVC::QUEUE_TYPE::PG: - case ClientIVC::QUEUE_TYPE::PG_FINAL: - proof = create_mock_pg_proof(); - break; - default: - throw_or_abort("Invalid verification type! Only OINK, PG and PG_FINAL are supported"); - } - verification_key = create_mock_honk_vk(dyadic_size, pub_inputs_offset); - } - - return ClientIVC::VerifierInputs{ proof, verification_key, verification_type, is_kernel }; -} - -/** - * @brief Populate an IVC instance with data that mimics the state after a single IVC accumulation (Oink or PG) - * @details Mock state consists of a mock verification queue entry of type OINK (proof, VK) and a mocked merge proof - * - * @param ivc - * @param num_public_inputs_app num pub inputs in accumulated app, excluding fixed components, e.g. 
pairing points - */ -void mock_ivc_accumulation(const std::shared_ptr& ivc, ClientIVC::QUEUE_TYPE type, const bool is_kernel) -{ - ClientIVC::VerifierInputs entry = - acir_format::create_mock_verification_queue_entry(type, ivc->trace_settings, is_kernel); - ivc->verification_queue.emplace_back(entry); - ivc->goblin.merge_verification_queue.emplace_back(acir_format::create_mock_merge_proof()); - // If the type is PG_FINAL, we also need to populate the ivc instance with a mock decider proof - if (type == ClientIVC::QUEUE_TYPE::PG_FINAL) { - // we have to create a mock honk vk - ivc->honk_vk = entry.honk_vk; - ivc->decider_proof = acir_format::create_mock_decider_proof(); - } - ivc->num_circuits_accumulated++; -} - -/** - * @brief Populate VK witness fields from a recursion constraint from a provided VerificationKey - * - * @param builder - * @param mock_verification_key - * @param key_witness_indices - */ -void populate_dummy_vk_in_constraint(MegaCircuitBuilder& builder, - const std::shared_ptr& mock_verification_key, - std::vector& key_witness_indices) -{ - using FF = ClientIVC::FF; - - // Convert the VerificationKey to fields - std::vector mock_vk_fields = mock_verification_key->to_field_elements(); - BB_ASSERT_EQ(mock_vk_fields.size(), key_witness_indices.size()); - - // Add the fields to the witness and set the key witness indices accordingly - for (auto [witness_idx, value] : zip_view(key_witness_indices, mock_vk_fields)) { - builder.set_variable(witness_idx, value); - } -} - -} // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.test.cpp deleted file mode 100644 index 1379ec7ba02b..000000000000 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.test.cpp +++ /dev/null @@ -1,555 +0,0 @@ -#include "barretenberg/dsl/acir_format/ivc_recursion_constraint.hpp" -#include "acir_format.hpp" 
-#include "acir_format_mocks.hpp" -#include "barretenberg/client_ivc/client_ivc.hpp" -#include "barretenberg/dsl/acir_format/mock_verifier_inputs.hpp" -#include "barretenberg/goblin/mock_circuits.hpp" -#include "barretenberg/ultra_honk/decider_proving_key.hpp" -#include "barretenberg/ultra_honk/ultra_prover.hpp" -#include "barretenberg/ultra_honk/ultra_verifier.hpp" -#include "honk_recursion_constraint.hpp" -#include "proof_surgeon.hpp" - -#include -#include - -using namespace acir_format; -using namespace bb; - -class IvcRecursionConstraintTest : public ::testing::Test { - - public: - using Builder = MegaCircuitBuilder; - using Flavor = MegaFlavor; - using VerificationKey = MegaFlavor::VerificationKey; - using FF = Flavor::FF; - using VerifierInputs = ClientIVC::VerifierInputs; - using QUEUE_TYPE = ClientIVC::QUEUE_TYPE; - using VerificationQueue = ClientIVC::VerificationQueue; - using ArithmeticConstraint = AcirFormat::PolyTripleConstraint; - using PairingPoints = ClientIVC::PairingPoints; - - /** - * @brief Constuct a simple arbitrary circuit to represent a mock app circuit - * - */ - static Builder construct_mock_app_circuit(const std::shared_ptr& ivc) - { - Builder circuit{ ivc->goblin.op_queue }; - GoblinMockCircuits::add_some_ecc_op_gates(circuit); - MockCircuits::add_arithmetic_gates(circuit); - PairingPoints::add_default_to_public_inputs(circuit); - return circuit; - } - - static std::shared_ptr get_verification_key(Builder& builder_in, - const TraceSettings& trace_settings) - { - // This is a workaround to ensure that the circuit is finalized before we create the verification key - // In practice, this should not be needed as the circuit will be finalized when it is accumulated into the IVC - // but this is a workaround for the test setup. 
- // Create a copy of the input circuit - MegaCircuitBuilder_ builder{ builder_in }; - - // Deepcopy the opqueue to avoid modifying the original one - builder.op_queue = std::make_shared(*builder.op_queue); - std::shared_ptr proving_key = - std::make_shared(builder, trace_settings); - std::shared_ptr vk = std::make_shared(proving_key->get_precomputed()); - return vk; - } - - static UltraCircuitBuilder create_inner_circuit(size_t log_num_gates = 10) - { - using InnerPairingPoints = bb::stdlib::recursion::PairingPoints; - - UltraCircuitBuilder builder; - - // Create 2^log_n many add gates based on input log num gates - const size_t num_gates = (1 << log_num_gates); - for (size_t i = 0; i < num_gates; ++i) { - fr a = fr::random_element(); - uint32_t a_idx = builder.add_variable(a); - - fr b = fr::random_element(); - fr c = fr::random_element(); - fr d = a + b + c; - uint32_t b_idx = builder.add_variable(b); - uint32_t c_idx = builder.add_variable(c); - uint32_t d_idx = builder.add_variable(d); - - builder.create_big_add_gate({ a_idx, b_idx, c_idx, d_idx, fr(1), fr(1), fr(1), fr(-1), fr(0) }); - } - - InnerPairingPoints::add_default_to_public_inputs(builder); - return builder; - } - - /** - * @brief Constuct a mock app circuit with a UH recursive verifier - * - */ - static Builder construct_mock_UH_recursion_app_circuit(const std::shared_ptr& ivc, const bool tamper_vk) - { - AcirProgram program; - std::vector recursion_constraints; - - Builder circuit{ ivc->goblin.op_queue }; - GoblinMockCircuits::add_some_ecc_op_gates(circuit); - MockCircuits::add_arithmetic_gates(circuit); - - { - using RecursiveFlavor = UltraRecursiveFlavor_; - using VerifierOutput = bb::stdlib::recursion::honk::UltraRecursiveVerifierOutput; - using StdlibProof = bb::stdlib::Proof; - using StdlibIO = bb::stdlib::recursion::honk::DefaultIO; - - // Create an arbitrary inner circuit - auto inner_circuit = create_inner_circuit(); - - // Compute native verification key - auto proving_key = 
std::make_shared>(inner_circuit); - auto honk_vk = std::make_shared(proving_key->get_precomputed()); - UltraProver prover(proving_key, honk_vk); // A prerequisite for computing VK - auto inner_proof = prover.construct_proof(); - - if (tamper_vk) { - honk_vk->q_l = g1::one; - UltraVerifier_ verifier(honk_vk); - EXPECT_FALSE(verifier.template verify_proof(inner_proof).result); - } - // Instantiate the recursive verifier using the native verification key - auto stdlib_vk_and_hash = std::make_shared(circuit, honk_vk); - stdlib::recursion::honk::UltraRecursiveVerifier_ verifier(&circuit, stdlib_vk_and_hash); - - StdlibProof stdlib_inner_proof(circuit, inner_proof); - VerifierOutput output = verifier.template verify_proof(stdlib_inner_proof); - - // IO - StdlibIO inputs; - inputs.pairing_inputs = output.points_accumulator; - inputs.set_public(); // propagate resulting pairing points on the public inputs - } - - return circuit; - } - - /** - * @brief Create an ACIR RecursionConstraint given the corresponding verifier inputs - * @brief In practice such constraints are created via a call to verify_proof(...) 
in noir - * - * @param input bberg style proof and verification key - * @param witness Array of witnesses into which the above data is placed - * @return RecursionConstraint - */ - static RecursionConstraint create_recursion_constraint(const VerifierInputs& input, SlabVector& witness) - { - // Assemble simple vectors of witnesses for vkey and proof - std::vector key_witnesses = input.honk_vk->to_field_elements(); - FF key_hash_witness = input.honk_vk->hash(); - std::vector proof_witnesses = input.proof; // proof contains the public inputs at this stage - - // Construct witness indices for each component in the constraint; populate the witness array - auto [key_indices, key_hash_index, proof_indices, public_inputs_indices] = - ProofSurgeon::populate_recursion_witness_data( - witness, proof_witnesses, key_witnesses, key_hash_witness, /*num_public_inputs_to_extract=*/0); - - // The proof type can be either Oink or PG or PG_FINAL - PROOF_TYPE proof_type; - switch (input.type) { - case QUEUE_TYPE::OINK: - proof_type = OINK; - break; - case QUEUE_TYPE::PG: - proof_type = PG; - break; - case QUEUE_TYPE::PG_FINAL: - proof_type = PG_FINAL; - break; - case QUEUE_TYPE::PG_TAIL: - proof_type = PG_TAIL; - break; - default: - throw std::runtime_error("Invalid proof type"); - } - - return RecursionConstraint{ - .key = key_indices, - .proof = {}, // the proof witness indices are not needed in an ivc recursion constraint - .public_inputs = public_inputs_indices, - .key_hash = key_hash_index, - .proof_type = proof_type, - }; - } - - /** - * @brief Generate an acir program {constraints, witness} for a mock kernel - * @details The IVC contains and internal verification queue that contains proofs to be recursively verified. - * Construct an AcirProgram with a RecursionConstraint for each entry in the ivc verification queue. (In practice - * these constraints would come directly from calls to verify_proof in noir). 
- * @note This method needs the number of public inputs in each proof-to-be-verified so they can be extracted and - * provided separately as is required in the acir constraint system. - * - * @param ivc - * @param inner_circuit_num_pub_inputs Num pub inputs for each circuit whose accumulation is recursively - * verified - * @return Builder - */ - static AcirProgram construct_mock_kernel_program(const VerificationQueue& verification_queue) - { - AcirProgram program; - - // Construct recursion constraints based on the ivc verification queue; populate the witness along the way - std::vector ivc_recursion_constraints; - ivc_recursion_constraints.reserve(verification_queue.size()); - for (const auto& queue_entry : verification_queue) { - ivc_recursion_constraints.push_back(create_recursion_constraint(queue_entry, program.witness)); - } - - // Construct a constraint system containing the business logic and ivc recursion constraints - program.constraints.varnum = static_cast(program.witness.size()); - program.constraints.num_acir_opcodes = static_cast(ivc_recursion_constraints.size()); - program.constraints.ivc_recursion_constraints = ivc_recursion_constraints; - program.constraints.original_opcode_indices = create_empty_original_opcode_indices(); - mock_opcode_indices(program.constraints); - - return program; - } - - static void construct_and_accumulate_mock_kernel(std::shared_ptr ivc, TraceSettings trace_settings) - { - // construct a mock kernel program (acir) from the ivc verification queue - const ProgramMetadata metadata{ ivc }; - AcirProgram mock_kernel_program = construct_mock_kernel_program(ivc->verification_queue); - auto kernel = acir_format::create_circuit(mock_kernel_program, metadata); - EXPECT_TRUE(CircuitChecker::check(kernel)); - auto kernel_vk = construct_kernel_vk_from_acir_program(mock_kernel_program, trace_settings); - ivc->accumulate(kernel, kernel_vk); - - if (ivc->num_circuits_accumulated == ivc->get_num_circuits()) { - Builder circuit{ 
ivc->goblin.op_queue }; - ivc->complete_kernel_circuit_logic(circuit); - } - } - - /** - * @brief Construct a kernel circuit VK from an acir program with IVC recursion constraints - * - * @param program Acir program representing a kernel circuit - * @param trace_settings needed for construction of the VK - * @return std::shared_ptr - */ - static std::shared_ptr construct_kernel_vk_from_acir_program( - AcirProgram& program, const TraceSettings& trace_settings) - { - // Create kernel circuit from the kernel program - Builder kernel = acir_format::create_circuit(program); - - // Manually construct the VK for the kernel circuit - auto proving_key = std::make_shared(kernel, trace_settings); - auto verification_key = std::make_shared(proving_key->get_precomputed()); - return verification_key; - } - - protected: - void SetUp() override { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } -}; - -/** - * @brief Check that the size of a mock merge proof matches expectation - */ -TEST_F(IvcRecursionConstraintTest, MockMergeProofSize) -{ - Goblin::MergeProof merge_proof = create_mock_merge_proof(); - EXPECT_EQ(merge_proof.size(), MERGE_PROOF_SIZE); -} - -/** - * @brief Test IVC accumulation of a one app and one kernel; The kernel includes a recursive oink verification for the - * app, specified via an ACIR RecursionConstraint. 
- */ -TEST_F(IvcRecursionConstraintTest, AccumulateTwo) -{ - TraceSettings trace_settings{ SMALL_TEST_STRUCTURE }; - auto ivc = std::make_shared(/*num_circuits=*/2, trace_settings); - - // construct a mock app_circuit - auto app_circuit = construct_mock_app_circuit(ivc); - - auto app_vk = get_verification_key(app_circuit, trace_settings); - // Complete instance and generate an oink proof - ivc->accumulate(app_circuit, app_vk); - - // Construct kernel consisting only of the kernel completion logic - construct_and_accumulate_mock_kernel(ivc, trace_settings); - - EXPECT_TRUE(ivc->prove_and_verify()); -} - -/** - * @brief Test IVC accumulation of two apps and two kernels; The first kernel contains a recursive oink verification and - * the second contains two recursive PG verifications, all specified via ACIR RecursionConstraints. - */ -TEST_F(IvcRecursionConstraintTest, AccumulateFour) -{ - TraceSettings trace_settings{ SMALL_TEST_STRUCTURE }; - // 4 ciruits and the tail kernel - auto ivc = std::make_shared(/*num_circuits=*/5, trace_settings); - - // construct a mock app_circuit - Builder app_circuit_0 = construct_mock_app_circuit(ivc); - ivc->accumulate(app_circuit_0, get_verification_key(app_circuit_0, trace_settings)); - - const ProgramMetadata metadata{ ivc }; - - // Construct kernel_0; consists of a single oink recursive verification for app (plus databus/merge logic) - construct_and_accumulate_mock_kernel(ivc, trace_settings); - - // construct a mock app_circuit - Builder app_circuit_1 = construct_mock_app_circuit(ivc); - ivc->accumulate(app_circuit_1, get_verification_key(app_circuit_1, trace_settings)); - - // construct and accumulate a "Reset" Kernel circuit - construct_and_accumulate_mock_kernel(ivc, trace_settings); - - // Now we add the tail kernel - EXPECT_EQ(ivc->verification_queue.size(), 1); - EXPECT_EQ(ivc->verification_queue[0].type, QUEUE_TYPE::PG_TAIL); - construct_and_accumulate_mock_kernel(ivc, trace_settings); - 
EXPECT_TRUE(ivc->prove_and_verify()); -} - -// Test generation of "init" kernel VK via dummy IVC data -TEST_F(IvcRecursionConstraintTest, GenerateInitKernelVKFromConstraints) -{ - const TraceSettings trace_settings{ AZTEC_TRACE_STRUCTURE }; - - // First, construct the kernel VK by running the full IVC (accumulate one app and one kernel) - std::shared_ptr expected_kernel_vk; - { - auto ivc = std::make_shared(/*num_circuits=*/2, trace_settings); - - // Construct and accumulate mock app_circuit - Builder app_circuit = construct_mock_app_circuit(ivc); - ivc->accumulate(app_circuit, get_verification_key(app_circuit, trace_settings)); - - // Construct and accumulate kernel consisting only of the kernel completion logic - construct_and_accumulate_mock_kernel(ivc, trace_settings); - expected_kernel_vk = ivc->verification_queue.back().honk_vk; - } - - // Now, construct the kernel VK by mocking the post app accumulation state of the IVC - std::shared_ptr kernel_vk; - { - auto ivc = std::make_shared(/*num_circuits=*/2, trace_settings); - - // Construct kernel consisting only of the kernel completion logic - acir_format::mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::OINK, /*is_kernel=*/false); - AcirProgram program = construct_mock_kernel_program(ivc->verification_queue); - program.witness = {}; // remove the witness to mimick VK construction context - - kernel_vk = construct_kernel_vk_from_acir_program(program, trace_settings); - } - - // Compare the VK constructed via running the IVc with the one constructed via mocking - EXPECT_EQ(*kernel_vk.get(), *expected_kernel_vk.get()); -} - -// Test generation of "reset" or "tail" kernel VK via dummy IVC data -TEST_F(IvcRecursionConstraintTest, GenerateResetKernelVKFromConstraints) -{ - const TraceSettings trace_settings{ AZTEC_TRACE_STRUCTURE }; - - // First, construct the kernel VK by running the full IVC (accumulate one app and one kernel) - std::shared_ptr expected_kernel_vk; - { - auto ivc = 
std::make_shared(/*num_circuits=*/3, trace_settings); - - const ProgramMetadata metadata{ ivc }; - - // Construct and accumulate mock app_circuit - Builder app_circuit = construct_mock_app_circuit(ivc); - ivc->accumulate(app_circuit, get_verification_key(app_circuit, trace_settings)); - - // Construct and accumulate a mock INIT kernel (oink recursion for app accumulation) - construct_and_accumulate_mock_kernel(ivc, trace_settings); - EXPECT_TRUE(ivc->verification_queue.size() == 1); - EXPECT_TRUE(ivc->verification_queue[0].type == bb::ClientIVC::QUEUE_TYPE::PG_TAIL); - - // Construct and accumulate a mock RESET/TAIL kernel (PG recursion for kernel accumulation) - construct_and_accumulate_mock_kernel(ivc, trace_settings); - - expected_kernel_vk = ivc->verification_queue.back().honk_vk; - } - - // Now, construct the kernel VK by mocking the IVC state prior to kernel construction - std::shared_ptr kernel_vk; - { - auto ivc = std::make_shared(/*num_circuits=*/3, trace_settings); - - // Construct kernel consisting only of the kernel completion logic - acir_format::mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG, /*is_kernel=*/true); - AcirProgram program = construct_mock_kernel_program(ivc->verification_queue); - program.witness = {}; // remove the witness to mimick VK construction context - - kernel_vk = construct_kernel_vk_from_acir_program(program, trace_settings); - } - - // Compare the VK constructed via running the IVc with the one constructed via mocking - EXPECT_EQ(*kernel_vk.get(), *expected_kernel_vk.get()); -} - -// Test generation of "inner" kernel VK via dummy IVC data -TEST_F(IvcRecursionConstraintTest, GenerateInnerKernelVKFromConstraints) -{ - const TraceSettings trace_settings{ AZTEC_TRACE_STRUCTURE }; - - // First, construct the kernel VK by running the full IVC (accumulate one app and one kernel) - std::shared_ptr expected_kernel_vk; - { - // we have to set the number of circuits one more than the number of circuits we're accumulating as 
otherwise - // the last circuit will be seen as a tail - auto ivc = std::make_shared(/*num_circuits=*/5, trace_settings); - - const ProgramMetadata metadata{ ivc }; - - { // Construct and accumulate mock app_circuit - Builder app_circuit = construct_mock_app_circuit(ivc); - ivc->accumulate(app_circuit, get_verification_key(app_circuit, trace_settings)); - } - - // Construct and accumulate a mock INIT kernel (oink recursion for app accumulation) - construct_and_accumulate_mock_kernel(ivc, trace_settings); - - { // Construct and accumulate a second mock app_circuit - Builder app_circuit = construct_mock_app_circuit(ivc); - ivc->accumulate(app_circuit, get_verification_key(app_circuit, trace_settings)); - } - - { // Construct and accumulate a mock INNER kernel (PG recursion for kernel accumulation) - EXPECT_TRUE(ivc->verification_queue.size() == 2); - EXPECT_TRUE(ivc->verification_queue[1].type == bb::ClientIVC::QUEUE_TYPE::PG); - construct_and_accumulate_mock_kernel(ivc, trace_settings); - } - - expected_kernel_vk = ivc->verification_queue.back().honk_vk; - } - - // Now, construct the kernel VK by mocking the IVC state prior to kernel construction - std::shared_ptr kernel_vk; - { - auto ivc = std::make_shared(/*num_circuits=*/4, trace_settings); - - // Construct kernel consisting only of the kernel completion logic - acir_format::mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG, /*is_kernel=*/true); - acir_format::mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG, /*is_kernel=*/false); - AcirProgram program = construct_mock_kernel_program(ivc->verification_queue); - program.witness = {}; // remove the witness to mimick VK construction context - - kernel_vk = construct_kernel_vk_from_acir_program(program, trace_settings); - } - - // Compare the VK constructed via running the IVc with the one constructed via mocking - EXPECT_EQ(*kernel_vk.get(), *expected_kernel_vk.get()); -} - -// Test generation of "hiding" kernel VK via dummy IVC data 
-TEST_F(IvcRecursionConstraintTest, GenerateHidingKernelVKFromConstraints) -{ - const TraceSettings trace_settings{ AZTEC_TRACE_STRUCTURE }; - - // First, construct the kernel VK by running the full IVC - std::shared_ptr expected_hiding_kernel_vk; - { - auto ivc = std::make_shared(/*num_circuits=*/3, trace_settings); - const ProgramMetadata metadata{ ivc }; - - { - // Construct and accumulate mock app_circuit - Builder app_circuit = construct_mock_app_circuit(ivc); - ivc->accumulate(app_circuit, get_verification_key(app_circuit, trace_settings)); - } - - { - // Construct and accumulate a mock INIT kernel (oink recursion for app accumulation) - construct_and_accumulate_mock_kernel(ivc, trace_settings); - } - - { // Construct and accumulate a mock TAIL kernel (PG recursion for kernel accumulation) - EXPECT_TRUE(ivc->verification_queue.size() == 1); - EXPECT_TRUE(ivc->verification_queue[0].type == bb::ClientIVC::QUEUE_TYPE::PG_TAIL); - AcirProgram program = construct_mock_kernel_program(ivc->verification_queue); - Builder kernel = acir_format::create_circuit(program, metadata); - ivc->accumulate(kernel, construct_kernel_vk_from_acir_program(program, trace_settings)); - } - - { - // Construct the hiding kernel and its VK - EXPECT_TRUE(ivc->verification_queue.size() == 1); - EXPECT_TRUE(ivc->verification_queue[0].type == bb::ClientIVC::QUEUE_TYPE::PG_FINAL); - AcirProgram program = construct_mock_kernel_program(ivc->verification_queue); - Builder kernel = acir_format::create_circuit(program, metadata); - // Note: Cannot call ivc->accumulate(kernel) here; hiding circuit is not yet supported - auto proving_key = ivc->compute_hiding_circuit_proving_key(); - expected_hiding_kernel_vk = - std::make_shared(proving_key->get_precomputed()); - } - } - - // Now, construct the kernel VK by mocking the IVC state prior to kernel construction - std::shared_ptr kernel_vk; - { - auto ivc = std::make_shared(/*num_circuits=*/1, TraceSettings()); - // construct a mock tail kernel - 
acir_format::mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG_FINAL, /*is_kernel=*/true); - AcirProgram program = construct_mock_kernel_program(ivc->verification_queue); - program.witness = {}; // remove the witness to mimick VK construction context - kernel_vk = construct_kernel_vk_from_acir_program(program, TraceSettings()); - } - - // Compare the VK constructed via running the IVc with the one constructed via mocking - EXPECT_EQ(*kernel_vk.get(), *expected_hiding_kernel_vk.get()); -} - -/** - * @brief Test IVC accumulation of a one app and one kernel. The app includes a UltraHonk Recursive Verifier. - * This test was copied from the AccumulateTwo test. - */ -TEST_F(IvcRecursionConstraintTest, RecursiveVerifierAppCircuitTest) -{ - TraceSettings trace_settings{ AZTEC_TRACE_STRUCTURE }; - auto ivc = std::make_shared(/*num_circuits*/ 2, trace_settings); - - // construct a mock app_circuit - Builder app_circuit = construct_mock_UH_recursion_app_circuit(ivc, /*tamper_vk=*/false); - - // Complete instance and generate an oink proof - ivc->accumulate(app_circuit, get_verification_key(app_circuit, trace_settings)); - - // Construct kernel consisting only of the kernel completion logic - construct_and_accumulate_mock_kernel(ivc, trace_settings); - - EXPECT_TRUE(ivc->prove_and_verify()); -} - -/** - * @brief Test IVC accumulation of a one app and one kernel. The app includes a UltraHonk Recursive Verifier that - * verifies a failed proof. This test was copied from the AccumulateTwo test. 
- */ -TEST_F(IvcRecursionConstraintTest, BadRecursiveVerifierAppCircuitTest) -{ - TraceSettings trace_settings{ AZTEC_TRACE_STRUCTURE }; - auto ivc = std::make_shared(/*num_circuits*/ 2, trace_settings); - - // construct a mock app_circuit that has bad pairing point object - Builder app_circuit = construct_mock_UH_recursion_app_circuit(ivc, /*tamper_vk=*/true); - - // Complete instance and generate an oink proof - ivc->accumulate(app_circuit, get_verification_key(app_circuit, trace_settings)); - - // Construct kernel consisting only of the kernel completion logic - construct_and_accumulate_mock_kernel(ivc, trace_settings); - - // We expect the CIVC proof to fail due to the app with a failed UH recursive verification - EXPECT_FALSE(ivc->prove_and_verify()); -} diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.cpp index b13f669d943e..9d6551dfa4c2 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.cpp @@ -13,7 +13,6 @@ #include "barretenberg/stdlib/primitives/curves/bn254.hpp" #include "barretenberg/stdlib/primitives/pairing_points.hpp" #include "barretenberg/stdlib/special_public_inputs/special_public_inputs.hpp" -#include "barretenberg/ultra_honk/decider_keys.hpp" #include "proof_surgeon.hpp" #include "recursion_constraint.hpp" @@ -27,9 +26,10 @@ using namespace bb; * @param fields field buffer to append mock commitment values to * @param num_commitments number of mock commitments to append */ +template void populate_field_elements_for_mock_commitments(std::vector& fields, const size_t& num_commitments) { - auto mock_commitment = curve::BN254::AffineElement::one(); + auto mock_commitment = Curve::AffineElement::one(); std::vector mock_commitment_frs = field_conversion::convert_to_bn254_frs(mock_commitment); for (size_t i = 0; i < 
num_commitments; ++i) { for (const fr& val : mock_commitment_frs) { @@ -38,18 +38,43 @@ void populate_field_elements_for_mock_commitments(std::vector& fields, const } } +/** + * @brief Helper to populate a field buffer with some number of field elements + * + * @param fields field buffer to append field elements to + * @param num_elements number of mock field elements to append + * @param value optional mock value appended + */ +template +void populate_field_elements(std::vector& fields, + const size_t& num_elements, + std::optional value = std::nullopt) +{ + for (size_t i = 0; i < num_elements; ++i) { + std::vector field_elements = value.has_value() + ? field_conversion::convert_to_bn254_frs(value.value()) + : field_conversion::convert_to_bn254_frs(FF::random_element()); + fields.insert(fields.end(), field_elements.begin(), field_elements.end()); + } +} + /** * @brief Create a mock oink proof that has the correct structure but is not in general valid * + * @param inner_public_inputs_size Number of public inputs coming from the ACIR constraints */ -template HonkProof create_mock_oink_proof() +template HonkProof create_mock_oink_proof(const size_t inner_public_inputs_size) { HonkProof proof; // Populate mock public inputs - typename Flavor::CircuitBuilder builder; + typename PublicInputs::Builder builder; PublicInputs::add_default(builder); + // Populate the proof with as many public inputs as required from the ACIR constraints + populate_field_elements(proof, inner_public_inputs_size); + + // Populate the proof with the public inputs added from barretenberg for (const auto& pub : builder.public_inputs()) { proof.emplace_back(builder.get_variable(pub)); } @@ -66,35 +91,69 @@ template HonkProof create_mock_oink_proof( */ template HonkProof create_mock_decider_proof() { - using FF = typename Flavor::FF; - + using FF = Flavor::FF; + using Curve = Flavor::Curve; HonkProof proof; - // Sumcheck univariates - const size_t TOTAL_SIZE_SUMCHECK_UNIVARIATES = 
CONST_PROOF_SIZE_LOG_N * Flavor::BATCHED_RELATION_PARTIAL_LENGTH; - for (size_t i = 0; i < TOTAL_SIZE_SUMCHECK_UNIVARIATES; ++i) { - proof.emplace_back(FF::random_element()); + constexpr size_t const_proof_log_n = Flavor::VIRTUAL_LOG_N; + + if constexpr (Flavor::HasZK) { + // Libra concatenation commitment + populate_field_elements_for_mock_commitments(proof, 1); + + // Libra sum + populate_field_elements(proof, 1); } + // Sumcheck univariates + const size_t TOTAL_SIZE_SUMCHECK_UNIVARIATES = const_proof_log_n * Flavor::BATCHED_RELATION_PARTIAL_LENGTH; + populate_field_elements(proof, TOTAL_SIZE_SUMCHECK_UNIVARIATES); + // Sumcheck multilinear evaluations - for (size_t i = 0; i < Flavor::NUM_ALL_ENTITIES; ++i) { - proof.emplace_back(FF::random_element()); + populate_field_elements(proof, Flavor::NUM_ALL_ENTITIES); + + if constexpr (Flavor::HasZK) { + // Libra claimed evaluation + populate_field_elements(proof, 1); + + // Libra grand sum commitment + populate_field_elements_for_mock_commitments(proof, 1); + + // Libra quotient commitment + populate_field_elements_for_mock_commitments(proof, 1); + + // Gemini masking commitment + populate_field_elements_for_mock_commitments(proof, 1); + + // Gemini masking evaluation + populate_field_elements(proof, 1); } // Gemini fold commitments - const size_t NUM_GEMINI_FOLD_COMMITMENTS = CONST_PROOF_SIZE_LOG_N - 1; - populate_field_elements_for_mock_commitments(proof, NUM_GEMINI_FOLD_COMMITMENTS); + const size_t NUM_GEMINI_FOLD_COMMITMENTS = const_proof_log_n - 1; + populate_field_elements_for_mock_commitments(proof, NUM_GEMINI_FOLD_COMMITMENTS); // Gemini fold evaluations - const size_t NUM_GEMINI_FOLD_EVALUATIONS = CONST_PROOF_SIZE_LOG_N; - for (size_t i = 0; i < NUM_GEMINI_FOLD_EVALUATIONS; ++i) { - proof.emplace_back(FF::random_element()); + const size_t NUM_GEMINI_FOLD_EVALUATIONS = const_proof_log_n; + populate_field_elements(proof, NUM_GEMINI_FOLD_EVALUATIONS); + + if constexpr (std::is_same_v) { + // Gemini P pos 
evaluation + populate_field_elements(proof, 1); + + // Gemini P neg evaluation + populate_field_elements(proof, 1); + } + + if constexpr (Flavor::HasZK) { + // NUM_SMALL_IPA_EVALUATIONS libra evals + populate_field_elements(proof, NUM_SMALL_IPA_EVALUATIONS); } // Shplonk batched quotient commitment - populate_field_elements_for_mock_commitments(proof, /*num_commitments=*/1); + populate_field_elements_for_mock_commitments(proof, /*num_commitments=*/1); // KZG quotient commitment - populate_field_elements_for_mock_commitments(proof, /*num_commitments=*/1); + populate_field_elements_for_mock_commitments(proof, /*num_commitments=*/1); return proof; } @@ -102,16 +161,22 @@ template HonkProof create_mock_decider_proof() /** * @brief Create a mock honk proof that has the correct structure but is not in general valid * + * @param inner_public_inputs_size Number of public inputs coming from the ACIR constraints */ -template HonkProof create_mock_honk_proof() +template HonkProof create_mock_honk_proof(const size_t inner_public_inputs_size) { // Construct a Honk proof as the concatenation of an Oink proof and a Decider proof - HonkProof oink_proof = create_mock_oink_proof(); + HonkProof oink_proof = create_mock_oink_proof(inner_public_inputs_size); HonkProof decider_proof = create_mock_decider_proof(); HonkProof proof; proof.reserve(oink_proof.size() + decider_proof.size()); proof.insert(proof.end(), oink_proof.begin(), oink_proof.end()); proof.insert(proof.end(), decider_proof.begin(), decider_proof.end()); + + if constexpr (HasIPAAccumulator) { + HonkProof ipa_proof = create_mock_ipa_proof(); + proof.insert(proof.end(), ipa_proof.begin(), ipa_proof.end()); + } return proof; } @@ -125,15 +190,11 @@ template HonkProof create_mock_pg_proof() HonkProof proof = create_mock_oink_proof(); // Populate mock perturbator coefficients - for (size_t idx = 1; idx <= CONST_PG_LOG_N; idx++) { - proof.emplace_back(0); - } + populate_field_elements(proof, CONST_PG_LOG_N, 
/*value=*/fr::zero()); // Populate mock combiner quotient coefficients - for (size_t idx = DeciderProvingKeys_::NUM; idx < DeciderProvingKeys_::BATCHED_EXTENDED_LENGTH; - idx++) { - proof.emplace_back(0); - } + size_t NUM_COEFF_COMBINER_QUOTIENT = computed_batched_extended_length() - NUM_INSTANCES; + populate_field_elements(proof, NUM_COEFF_COMBINER_QUOTIENT, /*value=*/fr::zero()); return proof; } @@ -145,18 +206,13 @@ template HonkProof create_mock_pg_proof() */ Goblin::MergeProof create_mock_merge_proof() { - using Flavor = MegaFlavor; - using FF = Flavor::FF; - - std::vector proof; + Goblin::MergeProof proof; proof.reserve(MERGE_PROOF_SIZE); - FF mock_val(5); - auto mock_commitment = curve::BN254::AffineElement::one(); - std::vector mock_commitment_frs = field_conversion::convert_to_bn254_frs(mock_commitment); + uint32_t mock_shift_size = 5; // Must be smaller than 32, otherwise pow raises an error - // Populate mock subtable size - proof.emplace_back(mock_val); + // Populate mock shift size + populate_field_elements(proof, 1, /*value=*/fr{ mock_shift_size }); // There are 8 entities in the merge protocol (4 columns x 2 components: T_j, g_j(X) = X^{l-1} t_j(X)) // and 8 evaluations (4 columns x 2 components: g_j(kappa), t_j(1/kappa)) @@ -164,43 +220,213 @@ Goblin::MergeProof create_mock_merge_proof() const size_t NUM_TRANSCRIPT_EVALUATIONS = 8; // Transcript poly commitments - for (size_t i = 0; i < NUM_TRANSCRIPT_ENTITIES; ++i) { - for (const FF& val : mock_commitment_frs) { - proof.emplace_back(val); - } - } + populate_field_elements_for_mock_commitments(proof, NUM_TRANSCRIPT_ENTITIES); + // Transcript poly evaluations - for (size_t i = 0; i < NUM_TRANSCRIPT_EVALUATIONS; ++i) { - proof.emplace_back(mock_val); - } + populate_field_elements(proof, NUM_TRANSCRIPT_EVALUATIONS); // Shplonk proof: commitment to the quotient - for (const FF& val : mock_commitment_frs) { - proof.emplace_back(val); - } + populate_field_elements_for_mock_commitments(proof, 1); // KZG 
proof: commitment to W - for (const FF& val : mock_commitment_frs) { - proof.emplace_back(val); - } + populate_field_elements_for_mock_commitments(proof, 1); BB_ASSERT_EQ(proof.size(), MERGE_PROOF_SIZE); return proof; } +template HonkProof create_mock_civc_proof(const size_t inner_public_inputs_size) +{ + HonkProof proof; + + HonkProof mega_proof = create_mock_honk_proof>( + inner_public_inputs_size); + Goblin::MergeProof merge_proof = create_mock_merge_proof(); + ECCVMProof eccvm_proof{ create_mock_pre_ipa_proof(), create_mock_ipa_proof() }; + HonkProof translator_proof = create_mock_translator_proof(); + + ClientIVC::Proof civc_proof{ mega_proof, { merge_proof, eccvm_proof, translator_proof } }; + proof = civc_proof.to_field_elements(); + + return proof; +} + +/** + * @brief Create a mock pre-ipa proof which has the correct structure but is not necessarily valid + * + * @details An ECCVM proof is made of a pre-ipa proof and an ipa-proof. Here we mock the pre-ipa part. + * + * @return HonkProof + */ +HonkProof create_mock_pre_ipa_proof() +{ + using FF = ECCVMFlavor::FF; + HonkProof proof; + + // 1. NUM_WITNESS_ENTITIES commitments + populate_field_elements_for_mock_commitments(proof, ECCVMFlavor::NUM_WITNESS_ENTITIES); + + // 2. Libra concatenation commitment + populate_field_elements_for_mock_commitments(proof, /*num_commitments*/ 1); + + // 3. Libra sum + populate_field_elements(proof, 1); + + // 4. Sumcheck univariates commitments + 5. Sumcheck univariate evaluations + for (size_t idx = 0; idx < CONST_ECCVM_LOG_N; idx++) { + populate_field_elements_for_mock_commitments(proof, /*num_commitments=*/1); + populate_field_elements(proof, /*num_elements=*/2); + } + + // 6. ALL_ENTITIES sumcheck evaluations + populate_field_elements(proof, ECCVMFlavor::NUM_ALL_ENTITIES); + + // 7. Libra evaluation + populate_field_elements(proof, 1); + + // 8. Libra grand sum commitment + populate_field_elements_for_mock_commitments(proof, /*num_commitments=*/1); + + // 9. 
Libra quotient commitment + populate_field_elements_for_mock_commitments(proof, /*num_commitments=*/1); + + // 10. Gemini masking commitment + populate_field_elements_for_mock_commitments(proof, /*num_commitments=*/1); + + // 11. Gemini masking evaluations + populate_field_elements(proof, 1); + + // 12. Gemini fold commitments + populate_field_elements_for_mock_commitments(proof, + /*num_commitments=*/CONST_ECCVM_LOG_N - 1); + + // 13. Gemini evaluations + populate_field_elements(proof, CONST_ECCVM_LOG_N); + + // 14. NUM_SMALL_IPA_EVALUATIONS libra evals + populate_field_elements(proof, NUM_SMALL_IPA_EVALUATIONS); + + // 15. Shplonk + populate_field_elements_for_mock_commitments(proof, /*num_commitments=*/1); + + // 16. Translator concatenated masking term commitment + populate_field_elements_for_mock_commitments(proof, /*num_commitments=*/1); + + // 17. Translator op evaluation + populate_field_elements(proof, 1); + + // 18. Translator Px evaluation + populate_field_elements(proof, 1); + + // 19. Translator Py evaluation + populate_field_elements(proof, 1); + + // 20. Translator z1 evaluation + populate_field_elements(proof, 1); + + // 21. Translator z2 evaluation + populate_field_elements(proof, 1); + + // 22. Translator concatenated masking term evaluation + populate_field_elements(proof, 1); + + // 23. Translator grand sum commitment + populate_field_elements_for_mock_commitments(proof, /*num_commitments=*/1); + + // 24. Translator quotient commitment + populate_field_elements_for_mock_commitments(proof, /*num_commitments=*/1); + + // 25. Translator concatenation evaluation + populate_field_elements(proof, 1); + + // 26. Translator grand sum shift evaluation + populate_field_elements(proof, 1); + + // 27. Translator grand sum evaluation + populate_field_elements(proof, 1); + + // 28. Translator quotient evaluation + populate_field_elements(proof, 1); + + // 29. 
Shplonk + populate_field_elements_for_mock_commitments(proof, /*num_commitments=*/1); + + BB_ASSERT_EQ(proof.size(), ECCVMFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS - IPA_PROOF_LENGTH); + + return proof; +} + +/** + * @brief Create a mock ipa proof which has the correct structure but is not necessarily valid + * + * @details An ECCVM proof is made of a pre-ipa proof and an ipa-proof. Here we mock the ipa part. + * + * @return HonkProof + */ +HonkProof create_mock_ipa_proof() +{ + HonkProof proof; + + // Commitments to L and R for CONST_ECCVM_LOG_N round + populate_field_elements_for_mock_commitments( + proof, /*num_commitments=*/CONST_ECCVM_LOG_N + CONST_ECCVM_LOG_N); + + // Commitment to G_0 + populate_field_elements_for_mock_commitments(proof, /*num_commitments=*/1); + + // a_0 evaluation (a_0 is in the base field of BN254) + populate_field_elements(proof, 1); + + BB_ASSERT_EQ(proof.size(), IPA_PROOF_LENGTH); + + return proof; +} + +/** + * @brief Create a mock translator proof which has the correct structure but is not necessarily valid + * + * @return HonkProof + */ +HonkProof create_mock_translator_proof() +{ + using BF = TranslatorFlavor::BF; + using Curve = TranslatorFlavor::Curve; + + HonkProof proof; + HonkProof decider_proof = create_mock_decider_proof(); + + // 1. Accumulated result + populate_field_elements(proof, 1); + + // 2. 
NUM_WITNESS_ENTITIES commitments + populate_field_elements_for_mock_commitments(proof, + /*num_commitments=*/TranslatorFlavor::NUM_WITNESS_ENTITIES - 4); + + // Insert decider proof + proof.insert(proof.end(), decider_proof.begin(), decider_proof.end()); + + BB_ASSERT_EQ(proof.size(), TranslatorFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS); + + return proof; +} + /** * @brief Create a mock MegaHonk VK that has the correct structure * + * @param dyadic_size Dyadic size of the circuit for which we generate a vk + * @param pub_inputs_offest Indicating whether the circuit has a first zero row + * @param inner_public_inputs_size Number of public inputs coming from the ACIR constraints */ template std::shared_ptr create_mock_honk_vk(const size_t dyadic_size, - const size_t pub_inputs_offset) + const size_t pub_inputs_offset, + const size_t inner_public_inputs_size) { // Set relevant VK metadata and commitments auto honk_verification_key = std::make_shared(); honk_verification_key->log_circuit_size = bb::numeric::get_msb(dyadic_size); - honk_verification_key->num_public_inputs = PublicInputs::PUBLIC_INPUTS_SIZE; + honk_verification_key->num_public_inputs = inner_public_inputs_size + PublicInputs::PUBLIC_INPUTS_SIZE; honk_verification_key->pub_inputs_offset = pub_inputs_offset; // must be set correctly for (auto& commitment : honk_verification_key->get_all()) { @@ -211,57 +437,101 @@ std::shared_ptr create_mock_honk_vk(const size } /** - * @brief Create a mock Decider verification key for initilization of a mock verifier accumulator + * @brief Create a mock instance for initilization of a mock verifier accumulator * */ -template std::shared_ptr> create_mock_decider_vk() +template std::shared_ptr> create_mock_verifier_instance() { using FF = typename Flavor::FF; // Set relevant VK metadata and commitments - auto decider_verification_key = std::make_shared>(); + auto verifier_instance = std::make_shared>(); std::shared_ptr vk = create_mock_honk_vk>( 0, 0); // metadata does not 
need to be accurate - decider_verification_key->vk = vk; - decider_verification_key->is_accumulator = true; - decider_verification_key->gate_challenges = std::vector(static_cast(CONST_PG_LOG_N), 0); + verifier_instance->vk = vk; + verifier_instance->is_complete = true; + verifier_instance->gate_challenges = std::vector(static_cast(CONST_PG_LOG_N), FF::random_element()); - for (auto& commitment : decider_verification_key->witness_commitments.get_all()) { + for (auto& commitment : verifier_instance->witness_commitments.get_all()) { commitment = curve::BN254::AffineElement::one(); // arbitrary mock commitment } - return decider_verification_key; + return verifier_instance; } // Explicitly instantiate template functions -template HonkProof create_mock_oink_proof(); -template HonkProof create_mock_oink_proof(); -template HonkProof create_mock_oink_proof>(); - -template HonkProof create_mock_oink_proof>(); +template HonkProof create_mock_oink_proof(const size_t); +template HonkProof create_mock_oink_proof(const size_t); +template HonkProof create_mock_oink_proof>( + const size_t); + +template HonkProof create_mock_oink_proof>( + const size_t); +template HonkProof create_mock_oink_proof>( + const size_t); +template HonkProof create_mock_oink_proof>( + const size_t); +template HonkProof create_mock_oink_proof>( + const size_t); +template HonkProof create_mock_oink_proof(const size_t); template HonkProof create_mock_decider_proof(); template HonkProof create_mock_decider_proof(); - -template HonkProof create_mock_honk_proof(); -template HonkProof create_mock_honk_proof(); -template HonkProof create_mock_honk_proof>(); - -template HonkProof create_mock_honk_proof>(); +template HonkProof create_mock_decider_proof(); +template HonkProof create_mock_decider_proof(); +template HonkProof create_mock_decider_proof(); + +template HonkProof create_mock_honk_proof(const size_t); +template HonkProof create_mock_honk_proof(const size_t); +template HonkProof create_mock_honk_proof>( + 
const size_t); + +template HonkProof create_mock_honk_proof>( + const size_t); +template HonkProof create_mock_honk_proof>( + const size_t); +template HonkProof create_mock_honk_proof>( + const size_t); +template HonkProof create_mock_honk_proof>( + const size_t); +template HonkProof create_mock_honk_proof(const size_t); template HonkProof create_mock_pg_proof(); template HonkProof create_mock_pg_proof(); template HonkProof create_mock_pg_proof>(); +template HonkProof create_mock_civc_proof(const size_t); +template HonkProof create_mock_civc_proof(const size_t); + template std::shared_ptr create_mock_honk_vk( - const size_t, const size_t); + const size_t, const size_t, const size_t); template std::shared_ptr create_mock_honk_vk( - const size_t, const size_t); + const size_t, const size_t, const size_t); template std::shared_ptr create_mock_honk_vk< MegaFlavor, - stdlib::recursion::honk::HidingKernelIO>(const size_t, const size_t); -template std::shared_ptr> create_mock_decider_vk(); + stdlib::recursion::honk::HidingKernelIO>(const size_t, const size_t, const size_t); +template std::shared_ptr create_mock_honk_vk< + MegaZKFlavor, + stdlib::recursion::honk::HidingKernelIO>(const size_t, const size_t, const size_t); + +template std::shared_ptr create_mock_honk_vk< + UltraFlavor, + stdlib::recursion::honk::DefaultIO>(const size_t, const size_t, const size_t); +template std::shared_ptr create_mock_honk_vk< + UltraZKFlavor, + stdlib::recursion::honk::DefaultIO>(const size_t, const size_t, const size_t); +template std::shared_ptr create_mock_honk_vk< + UltraFlavor, + stdlib::recursion::honk::DefaultIO>(const size_t, const size_t, const size_t); +template std::shared_ptr create_mock_honk_vk< + UltraZKFlavor, + stdlib::recursion::honk::DefaultIO>(const size_t, const size_t, const size_t); +template std::shared_ptr create_mock_honk_vk( + const size_t, const size_t, const size_t); + +template std::shared_ptr> create_mock_verifier_instance(); } // namespace acir_format diff 
--git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.hpp index eef92550e588..cd5b25635442 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.hpp @@ -9,20 +9,27 @@ #include "barretenberg/dsl/acir_format/recursion_constraint.hpp" #include "barretenberg/goblin/goblin.hpp" #include "barretenberg/stdlib/primitives/bigfield/bigfield.hpp" -#include "barretenberg/ultra_honk/decider_verification_key.hpp" +#include "barretenberg/ultra_honk/verifier_instance.hpp" #include namespace acir_format { -template bb::HonkProof create_mock_oink_proof(); +template +bb::HonkProof create_mock_oink_proof(const size_t inner_public_inputs_size = 0); template bb::HonkProof create_mock_decider_proof(); -template bb::HonkProof create_mock_honk_proof(); +template +bb::HonkProof create_mock_honk_proof(const size_t inner_public_inputs_size = 0); template bb::HonkProof create_mock_pg_proof(); bb::Goblin::MergeProof create_mock_merge_proof(); +bb::HonkProof create_mock_pre_ipa_proof(); +bb::HonkProof create_mock_ipa_proof(); +bb::HonkProof create_mock_translator_proof(); +template bb::HonkProof create_mock_civc_proof(const size_t inner_public_inputs_size = 0); template std::shared_ptr create_mock_honk_vk(const size_t dyadic_size, - const size_t pub_inputs_offset); -template std::shared_ptr> create_mock_decider_vk(); + const size_t pub_inputs_offset, + const size_t inner_public_inputs_size = 0); +template std::shared_ptr> create_mock_verifier_instance(); } // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.test.cpp index 4429064470f3..6aa22e17201c 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.test.cpp +++ 
b/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.test.cpp @@ -3,8 +3,9 @@ #include "acir_format_mocks.hpp" #include "barretenberg/client_ivc/client_ivc.hpp" #include "barretenberg/goblin/mock_circuits.hpp" +#include "barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.hpp" #include "barretenberg/stdlib/special_public_inputs/special_public_inputs.hpp" -#include "barretenberg/ultra_honk/decider_proving_key.hpp" +#include "barretenberg/ultra_honk/prover_instance.hpp" #include "barretenberg/ultra_honk/ultra_prover.hpp" #include "barretenberg/ultra_honk/ultra_verifier.hpp" #include "honk_recursion_constraint.hpp" @@ -16,9 +17,12 @@ using namespace acir_format; using namespace bb; -template class MockVerifierInputsTest : public ::testing::Test {}; +template class MockVerifierInputsTest : public ::testing::Test { + public: + static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } +}; -using FlavorTypes = testing::Types; +using FlavorTypes = testing::Types; TYPED_TEST_SUITE(MockVerifierInputsTest, FlavorTypes); @@ -31,6 +35,33 @@ TEST(MockVerifierInputsTest, MockMergeProofSize) EXPECT_EQ(merge_proof.size(), MERGE_PROOF_SIZE); } +/** + * @brief Check that the size of a mock pre-ipa proof matches expectation + */ +TEST(MockVerifierInputsTest, MockPreIpaProofSize) +{ + HonkProof pre_ipa_proof = create_mock_pre_ipa_proof(); + EXPECT_EQ(pre_ipa_proof.size(), ECCVMFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS - IPA_PROOF_LENGTH); +} + +/** + * @brief Check that the size of a mock ipa proof matches expectation + */ +TEST(MockVerifierInputsTest, MockIPAProofSize) +{ + HonkProof ipa_proof = create_mock_ipa_proof(); + EXPECT_EQ(ipa_proof.size(), IPA_PROOF_LENGTH); +} + +/** + * @brief Check that the size of a mock translator proof matches expectation + */ +TEST(MockVerifierInputsTest, MockTranslatorProofSize) +{ + HonkProof translator_proof = create_mock_translator_proof(); + EXPECT_EQ(translator_proof.size(), 
TranslatorFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS); +} + /** * @brief Check that the size of a mock Oink proof matches expectation for MegaFlavor * @@ -63,18 +94,24 @@ TEST(MockVerifierInputsTest, MockMegaOinkProofSize) } /** - * @brief Check that the size of a mock Oink proof matches expectation for UltraFlavor + * @brief Check that the size of a mock Oink proof matches expectation for Ultra flavors * */ -TEST(MockVerifierInputsTest, MockUltraOinkProofSize) +TYPED_TEST(MockVerifierInputsTest, MockUltraOinkProofSize) { - using Flavor = UltraFlavor; - using Builder = UltraCircuitBuilder; - - // DefaultIO - const size_t NUM_PUBLIC_INPUTS = stdlib::recursion::honk::DefaultIO::PUBLIC_INPUTS_SIZE; - HonkProof honk_proof = create_mock_oink_proof>(); - EXPECT_EQ(honk_proof.size(), Flavor::OINK_PROOF_LENGTH_WITHOUT_PUB_INPUTS + NUM_PUBLIC_INPUTS); + using Flavor = TypeParam; + using Builder = Flavor::CircuitBuilder; + using IO = std::conditional_t, + stdlib::recursion::honk::RollupIO, + stdlib::recursion::honk::DefaultIO>; + + if (!std::is_same_v) { + const size_t NUM_PUBLIC_INPUTS = IO::PUBLIC_INPUTS_SIZE; + HonkProof honk_proof = create_mock_oink_proof(); + EXPECT_EQ(honk_proof.size(), Flavor::OINK_PROOF_LENGTH_WITHOUT_PUB_INPUTS + NUM_PUBLIC_INPUTS); + } else { + GTEST_SKIP(); + } } /** @@ -85,8 +122,12 @@ TYPED_TEST(MockVerifierInputsTest, MockDeciderProofSize) { using Flavor = TypeParam; - HonkProof honk_proof = create_mock_decider_proof(); - EXPECT_EQ(honk_proof.size(), Flavor::DECIDER_PROOF_LENGTH()); + if (!std::is_same_v) { + HonkProof honk_proof = create_mock_decider_proof(); + EXPECT_EQ(honk_proof.size(), Flavor::DECIDER_PROOF_LENGTH()); + } else { + GTEST_SKIP(); + } } /** @@ -121,16 +162,34 @@ TEST(MockVerifierInputsTest, MockMegaHonkProofSize) } /** - * @brief Check that the size of a mock Honk proof matches expectation for UltraFlavor + * @brief Check that the size of a mock Honk proof matches expectation for Ultra flavors * */ -TEST(MockVerifierInputsTest, 
MockHonkProofSize) +TYPED_TEST(MockVerifierInputsTest, MockUltraHonkProofSize) { - using Flavor = UltraFlavor; - using Builder = UltraCircuitBuilder; + using Flavor = TypeParam; + using Builder = Flavor::CircuitBuilder; + using IO = std::conditional_t, + stdlib::recursion::honk::RollupIO, + stdlib::recursion::honk::DefaultIO>; + + if (!std::is_same_v) { + const size_t NUM_PUBLIC_INPUTS = IO::PUBLIC_INPUTS_SIZE; + HonkProof honk_proof = create_mock_honk_proof(); + EXPECT_EQ(honk_proof.size(), Flavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS() + NUM_PUBLIC_INPUTS); + } else { + GTEST_SKIP(); + } +} + +/** + * @brief Check that the size of a mock ClientIVC proof matches expectation + * + */ +TEST(MockVerifierInputsTest, MockClientIVCProofSize) +{ + using Builder = MegaCircuitBuilder; - // DefaultIO - const size_t NUM_PUBLIC_INPUTS = stdlib::recursion::honk::DefaultIO::PUBLIC_INPUTS_SIZE; - HonkProof honk_proof = create_mock_honk_proof>(); - EXPECT_EQ(honk_proof.size(), Flavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS() + NUM_PUBLIC_INPUTS); + HonkProof civc_proof = create_mock_civc_proof(); + EXPECT_EQ(civc_proof.size(), ClientIVC::Proof::PROOF_LENGTH()); } diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/multi_scalar_mul.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/multi_scalar_mul.cpp index 3d399ca238c2..e91ebec40e82 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/multi_scalar_mul.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/multi_scalar_mul.cpp @@ -49,7 +49,8 @@ void create_multi_scalar_mul_constraint(Builder& builder, if (output_point.is_point_at_infinity().is_constant()) { builder.fix_witness(input.out_point_is_infinite, output_point.is_point_at_infinity().get_value()); } else { - builder.assert_equal(output_point.is_point_at_infinity().witness_index, input.out_point_is_infinite); + builder.assert_equal(output_point.is_point_at_infinity().get_normalized_witness_index(), + input.out_point_is_infinite); } if 
(output_point.x.is_constant()) { builder.fix_witness(input.out_point_x, output_point.x.get_value()); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/multi_scalar_mul.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/multi_scalar_mul.hpp index 97b20eeeb797..85fc8f57aa1a 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/multi_scalar_mul.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/multi_scalar_mul.hpp @@ -17,13 +17,17 @@ namespace acir_format { struct MultiScalarMul { std::vector> points; std::vector> scalars; + // Predicate indicating whether the constraint should be disabled: + // - true: the constraint is valid + // - false: the constraint is disabled, i.e it must not fail and can return whatever. + WitnessOrConstant predicate; uint32_t out_point_x; uint32_t out_point_y; uint32_t out_point_is_infinite; // for serialization, update with any new fields - MSGPACK_FIELDS(points, scalars, out_point_x, out_point_y, out_point_is_infinite); + MSGPACK_FIELDS(points, scalars, predicate, out_point_x, out_point_y, out_point_is_infinite); friend bool operator==(MultiScalarMul const& lhs, MultiScalarMul const& rhs) = default; }; diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/multi_scalar_mul.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/multi_scalar_mul.test.cpp index 98d4059c7d76..5a7f11bdbf9b 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/multi_scalar_mul.test.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/multi_scalar_mul.test.cpp @@ -60,30 +60,7 @@ TEST_F(MSMTests, TestMSM) .varnum = 9, .num_acir_opcodes = 1, .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, .multi_scalar_mul_constraints = { msm_constrain }, - 
.ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, - .poly_triple_constraints = {}, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, .original_opcode_indices = create_empty_original_opcode_indices(), }; mock_opcode_indices(constraint_system); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/pg_recursion_constraint.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/pg_recursion_constraint.cpp new file mode 100644 index 000000000000..839778a12ddb --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/pg_recursion_constraint.cpp @@ -0,0 +1,193 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } +// ===================== + +#include "pg_recursion_constraint.hpp" +#include "barretenberg/common/assert.hpp" +#include "barretenberg/common/throw_or_abort.hpp" +#include "barretenberg/dsl/acir_format/mock_verifier_inputs.hpp" +#include "barretenberg/flavor/flavor.hpp" +#include "barretenberg/flavor/ultra_recursive_flavor.hpp" +#include "barretenberg/flavor/ultra_rollup_recursive_flavor.hpp" +#include "barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.hpp" +#include "barretenberg/stdlib/primitives/bigfield/constants.hpp" +#include "barretenberg/stdlib/primitives/curves/bn254.hpp" +#include "barretenberg/stdlib/primitives/pairing_points.hpp" +#include "proof_surgeon.hpp" +#include "recursion_constraint.hpp" + +namespace acir_format { + +using namespace bb; + +/** + * @brief Create an IVC object with mocked state corresponding to a set of IVC recursion constraints + * @details Construction of a kernel 
circuit requires two inputs: kernel prgram acir constraints and an IVC instance + * containing state needed to complete the kernel logic, e.g. proofs for input to recursive verifiers. To construct + * verification keys for kernel circuits without running a full IVC, we mock the IVC state corresponding to a provided + * set of IVC recurson constraints. For example, if the constraints contain a single PG recursive verification, we + * initialize an IVC with mocked data for the verifier accumulator, the folding proof, the circuit verification key, + * and a merge proof. + * @note There are only three valid combinations of IVC recursion constraints for a kernel program. See below for + * details. + * + * @param constraints IVC recursion constraints from a kernel circuit + * @param trace_settings + * @return ClientIVC + */ +std::shared_ptr create_mock_ivc_from_constraints(const std::vector& constraints, + const TraceSettings& trace_settings) +{ + auto ivc = std::make_shared(constraints.size(), trace_settings); + + uint32_t oink_type = static_cast(PROOF_TYPE::OINK); + uint32_t pg_type = static_cast(PROOF_TYPE::PG); + uint32_t pg_final_type = static_cast(PROOF_TYPE::PG_FINAL); + uint32_t pg_tail_type = static_cast(PROOF_TYPE::PG_TAIL); + + // There is a fixed set of valid combinations of IVC recursion constraints for Aztec kernel circuits: + + // Case: INIT kernel; single Oink recursive verification of an app + if (constraints.size() == 1 && constraints[0].proof_type == oink_type) { + mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::OINK, /*is_kernel=*/false); + return ivc; + } + + // Case: RESET kernel; single PG recursive verification of a kernel + if (constraints.size() == 1 && constraints[0].proof_type == pg_type) { + ivc->recursive_verifier_native_accum = create_mock_verifier_instance(); + mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG, /*is_kernel=*/true); + return ivc; + } + + // Case: TAIL kernel; single PG recursive verification of a kernel + if 
(constraints.size() == 1 && constraints[0].proof_type == pg_tail_type) { + ivc->recursive_verifier_native_accum = create_mock_verifier_instance(); + mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG_TAIL, /*is_kernel=*/true); + return ivc; + } + + // Case: INNER kernel; two PG recursive verifications, kernel and app in that order + if (constraints.size() == 2) { + BB_ASSERT_EQ(constraints[0].proof_type, pg_type); + BB_ASSERT_EQ(constraints[1].proof_type, pg_type); + ivc->recursive_verifier_native_accum = create_mock_verifier_instance(); + mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG, /*is_kernel=*/true); + mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG, /*is_kernel=*/false); + return ivc; + } + + // Case: HIDING kernel; single PG_FINAL recursive verification of a kernel + if (constraints.size() == 1 && constraints[0].proof_type == pg_final_type) { + ivc->recursive_verifier_native_accum = create_mock_verifier_instance(); + mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG_FINAL, /*is_kernel=*/true); + return ivc; + } + + throw_or_abort("Invalid set of IVC recursion constraints!"); + return ivc; +} + +/** + * @brief Create a mock verification queue entry with proof and VK that have the correct structure but are not + * necessarily valid + * + */ +ClientIVC::VerifierInputs create_mock_verification_queue_entry(const ClientIVC::QUEUE_TYPE verification_type, + const TraceSettings& trace_settings, + const bool is_kernel) +{ + using FF = ClientIVC::FF; + using MegaVerificationKey = ClientIVC::MegaVerificationKey; + using Flavor = ClientIVC::Flavor; + + // Use the trace settings to determine the correct dyadic size and the public inputs offset + MegaExecutionTraceBlocks blocks; + blocks.set_fixed_block_sizes(trace_settings); + blocks.compute_offsets(/*is_structured=*/true); + size_t dyadic_size = blocks.get_structured_dyadic_size(); + size_t pub_inputs_offset = blocks.pub_inputs.trace_offset(); + + // Construct a mock Oink or PG proof and a mock MegaHonk 
verification key + std::vector proof; + std::shared_ptr verification_key; + + if (is_kernel) { + using KernelIO = stdlib::recursion::honk::KernelIO; + switch (verification_type) { + case ClientIVC::QUEUE_TYPE::OINK: + proof = create_mock_oink_proof(); + break; + case ClientIVC::QUEUE_TYPE::PG: + case ClientIVC::QUEUE_TYPE::PG_FINAL: + case ClientIVC::QUEUE_TYPE::PG_TAIL: + proof = create_mock_pg_proof(); + break; + default: + throw_or_abort("Invalid verification type! Only OINK, PG and PG_FINAL are supported"); + } + verification_key = create_mock_honk_vk(dyadic_size, pub_inputs_offset); + } else { + using AppIO = stdlib::recursion::honk::AppIO; + switch (verification_type) { + case ClientIVC::QUEUE_TYPE::OINK: + proof = create_mock_oink_proof(); + break; + case ClientIVC::QUEUE_TYPE::PG: + proof = create_mock_pg_proof(); + break; + default: + throw_or_abort("Invalid verification type! Only OINK, PG and PG_FINAL are supported"); + } + verification_key = create_mock_honk_vk(dyadic_size, pub_inputs_offset); + } + + return ClientIVC::VerifierInputs{ proof, verification_key, verification_type, is_kernel }; +} + +/** + * @brief Populate an IVC instance with data that mimics the state after a single IVC accumulation (Oink or PG) + * @details Mock state consists of a mock verification queue entry of type OINK (proof, VK) and a mocked merge proof + * + * @param ivc + * @param num_public_inputs_app num pub inputs in accumulated app, excluding fixed components, e.g. 
pairing points + */ +void mock_ivc_accumulation(const std::shared_ptr& ivc, ClientIVC::QUEUE_TYPE type, const bool is_kernel) +{ + ClientIVC::VerifierInputs entry = + acir_format::create_mock_verification_queue_entry(type, ivc->trace_settings, is_kernel); + ivc->verification_queue.emplace_back(entry); + ivc->goblin.merge_verification_queue.emplace_back(acir_format::create_mock_merge_proof()); + // If the type is PG_FINAL, we also need to populate the ivc instance with a mock decider proof + if (type == ClientIVC::QUEUE_TYPE::PG_FINAL) { + ivc->decider_proof = acir_format::create_mock_decider_proof(); + } + ivc->num_circuits_accumulated++; +} + +/** + * @brief Populate VK witness fields from a recursion constraint from a provided VerificationKey + * + * @param builder + * @param mock_verification_key + * @param key_witness_indices + */ +void populate_dummy_vk_in_constraint(MegaCircuitBuilder& builder, + const std::shared_ptr& mock_verification_key, + std::vector& key_witness_indices) +{ + using FF = ClientIVC::FF; + + // Convert the VerificationKey to fields + std::vector mock_vk_fields = mock_verification_key->to_field_elements(); + BB_ASSERT_EQ(mock_vk_fields.size(), key_witness_indices.size()); + + // Add the fields to the witness and set the key witness indices accordingly + for (auto [witness_idx, value] : zip_view(key_witness_indices, mock_vk_fields)) { + builder.set_variable(witness_idx, value); + } +} + +} // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/pg_recursion_constraint.hpp similarity index 100% rename from barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.hpp rename to barretenberg/cpp/src/barretenberg/dsl/acir_format/pg_recursion_constraint.hpp diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/pg_recursion_constraint.test.cpp 
b/barretenberg/cpp/src/barretenberg/dsl/acir_format/pg_recursion_constraint.test.cpp new file mode 100644 index 000000000000..13f3db4454fa --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/pg_recursion_constraint.test.cpp @@ -0,0 +1,617 @@ +#include "barretenberg/dsl/acir_format/pg_recursion_constraint.hpp" +#include "acir_format.hpp" +#include "acir_format_mocks.hpp" +#include "barretenberg/client_ivc/client_ivc.hpp" +#include "barretenberg/dsl/acir_format/mock_verifier_inputs.hpp" +#include "barretenberg/goblin/mock_circuits.hpp" +#include "barretenberg/ultra_honk/prover_instance.hpp" +#include "barretenberg/ultra_honk/ultra_prover.hpp" +#include "barretenberg/ultra_honk/ultra_verifier.hpp" +#include "honk_recursion_constraint.hpp" +#include "proof_surgeon.hpp" + +#include +#include + +using namespace acir_format; +using namespace bb; + +class IvcRecursionConstraintTest : public ::testing::Test { + + public: + using Builder = MegaCircuitBuilder; + using Flavor = MegaFlavor; + using VerificationKey = MegaFlavor::VerificationKey; + using FF = Flavor::FF; + using VerifierInputs = ClientIVC::VerifierInputs; + using QUEUE_TYPE = ClientIVC::QUEUE_TYPE; + using VerificationQueue = ClientIVC::VerificationQueue; + using ArithmeticConstraint = AcirFormat::PolyTripleConstraint; + using PairingPoints = ClientIVC::PairingPoints; + + static constexpr size_t NUM_TRAILING_KERNELS = 3; // reset, tail, hiding + + /** + * @brief Constuct a simple arbitrary circuit to represent a mock app circuit + * + */ + static Builder construct_mock_app_circuit(const std::shared_ptr& ivc) + { + Builder circuit{ ivc->goblin.op_queue }; + GoblinMockCircuits::add_some_ecc_op_gates(circuit); + MockCircuits::add_arithmetic_gates(circuit); + PairingPoints::add_default_to_public_inputs(circuit); + return circuit; + } + + static std::shared_ptr get_verification_key(Builder& builder_in, + const TraceSettings& trace_settings) + { + // This is a workaround to ensure that the circuit is 
finalized before we create the verification key + // In practice, this should not be needed as the circuit will be finalized when it is accumulated into the IVC + // but this is a workaround for the test setup. + // Create a copy of the input circuit + MegaCircuitBuilder_ builder{ builder_in }; + + // Deepcopy the opqueue to avoid modifying the original one + builder.op_queue = std::make_shared(*builder.op_queue); + std::shared_ptr prover_instance = + std::make_shared(builder, trace_settings); + std::shared_ptr vk = std::make_shared(prover_instance->get_precomputed()); + return vk; + } + + static void construct_and_accumulate_trailing_kernels(const std::shared_ptr& ivc, + TraceSettings trace_settings) + { + + // Reset kernel + EXPECT_EQ(ivc->verification_queue.size(), 1); + EXPECT_EQ(ivc->verification_queue[0].type, QUEUE_TYPE::PG); + construct_and_accumulate_mock_kernel(ivc, trace_settings); + + // Tail kernel + EXPECT_EQ(ivc->verification_queue.size(), 1); + EXPECT_EQ(ivc->verification_queue[0].type, QUEUE_TYPE::PG_TAIL); + construct_and_accumulate_mock_kernel(ivc, trace_settings); + + // Hiding kernel + EXPECT_EQ(ivc->verification_queue.size(), 1); + EXPECT_EQ(ivc->verification_queue[0].type, QUEUE_TYPE::PG_FINAL); + construct_and_accumulate_mock_kernel(ivc, TraceSettings{}); + } + + static UltraCircuitBuilder create_inner_circuit(size_t log_num_gates = 10) + { + using InnerPairingPoints = bb::stdlib::recursion::PairingPoints; + + UltraCircuitBuilder builder; + + // Create 2^log_n many add gates based on input log num gates + const size_t num_gates = (1 << log_num_gates); + for (size_t i = 0; i < num_gates; ++i) { + fr a = fr::random_element(); + uint32_t a_idx = builder.add_variable(a); + + fr b = fr::random_element(); + fr c = fr::random_element(); + fr d = a + b + c; + uint32_t b_idx = builder.add_variable(b); + uint32_t c_idx = builder.add_variable(c); + uint32_t d_idx = builder.add_variable(d); + + builder.create_big_add_gate({ a_idx, b_idx, c_idx, d_idx, 
fr(1), fr(1), fr(1), fr(-1), fr(0) }); + } + + InnerPairingPoints::add_default_to_public_inputs(builder); + return builder; + } + + /** + * @brief Constuct a mock app circuit with a UH recursive verifier + * + */ + static Builder construct_mock_UH_recursion_app_circuit(const std::shared_ptr& ivc, const bool tamper_vk) + { + AcirProgram program; + std::vector recursion_constraints; + + Builder circuit{ ivc->goblin.op_queue }; + GoblinMockCircuits::add_some_ecc_op_gates(circuit); + MockCircuits::add_arithmetic_gates(circuit); + + { + using RecursiveFlavor = UltraRecursiveFlavor_; + using VerifierOutput = bb::stdlib::recursion::honk::UltraRecursiveVerifierOutput; + using StdlibProof = bb::stdlib::Proof; + using StdlibIO = bb::stdlib::recursion::honk::DefaultIO; + + // Create an arbitrary inner circuit + auto inner_circuit = create_inner_circuit(); + + // Compute native verification key + auto prover_instance = std::make_shared>(inner_circuit); + auto honk_vk = std::make_shared(prover_instance->get_precomputed()); + UltraProver prover(prover_instance, honk_vk); // A prerequisite for computing VK + auto inner_proof = prover.construct_proof(); + + if (tamper_vk) { + honk_vk->q_l = g1::one; + UltraVerifier_ verifier(honk_vk); + EXPECT_FALSE(verifier.template verify_proof(inner_proof).result); + } + // Instantiate the recursive verifier using the native verification key + auto stdlib_vk_and_hash = std::make_shared(circuit, honk_vk); + stdlib::recursion::honk::UltraRecursiveVerifier_ verifier(&circuit, stdlib_vk_and_hash); + + StdlibProof stdlib_inner_proof(circuit, inner_proof); + VerifierOutput output = verifier.template verify_proof(stdlib_inner_proof); + + // IO + StdlibIO inputs; + inputs.pairing_inputs = output.points_accumulator; + inputs.set_public(); // propagate resulting pairing points on the public inputs + } + + return circuit; + } + + /** + * @brief Create an ACIR RecursionConstraint given the corresponding verifier inputs + * @brief In practice such 
constraints are created via a call to verify_proof(...) in noir + * + * @param input bberg style proof and verification key + * @param witness Array of witnesses into which the above data is placed + * @return RecursionConstraint + */ + static RecursionConstraint create_recursion_constraint(const VerifierInputs& input, SlabVector& witness) + { + // Assemble simple vectors of witnesses for vkey and proof + std::vector key_witnesses = input.honk_vk->to_field_elements(); + FF key_hash_witness = input.honk_vk->hash(); + std::vector proof_witnesses = input.proof; // proof contains the public inputs at this stage + + // Construct witness indices for each component in the constraint; populate the witness array + auto [key_indices, key_hash_index, proof_indices, public_inputs_indices] = + ProofSurgeon::populate_recursion_witness_data( + witness, proof_witnesses, key_witnesses, key_hash_witness, /*num_public_inputs_to_extract=*/0); + + // The proof type can be either Oink or PG or PG_FINAL + PROOF_TYPE proof_type; + switch (input.type) { + case QUEUE_TYPE::OINK: + proof_type = OINK; + break; + case QUEUE_TYPE::PG: + proof_type = PG; + break; + case QUEUE_TYPE::PG_FINAL: + proof_type = PG_FINAL; + break; + case QUEUE_TYPE::PG_TAIL: + proof_type = PG_TAIL; + break; + default: + throw std::runtime_error("Invalid proof type"); + } + + return RecursionConstraint{ + .key = key_indices, + .proof = {}, // the proof witness indices are not needed in an ivc recursion constraint + .public_inputs = public_inputs_indices, + .key_hash = key_hash_index, + .proof_type = proof_type, + }; + } + + /** + * @brief Generate an acir program {constraints, witness} for a mock kernel + * @details The IVC contains and internal verification queue that contains proofs to be recursively verified. + * Construct an AcirProgram with a RecursionConstraint for each entry in the ivc verification queue. (In practice + * these constraints would come directly from calls to verify_proof in noir). 
+ * @note This method needs the number of public inputs in each proof-to-be-verified so they can be extracted and + * provided separately as is required in the acir constraint system. + * + * @param ivc + * @param inner_circuit_num_pub_inputs Num pub inputs for each circuit whose accumulation is recursively + * verified + * @return Builder + */ + static AcirProgram construct_mock_kernel_program(const VerificationQueue& verification_queue) + { + AcirProgram program; + + // Construct recursion constraints based on the ivc verification queue; populate the witness along the way + std::vector pg_recursion_constraints; + pg_recursion_constraints.reserve(verification_queue.size()); + for (const auto& queue_entry : verification_queue) { + pg_recursion_constraints.push_back(create_recursion_constraint(queue_entry, program.witness)); + } + + // Construct a constraint system containing the business logic and ivc recursion constraints + program.constraints.varnum = static_cast(program.witness.size()); + program.constraints.num_acir_opcodes = static_cast(pg_recursion_constraints.size()); + program.constraints.pg_recursion_constraints = pg_recursion_constraints; + program.constraints.original_opcode_indices = create_empty_original_opcode_indices(); + mock_opcode_indices(program.constraints); + + return program; + } + + static void construct_and_accumulate_mock_kernel(std::shared_ptr ivc, TraceSettings trace_settings) + { + // construct a mock kernel program (acir) from the ivc verification queue + const ProgramMetadata metadata{ ivc }; + AcirProgram mock_kernel_program = construct_mock_kernel_program(ivc->verification_queue); + auto kernel = acir_format::create_circuit(mock_kernel_program, metadata); + auto kernel_vk = get_kernel_vk_from_circuit(kernel, trace_settings); + ivc->accumulate(kernel, kernel_vk); + } + + static void construct_and_accumulate_mock_app(std::shared_ptr ivc, TraceSettings trace_settings) + { + // construct a mock kernel program (acir) from the ivc 
verification queue + auto app_circuit = construct_mock_app_circuit(ivc); + ivc->accumulate(app_circuit, get_verification_key(app_circuit, trace_settings)); + } + + /** + * @brief Construct a kernel circuit VK from an acir program with IVC recursion constraints + * + * @param program Acir program representing a kernel circuit + * @param trace_settings needed for construction of the VK + * @return std::shared_ptr + */ + static std::shared_ptr construct_kernel_vk_from_acir_program( + AcirProgram& program, const TraceSettings& trace_settings) + { + // Create kernel circuit from the kernel program + Builder kernel = acir_format::create_circuit(program); + + // Manually construct the VK for the kernel circuit + auto prover_instance = std::make_shared(kernel, trace_settings); + auto verification_key = std::make_shared(prover_instance->get_precomputed()); + return verification_key; + } + + static std::shared_ptr get_kernel_vk_from_circuit(Builder& kernel, + TraceSettings trace_settings) + { + auto prover_instance = std::make_shared(kernel, trace_settings); + auto verification_key = std::make_shared(prover_instance->get_precomputed()); + return verification_key; + } + + protected: + void SetUp() override { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } +}; + +/** + * @brief Check that the size of a mock merge proof matches expectation + */ +TEST_F(IvcRecursionConstraintTest, MockMergeProofSize) +{ + Goblin::MergeProof merge_proof = create_mock_merge_proof(); + EXPECT_EQ(merge_proof.size(), MERGE_PROOF_SIZE); +} + +/** + * @brief Test IVC accumulation of a one app and one kernel; The kernel includes a recursive oink verification for the + * app, specified via an ACIR RecursionConstraint. 
+ */ +TEST_F(IvcRecursionConstraintTest, AccumulateSingleApp) +{ + TraceSettings trace_settings{ SMALL_TEST_STRUCTURE }; + auto ivc = std::make_shared(/*num_circuits=*/5 /* app, kernel, reset, tail, hiding */, trace_settings); + + // construct a mock app_circuit + construct_and_accumulate_mock_app(ivc, trace_settings); + + // Construct kernel consisting only of the kernel completion logic + construct_and_accumulate_mock_kernel(ivc, trace_settings); + + // add the trailing kernels + construct_and_accumulate_trailing_kernels(ivc, trace_settings); + + auto proof = ivc->prove(); + EXPECT_TRUE(ClientIVC::verify(proof, ivc->get_vk())); +} + +/** + * @brief Test IVC accumulation of two apps and two kernels; The first kernel contains a recursive oink verification and + * the second contains two recursive PG verifications, all specified via ACIR RecursionConstraints. + */ +TEST_F(IvcRecursionConstraintTest, AccumulateTwoApps) +{ + TraceSettings trace_settings{ AZTEC_TRACE_STRUCTURE }; + // 4 ciruits and the tail kernel + auto ivc = std::make_shared(/*num_circuits=*/7, trace_settings); + + // construct a mock app_circuit + construct_and_accumulate_mock_app(ivc, trace_settings); + + const ProgramMetadata metadata{ ivc }; + + // Construct kernel_0; consists of a single oink recursive verification for app (plus databus/merge logic) + construct_and_accumulate_mock_kernel(ivc, trace_settings); + + // construct a mock app_circuit + construct_and_accumulate_mock_app(ivc, trace_settings); + + // Construct and accumulate another Kernel circuit + construct_and_accumulate_mock_kernel(ivc, trace_settings); + + // Accumulate the trailing kernels + construct_and_accumulate_trailing_kernels(ivc, trace_settings); + + auto proof = ivc->prove(); + EXPECT_TRUE(ClientIVC::verify(proof, ivc->get_vk())); +} + +// Test generation of "init" kernel VK via dummy IVC data +TEST_F(IvcRecursionConstraintTest, GenerateInitKernelVKFromConstraints) +{ + const TraceSettings trace_settings{ 
AZTEC_TRACE_STRUCTURE }; + + // First, construct the kernel VK by running the full IVC (accumulate one app and one kernel) + std::shared_ptr expected_kernel_vk; + { + auto ivc = std::make_shared(/*num_circuits=*/5, trace_settings); + + // Construct and accumulate mock app_circuit + construct_and_accumulate_mock_app(ivc, trace_settings); + + // Construct and accumulate kernel consisting only of the kernel completion logic + construct_and_accumulate_mock_kernel(ivc, trace_settings); + expected_kernel_vk = ivc->verification_queue.back().honk_vk; + } + + // Now, construct the kernel VK by mocking the post app accumulation state of the IVC + std::shared_ptr kernel_vk; + { + auto ivc = std::make_shared(/*num_circuits=*/5, trace_settings); + + // Construct kernel consisting only of the kernel completion logic + acir_format::mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::OINK, /*is_kernel=*/false); + AcirProgram program = construct_mock_kernel_program(ivc->verification_queue); + program.witness = {}; // remove the witness to mimick VK construction context + + kernel_vk = construct_kernel_vk_from_acir_program(program, trace_settings); + } + + // Compare the VK constructed via running the IVc with the one constructed via mocking + EXPECT_EQ(*kernel_vk.get(), *expected_kernel_vk.get()); +} + +// Test generation of "reset" kernel VK via dummy IVC data +TEST_F(IvcRecursionConstraintTest, GenerateResetKernelVKFromConstraints) +{ + const TraceSettings trace_settings{ AZTEC_TRACE_STRUCTURE }; + + // First, construct the kernel VK by running the full IVC (accumulate one app and one kernel) + std::shared_ptr expected_kernel_vk; + { + auto ivc = std::make_shared(/*num_circuits=*/5, trace_settings); + + const ProgramMetadata metadata{ ivc }; + + // Construct and accumulate mock app_circuit + construct_and_accumulate_mock_app(ivc, trace_settings); + + // Construct and accumulate a mock INIT kernel (oink recursion for app accumulation) + construct_and_accumulate_mock_kernel(ivc, 
trace_settings); + EXPECT_TRUE(ivc->verification_queue.size() == 1); + EXPECT_TRUE(ivc->verification_queue[0].type == bb::ClientIVC::QUEUE_TYPE::PG); + + // Construct and accumulate a mock RESET kernel (PG recursion for kernel accumulation) + construct_and_accumulate_mock_kernel(ivc, trace_settings); + expected_kernel_vk = ivc->verification_queue.back().honk_vk; + } + + // Now, construct the kernel VK by mocking the IVC state prior to kernel construction + std::shared_ptr kernel_vk; + { + auto ivc = std::make_shared(/*num_circuits=*/5, trace_settings); + + // Construct kernel consisting only of the kernel completion logic + acir_format::mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG, /*is_kernel=*/true); + AcirProgram program = construct_mock_kernel_program(ivc->verification_queue); + program.witness = {}; // remove the witness to mimick VK construction context + kernel_vk = construct_kernel_vk_from_acir_program(program, trace_settings); + } + + // Compare the VK constructed via running the IVc with the one constructed via mocking + EXPECT_EQ(*kernel_vk.get(), *expected_kernel_vk.get()); +} + +// Test generation of "tail" kernel VK via dummy IVC data +TEST_F(IvcRecursionConstraintTest, GenerateTailKernelVKFromConstraints) +{ + const TraceSettings trace_settings{ AZTEC_TRACE_STRUCTURE }; + + // First, construct the kernel VK by running the full IVC (accumulate one app and one kernel) + std::shared_ptr expected_kernel_vk; + { + auto ivc = std::make_shared(/*num_circuits=*/5, trace_settings); + + const ProgramMetadata metadata{ ivc }; + + // Construct and accumulate mock app_circuit + construct_and_accumulate_mock_app(ivc, trace_settings); + + // Construct and accumulate a mock INIT kernel (oink recursion for app accumulation) + construct_and_accumulate_mock_kernel(ivc, trace_settings); + + // Construct and accumulate a mock RESET kernel (PG recursion for kernel accumulation) + construct_and_accumulate_mock_kernel(ivc, trace_settings); + + // Construct and 
accumulate a mock TAIL kernel (PG recursion for kernel accumulation) + EXPECT_TRUE(ivc->verification_queue.size() == 1); + EXPECT_TRUE(ivc->verification_queue[0].type == bb::ClientIVC::QUEUE_TYPE::PG_TAIL); + construct_and_accumulate_mock_kernel(ivc, trace_settings); + + expected_kernel_vk = ivc->verification_queue.back().honk_vk; + } + + // Now, construct the kernel VK by mocking the IVC state prior to kernel construction + std::shared_ptr kernel_vk; + { + auto ivc = std::make_shared(/*num_circuits=*/5, trace_settings); + + // Construct kernel consisting only of the kernel completion logic + acir_format::mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG_TAIL, /*is_kernel=*/true); + AcirProgram program = construct_mock_kernel_program(ivc->verification_queue); + program.witness = {}; // remove the witness to mimick VK construction context + + kernel_vk = construct_kernel_vk_from_acir_program(program, trace_settings); + } + + // Compare the VK constructed via running the IVc with the one constructed via mocking + EXPECT_EQ(*kernel_vk.get(), *expected_kernel_vk.get()); +} + +// Test generation of "inner" kernel VK via dummy IVC data +TEST_F(IvcRecursionConstraintTest, GenerateInnerKernelVKFromConstraints) +{ + const TraceSettings trace_settings{ AZTEC_TRACE_STRUCTURE }; + + // First, construct the kernel VK by running the full IVC (accumulate one app and one kernel) + std::shared_ptr expected_kernel_vk; + { + // we have to set the number of circuits one more than the number of circuits we're accumulating as otherwise + // the last circuit will be seen as a tail + auto ivc = std::make_shared(/*num_circuits=*/6, trace_settings); + + const ProgramMetadata metadata{ ivc }; + + { // Construct and accumulate mock app_circuit + construct_and_accumulate_mock_app(ivc, trace_settings); + } + + // Construct and accumulate a mock INIT kernel (oink recursion for app accumulation) + construct_and_accumulate_mock_kernel(ivc, trace_settings); + + { // Construct and accumulate a 
second mock app_circuit + construct_and_accumulate_mock_app(ivc, trace_settings); + } + + { // Construct and accumulate a mock INNER kernel (PG recursion for kernel accumulation) + EXPECT_TRUE(ivc->verification_queue.size() == 2); + EXPECT_TRUE(ivc->verification_queue[1].type == bb::ClientIVC::QUEUE_TYPE::PG); + construct_and_accumulate_mock_kernel(ivc, trace_settings); + } + + expected_kernel_vk = ivc->verification_queue.back().honk_vk; + } + + // Now, construct the kernel VK by mocking the IVC state prior to kernel construction + std::shared_ptr kernel_vk; + { + auto ivc = std::make_shared(/*num_circuits=*/4, trace_settings); + + // Construct kernel consisting only of the kernel completion logic + acir_format::mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG, /*is_kernel=*/true); + acir_format::mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG, /*is_kernel=*/false); + AcirProgram program = construct_mock_kernel_program(ivc->verification_queue); + program.witness = {}; // remove the witness to mimick VK construction context + + kernel_vk = construct_kernel_vk_from_acir_program(program, trace_settings); + } + + // Compare the VK constructed via running the IVc with the one constructed via mocking + EXPECT_EQ(*kernel_vk.get(), *expected_kernel_vk.get()); +} + +// Test generation of "hiding" kernel VK via dummy IVC data +TEST_F(IvcRecursionConstraintTest, GenerateHidingKernelVKFromConstraints) +{ + const TraceSettings trace_settings{ AZTEC_TRACE_STRUCTURE }; + + // First, construct the kernel VK by running the full IVC + std::shared_ptr expected_hiding_kernel_vk; + { + auto ivc = std::make_shared(/*num_circuits=*/5, trace_settings); + const ProgramMetadata metadata{ ivc }; + + { + // Construct and accumulate mock app_circuit + construct_and_accumulate_mock_app(ivc, trace_settings); + } + + { + // Construct and accumulate a mock INIT kernel (oink recursion for app accumulation) + construct_and_accumulate_mock_kernel(ivc, trace_settings); + } + + 
construct_and_accumulate_trailing_kernels(ivc, trace_settings); + + // The single entry in the verification queue corresponds to the hiding kernel + expected_hiding_kernel_vk = ivc->verification_queue[0].honk_vk; + } + + // Now, construct the kernel VK by mocking the IVC state prior to kernel construction + std::shared_ptr kernel_vk; + { + // mock IVC accumulation increases the num_circuits_accumualted, hence we need to assume the tail kernel has + // been accumulated + auto ivc = std::make_shared(/*num_circuits=*/5, TraceSettings()); + // construct a mock tail kernel + acir_format::mock_ivc_accumulation(ivc, ClientIVC::QUEUE_TYPE::PG_FINAL, /*is_kernel=*/true); + AcirProgram program = construct_mock_kernel_program(ivc->verification_queue); + program.witness = {}; // remove the witness to mimick VK construction context + kernel_vk = construct_kernel_vk_from_acir_program(program, TraceSettings()); + } + + // Compare the VK constructed via running the IVc with the one constructed via mocking + EXPECT_EQ(*kernel_vk.get(), *expected_hiding_kernel_vk.get()); +} + +/** + * @brief Test IVC accumulation of a one app and one kernel. The app includes a UltraHonk Recursive Verifier. + * This test was copied from the AccumulateTwo test. 
+ */ +TEST_F(IvcRecursionConstraintTest, RecursiveVerifierAppCircuitTest) +{ + TraceSettings trace_settings{ AZTEC_TRACE_STRUCTURE }; + auto ivc = std::make_shared(/*num_circuits*/ 5, trace_settings); + + // construct a mock app_circuit with an UH recursion call + Builder app_circuit = construct_mock_UH_recursion_app_circuit(ivc, /*tamper_vk=*/false); + + // Complete instance and generate an oink proof + ivc->accumulate(app_circuit, get_verification_key(app_circuit, trace_settings)); + + // Construct kernel consisting only of the kernel completion logic + construct_and_accumulate_mock_kernel(ivc, trace_settings); + + construct_and_accumulate_trailing_kernels(ivc, trace_settings); + + auto proof = ivc->prove(); + EXPECT_TRUE(ClientIVC::verify(proof, ivc->get_vk())); +} + +/** + * @brief Test IVC accumulation of a one app and one kernel. The app includes a UltraHonk Recursive Verifier that + * verifies a failed proof. This test was copied from the AccumulateTwo test. + */ +TEST_F(IvcRecursionConstraintTest, BadRecursiveVerifierAppCircuitTest) +{ + BB_DISABLE_ASSERTS(); // Disable assert in PG prover + + TraceSettings trace_settings{ AZTEC_TRACE_STRUCTURE }; + auto ivc = std::make_shared(/*num_circuits*/ 5, trace_settings); + + // construct and accumulate mock app_circuit that has bad pairing point object + Builder app_circuit = construct_mock_UH_recursion_app_circuit(ivc, /*tamper_vk=*/true); + ivc->accumulate(app_circuit, get_verification_key(app_circuit, trace_settings)); + + // Construct kernel consisting only of the kernel completion logic + construct_and_accumulate_mock_kernel(ivc, trace_settings); + + // add the trailing kernels + construct_and_accumulate_trailing_kernels(ivc, trace_settings); + + // We expect the CIVC proof to fail due to the app with a failed UH recursive verification + auto proof = ivc->prove(); + EXPECT_FALSE(ClientIVC::verify(proof, ivc->get_vk())); +} diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/poseidon2_constraint.cpp 
b/barretenberg/cpp/src/barretenberg/dsl/acir_format/poseidon2_constraint.cpp index f47968c0d2e9..2072c262fe3e 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/poseidon2_constraint.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/poseidon2_constraint.cpp @@ -17,11 +17,10 @@ using namespace bb; template void create_poseidon2_permutations(Builder& builder, const Poseidon2Constraint& constraint) { using field_ct = stdlib::field_t; - using Poseidon2Params = crypto::Poseidon2Bn254ScalarFieldParams; - using State = std::array; + using State = stdlib::Poseidon2Permutation::State; - BB_ASSERT_EQ(constraint.state.size(), constraint.len); - BB_ASSERT_EQ(constraint.result.size(), constraint.len); + BB_ASSERT_EQ(constraint.state.size(), 4U); + BB_ASSERT_EQ(constraint.result.size(), 4U); // Get the witness assignment for each witness index // Write the witness assignment to the byte array state State state; @@ -29,19 +28,9 @@ template void create_poseidon2_permutations(Builder& builder, state[i] = to_field_ct(constraint.state[i], builder); } State output_state; - output_state = stdlib::Poseidon2Permutation::permutation(&builder, state); + output_state = stdlib::Poseidon2Permutation::permutation(&builder, state); for (size_t i = 0; i < output_state.size(); ++i) { - poly_triple assert_equal{ - .a = output_state[i].normalize().witness_index, - .b = constraint.result[i], - .c = 0, - .q_m = 0, - .q_l = 1, - .q_r = -1, - .q_o = 0, - .q_c = 0, - }; - builder.create_poly_gate(assert_equal); + output_state[i].assert_equal(field_ct::from_witness_index(&builder, constraint.result[i])); } } diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/poseidon2_constraint.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/poseidon2_constraint.hpp index 17733704c56f..c00315bb1f0c 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/poseidon2_constraint.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/poseidon2_constraint.hpp @@ -15,13 
+15,12 @@ namespace acir_format { struct Poseidon2Constraint { std::vector> state; std::vector result; - uint32_t len; // For serialization, update with any new fields - MSGPACK_FIELDS(state, result, len); + MSGPACK_FIELDS(state, result); friend bool operator==(Poseidon2Constraint const& lhs, Poseidon2Constraint const& rhs) = default; }; template void create_poseidon2_permutations(Builder& builder, const Poseidon2Constraint& constraint); -} // namespace acir_format \ No newline at end of file +} // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/poseidon2_constraint.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/poseidon2_constraint.test.cpp index b7934091fab5..e12c14d42a2c 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/poseidon2_constraint.test.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/poseidon2_constraint.test.cpp @@ -33,37 +33,13 @@ TEST_F(Poseidon2Tests, TestPoseidon2Permutation) WitnessOrConstant::from_index(4), }, .result = { 5, 6, 7, 8, }, - .len = 4, }; AcirFormat constraint_system{ .varnum = 9, .num_acir_opcodes = 1, .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, - .sha256_compression = {}, - - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, .poseidon2_constraints = { poseidon2_constraint }, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, - .poly_triple_constraints = {}, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, .original_opcode_indices = create_empty_original_opcode_indices(), }; mock_opcode_indices(constraint_system); diff 
--git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/proof_surgeon.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/proof_surgeon.hpp index a980fc1cdea4..5a9a75ac9a03 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/proof_surgeon.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/proof_surgeon.hpp @@ -7,6 +7,7 @@ #pragma once #include "barretenberg/common/map.hpp" #include "barretenberg/common/serialize.hpp" +#include "barretenberg/dsl/acir_format/utils.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" #include "barretenberg/flavor/flavor.hpp" #include "barretenberg/honk/proof_system/types/proof.hpp" @@ -18,56 +19,8 @@ namespace acir_format { -class ProofSurgeon { - using FF = bb::fr; - - // construct a string of the form "[, , ...]" - static std::string to_json(const std::vector& data) - { - return format( - "[", bb::join(bb::transform::map(data, [](auto fr) { return format("\"", fr, "\""); }), ", "), "]"); - } - +template class ProofSurgeon { public: - /** - * @brief Construct a string containing the inputs to a noir verify_proof call (to be written to a .toml) - * - * @param proof A complete bberg style proof (i.e. contains the public inputs) - * @param verification_key - * @param toml_path - */ - template - static std::string construct_recursion_inputs_toml_data(std::vector& proof, - const std::shared_ptr& verification_key, - bool ipa_accumulation) - { - // Convert verification key to fields - std::vector vk_fields = verification_key->to_field_elements(); - - // Get public inputs by cutting them out of the proof - size_t num_public_inputs_to_extract = - ipa_accumulation - ? 
static_cast(verification_key->num_public_inputs) - bb::RollupIO::PUBLIC_INPUTS_SIZE - : static_cast(verification_key->num_public_inputs) - bb::DefaultIO::PUBLIC_INPUTS_SIZE; - debug("proof size: ", proof.size()); - debug("number of public inputs to extract: ", num_public_inputs_to_extract); - std::vector public_inputs = - acir_format::ProofSurgeon::cut_public_inputs_from_proof(proof, num_public_inputs_to_extract); - - // Construct json-style output for each component - std::string proof_json = to_json(proof); - std::string pub_inputs_json = to_json(public_inputs); - std::string vk_json = to_json(vk_fields); - - // Format with labels for noir recursion input - std::string toml_content = "key_hash = " + format("\"", FF(0), "\"") + "\n"; // not used by honk - toml_content += "proof = " + proof_json + "\n"; - toml_content += "public_inputs = " + pub_inputs_json + "\n"; - toml_content += "verification_key = " + vk_json + "\n"; - - return toml_content; - } - /** * @brief Reconstruct a bberg style proof from a acir style proof + public inputs * @details Insert the public inputs in the middle the proof fields after 'inner_public_input_offset' because this @@ -96,17 +49,17 @@ class ProofSurgeon { * * @param proof_witnesses Witness values of a bberg style proof containing public inputs * @param num_public_inputs The number of public inputs to extract from the proof - * @return std::vector The extracted public input witness values + * @return std::vector The extracted public input witness values */ - static std::vector cut_public_inputs_from_proof(std::vector& proof_witnesses, - const size_t num_public_inputs_to_extract) + static std::vector cut_public_inputs_from_proof(std::vector& proof_witnesses, + const size_t num_public_inputs_to_extract) { // Construct iterators pointing to the start and end of the public inputs within the proof auto pub_inputs_begin_itr = proof_witnesses.begin(); auto pub_inputs_end_itr = proof_witnesses.begin() + 
static_cast(num_public_inputs_to_extract); // Construct the isolated public inputs - std::vector public_input_witnesses{ pub_inputs_begin_itr, pub_inputs_end_itr }; + std::vector public_input_witnesses{ pub_inputs_begin_itr, pub_inputs_end_itr }; // Erase the public inputs from the proof proof_witnesses.erase(pub_inputs_begin_itr, pub_inputs_end_itr); @@ -119,7 +72,7 @@ class ProofSurgeon { * * @param proof A bberg style stdlib proof (contains public inputs) * @param num_public_inputs The number of public input witness indices to get from the proof - * @return std::vector The corresponding public input witness indices + * @return std::vector The corresponding public input witness indices */ static std::vector get_public_inputs_witness_indices_from_proof( const bb::stdlib::Proof& proof, const size_t num_public_inputs_to_extract) @@ -156,34 +109,23 @@ class ProofSurgeon { * @param num_public_inputs * @return RecursionWitnessData */ - static RecursionWitnessData populate_recursion_witness_data(bb::SlabVector& witness, - std::vector& proof_witnesses, - const std::vector& key_witnesses, - const bb::fr& key_hash_witness, + static RecursionWitnessData populate_recursion_witness_data(bb::SlabVector& witness, + std::vector& proof_witnesses, + const std::vector& key_witnesses, + const FF& key_hash_witness, const size_t num_public_inputs_to_extract) { // Extract all public inputs except for those corresponding to the aggregation object - std::vector public_input_witnesses = + std::vector public_input_witnesses = cut_public_inputs_from_proof(proof_witnesses, num_public_inputs_to_extract); - // Helper to append some values to the witness vector and return their corresponding indices - auto add_to_witness_and_track_indices = [](bb::SlabVector& witness, - const std::vector& input) -> std::vector { - std::vector indices; - indices.reserve(input.size()); - auto witness_idx = static_cast(witness.size()); - for (const auto& value : input) { - witness.push_back(value); - 
indices.push_back(witness_idx++); - } - return indices; - }; - // Append key, proof, and public inputs while storing the associated witness indices - std::vector key_indices = add_to_witness_and_track_indices(witness, key_witnesses); - uint32_t key_hash_index = add_to_witness_and_track_indices(witness, { key_hash_witness })[0]; - std::vector proof_indices = add_to_witness_and_track_indices(witness, proof_witnesses); - std::vector public_input_indices = add_to_witness_and_track_indices(witness, public_input_witnesses); + std::vector key_indices = add_to_witness_and_track_indices(witness, key_witnesses); + uint32_t key_hash_index = static_cast(witness.size()); + witness.emplace_back(key_hash_witness); + std::vector proof_indices = add_to_witness_and_track_indices(witness, proof_witnesses); + std::vector public_input_indices = + add_to_witness_and_track_indices(witness, public_input_witnesses); return { key_indices, key_hash_index, proof_indices, public_input_indices }; } diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/recursion_constraint.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/recursion_constraint.hpp index 9933241e3326..9ee0f04958a8 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/recursion_constraint.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/recursion_constraint.hpp @@ -6,6 +6,8 @@ #pragma once #include "barretenberg/common/serialize.hpp" +#include "barretenberg/dsl/acir_format/witness_constant.hpp" +#include "barretenberg/stdlib/primitives/field/field.hpp" #include #include @@ -15,7 +17,7 @@ namespace acir_format { // ACIR // Keep this enum values in sync with their noir counterpart constants defined in // noir-protocol-circuits/crates/types/src/constants.nr -enum PROOF_TYPE { PLONK, HONK, OINK, PG, AVM, ROLLUP_HONK, ROOT_ROLLUP_HONK, HONK_ZK, PG_FINAL, PG_TAIL }; +enum PROOF_TYPE { PLONK, HONK, OINK, PG, AVM, ROLLUP_HONK, ROOT_ROLLUP_HONK, HONK_ZK, PG_FINAL, PG_TAIL, CIVC }; /** * @brief 
RecursionConstraint struct contains information required to recursively verify a proof! @@ -54,16 +56,26 @@ enum PROOF_TYPE { PLONK, HONK, OINK, PG, AVM, ROLLUP_HONK, ROOT_ROLLUP_HONK, HON * TODO(https://github.com/AztecProtocol/barretenberg/issues/996): Create similar comments for Honk. */ struct RecursionConstraint { - // An aggregation state is represented by two G1 affine elements. Each G1 point has - // two field element coordinates (x, y). Thus, four field elements - static constexpr size_t NUM_AGGREGATION_ELEMENTS = 4; std::vector key; std::vector proof; std::vector public_inputs; uint32_t key_hash; uint32_t proof_type; + WitnessOrConstant predicate; friend bool operator==(RecursionConstraint const& lhs, RecursionConstraint const& rhs) = default; + + template + static std::vector> fields_from_witnesses(Builder& builder, + const std::vector& witness_indices) + { + std::vector> result; + result.reserve(witness_indices.size()); + for (const auto& idx : witness_indices) { + result.emplace_back(bb::stdlib::field_t::from_witness_index(&builder, idx)); + } + return result; + } }; template inline void read(B& buf, RecursionConstraint& constraint) @@ -73,6 +85,7 @@ template inline void read(B& buf, RecursionConstraint& constraint) read(buf, constraint.proof); read(buf, constraint.public_inputs); read(buf, constraint.key_hash); + read(buf, constraint.predicate); } template inline void write(B& buf, RecursionConstraint const& constraint) @@ -82,6 +95,7 @@ template inline void write(B& buf, RecursionConstraint const& const write(buf, constraint.proof); write(buf, constraint.public_inputs); write(buf, constraint.key_hash); + write(buf, constraint.predicate); } } // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/serde/acir.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/serde/acir.hpp index feb4d38ad96d..caf05278d013 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/serde/acir.hpp +++ 
b/barretenberg/cpp/src/barretenberg/dsl/acir_format/serde/acir.hpp @@ -1161,169 +1161,9 @@ struct BlackBoxOp { } }; - struct BigIntAdd { - Acir::MemoryAddress lhs; - Acir::MemoryAddress rhs; - Acir::MemoryAddress output; - - friend bool operator==(const BigIntAdd&, const BigIntAdd&); - std::vector bincodeSerialize() const; - static BigIntAdd bincodeDeserialize(std::vector); - - void msgpack_pack(auto& packer) const - { - packer.pack_map(3); - packer.pack(std::make_pair("lhs", lhs)); - packer.pack(std::make_pair("rhs", rhs)); - packer.pack(std::make_pair("output", output)); - } - - void msgpack_unpack(msgpack::object const& o) - { - auto name = "BigIntAdd"; - auto kvmap = Helpers::make_kvmap(o, name); - Helpers::conv_fld_from_kvmap(kvmap, name, "lhs", lhs, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "rhs", rhs, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "output", output, false); - } - }; - - struct BigIntSub { - Acir::MemoryAddress lhs; - Acir::MemoryAddress rhs; - Acir::MemoryAddress output; - - friend bool operator==(const BigIntSub&, const BigIntSub&); - std::vector bincodeSerialize() const; - static BigIntSub bincodeDeserialize(std::vector); - - void msgpack_pack(auto& packer) const - { - packer.pack_map(3); - packer.pack(std::make_pair("lhs", lhs)); - packer.pack(std::make_pair("rhs", rhs)); - packer.pack(std::make_pair("output", output)); - } - - void msgpack_unpack(msgpack::object const& o) - { - auto name = "BigIntSub"; - auto kvmap = Helpers::make_kvmap(o, name); - Helpers::conv_fld_from_kvmap(kvmap, name, "lhs", lhs, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "rhs", rhs, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "output", output, false); - } - }; - - struct BigIntMul { - Acir::MemoryAddress lhs; - Acir::MemoryAddress rhs; - Acir::MemoryAddress output; - - friend bool operator==(const BigIntMul&, const BigIntMul&); - std::vector bincodeSerialize() const; - static BigIntMul bincodeDeserialize(std::vector); - - void 
msgpack_pack(auto& packer) const - { - packer.pack_map(3); - packer.pack(std::make_pair("lhs", lhs)); - packer.pack(std::make_pair("rhs", rhs)); - packer.pack(std::make_pair("output", output)); - } - - void msgpack_unpack(msgpack::object const& o) - { - auto name = "BigIntMul"; - auto kvmap = Helpers::make_kvmap(o, name); - Helpers::conv_fld_from_kvmap(kvmap, name, "lhs", lhs, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "rhs", rhs, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "output", output, false); - } - }; - - struct BigIntDiv { - Acir::MemoryAddress lhs; - Acir::MemoryAddress rhs; - Acir::MemoryAddress output; - - friend bool operator==(const BigIntDiv&, const BigIntDiv&); - std::vector bincodeSerialize() const; - static BigIntDiv bincodeDeserialize(std::vector); - - void msgpack_pack(auto& packer) const - { - packer.pack_map(3); - packer.pack(std::make_pair("lhs", lhs)); - packer.pack(std::make_pair("rhs", rhs)); - packer.pack(std::make_pair("output", output)); - } - - void msgpack_unpack(msgpack::object const& o) - { - auto name = "BigIntDiv"; - auto kvmap = Helpers::make_kvmap(o, name); - Helpers::conv_fld_from_kvmap(kvmap, name, "lhs", lhs, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "rhs", rhs, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "output", output, false); - } - }; - - struct BigIntFromLeBytes { - Acir::HeapVector inputs; - Acir::HeapVector modulus; - Acir::MemoryAddress output; - - friend bool operator==(const BigIntFromLeBytes&, const BigIntFromLeBytes&); - std::vector bincodeSerialize() const; - static BigIntFromLeBytes bincodeDeserialize(std::vector); - - void msgpack_pack(auto& packer) const - { - packer.pack_map(3); - packer.pack(std::make_pair("inputs", inputs)); - packer.pack(std::make_pair("modulus", modulus)); - packer.pack(std::make_pair("output", output)); - } - - void msgpack_unpack(msgpack::object const& o) - { - auto name = "BigIntFromLeBytes"; - auto kvmap = Helpers::make_kvmap(o, name); - 
Helpers::conv_fld_from_kvmap(kvmap, name, "inputs", inputs, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "modulus", modulus, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "output", output, false); - } - }; - - struct BigIntToLeBytes { - Acir::MemoryAddress input; - Acir::HeapVector output; - - friend bool operator==(const BigIntToLeBytes&, const BigIntToLeBytes&); - std::vector bincodeSerialize() const; - static BigIntToLeBytes bincodeDeserialize(std::vector); - - void msgpack_pack(auto& packer) const - { - packer.pack_map(2); - packer.pack(std::make_pair("input", input)); - packer.pack(std::make_pair("output", output)); - } - - void msgpack_unpack(msgpack::object const& o) - { - auto name = "BigIntToLeBytes"; - auto kvmap = Helpers::make_kvmap(o, name); - Helpers::conv_fld_from_kvmap(kvmap, name, "input", input, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "output", output, false); - } - }; - struct Poseidon2Permutation { Acir::HeapVector message; Acir::HeapArray output; - Acir::MemoryAddress len; friend bool operator==(const Poseidon2Permutation&, const Poseidon2Permutation&); std::vector bincodeSerialize() const; @@ -1331,10 +1171,9 @@ struct BlackBoxOp { void msgpack_pack(auto& packer) const { - packer.pack_map(3); + packer.pack_map(2); packer.pack(std::make_pair("message", message)); packer.pack(std::make_pair("output", output)); - packer.pack(std::make_pair("len", len)); } void msgpack_unpack(msgpack::object const& o) @@ -1343,7 +1182,6 @@ struct BlackBoxOp { auto kvmap = Helpers::make_kvmap(o, name); Helpers::conv_fld_from_kvmap(kvmap, name, "message", message, false); Helpers::conv_fld_from_kvmap(kvmap, name, "output", output, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "len", len, false); } }; @@ -1415,12 +1253,6 @@ struct BlackBoxOp { EcdsaSecp256r1, MultiScalarMul, EmbeddedCurveAdd, - BigIntAdd, - BigIntSub, - BigIntMul, - BigIntDiv, - BigIntFromLeBytes, - BigIntToLeBytes, Poseidon2Permutation, Sha256Compression, ToRadix> @@ 
-1469,38 +1301,14 @@ struct BlackBoxOp { is_unit = false; break; case 8: - tag = "BigIntAdd"; - is_unit = false; - break; - case 9: - tag = "BigIntSub"; - is_unit = false; - break; - case 10: - tag = "BigIntMul"; - is_unit = false; - break; - case 11: - tag = "BigIntDiv"; - is_unit = false; - break; - case 12: - tag = "BigIntFromLeBytes"; - is_unit = false; - break; - case 13: - tag = "BigIntToLeBytes"; - is_unit = false; - break; - case 14: tag = "Poseidon2Permutation"; is_unit = false; break; - case 15: + case 9: tag = "Sha256Compression"; is_unit = false; break; - case 16: + case 10: tag = "ToRadix"; is_unit = false; break; @@ -1620,66 +1428,6 @@ struct BlackBoxOp { throw_or_abort("error converting into enum variant 'BlackBoxOp::EmbeddedCurveAdd'"); } - value = v; - } else if (tag == "BigIntAdd") { - BigIntAdd v; - try { - o.via.map.ptr[0].val.convert(v); - } catch (const msgpack::type_error&) { - std::cerr << o << std::endl; - throw_or_abort("error converting into enum variant 'BlackBoxOp::BigIntAdd'"); - } - - value = v; - } else if (tag == "BigIntSub") { - BigIntSub v; - try { - o.via.map.ptr[0].val.convert(v); - } catch (const msgpack::type_error&) { - std::cerr << o << std::endl; - throw_or_abort("error converting into enum variant 'BlackBoxOp::BigIntSub'"); - } - - value = v; - } else if (tag == "BigIntMul") { - BigIntMul v; - try { - o.via.map.ptr[0].val.convert(v); - } catch (const msgpack::type_error&) { - std::cerr << o << std::endl; - throw_or_abort("error converting into enum variant 'BlackBoxOp::BigIntMul'"); - } - - value = v; - } else if (tag == "BigIntDiv") { - BigIntDiv v; - try { - o.via.map.ptr[0].val.convert(v); - } catch (const msgpack::type_error&) { - std::cerr << o << std::endl; - throw_or_abort("error converting into enum variant 'BlackBoxOp::BigIntDiv'"); - } - - value = v; - } else if (tag == "BigIntFromLeBytes") { - BigIntFromLeBytes v; - try { - o.via.map.ptr[0].val.convert(v); - } catch (const msgpack::type_error&) { - std::cerr << 
o << std::endl; - throw_or_abort("error converting into enum variant 'BlackBoxOp::BigIntFromLeBytes'"); - } - - value = v; - } else if (tag == "BigIntToLeBytes") { - BigIntToLeBytes v; - try { - o.via.map.ptr[0].val.convert(v); - } catch (const msgpack::type_error&) { - std::cerr << o << std::endl; - throw_or_abort("error converting into enum variant 'BlackBoxOp::BigIntToLeBytes'"); - } - value = v; } else if (tag == "Poseidon2Permutation") { Poseidon2Permutation v; @@ -2164,30 +1912,6 @@ struct BrilligOpcode { } }; - struct JumpIfNot { - Acir::MemoryAddress condition; - uint64_t location; - - friend bool operator==(const JumpIfNot&, const JumpIfNot&); - std::vector bincodeSerialize() const; - static JumpIfNot bincodeDeserialize(std::vector); - - void msgpack_pack(auto& packer) const - { - packer.pack_map(2); - packer.pack(std::make_pair("condition", condition)); - packer.pack(std::make_pair("location", location)); - } - - void msgpack_unpack(msgpack::object const& o) - { - auto name = "JumpIfNot"; - auto kvmap = Helpers::make_kvmap(o, name); - Helpers::conv_fld_from_kvmap(kvmap, name, "condition", condition, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "location", location, false); - } - }; - struct JumpIf { Acir::MemoryAddress condition; uint64_t location; @@ -2284,7 +2008,7 @@ struct BrilligOpcode { struct Const { Acir::MemoryAddress destination; Acir::BitSize bit_size; - std::string value; + std::vector value; friend bool operator==(const Const&, const Const&); std::vector bincodeSerialize() const; @@ -2311,7 +2035,7 @@ struct BrilligOpcode { struct IndirectConst { Acir::MemoryAddress destination_pointer; Acir::BitSize bit_size; - std::string value; + std::vector value; friend bool operator==(const IndirectConst&, const IndirectConst&); std::vector bincodeSerialize() const; @@ -2545,7 +2269,6 @@ struct BrilligOpcode { BinaryIntOp, Not, Cast, - JumpIfNot, JumpIf, Jump, CalldataCopy, @@ -2590,66 +2313,62 @@ struct BrilligOpcode { is_unit = false; break; 
case 4: - tag = "JumpIfNot"; - is_unit = false; - break; - case 5: tag = "JumpIf"; is_unit = false; break; - case 6: + case 5: tag = "Jump"; is_unit = false; break; - case 7: + case 6: tag = "CalldataCopy"; is_unit = false; break; - case 8: + case 7: tag = "Call"; is_unit = false; break; - case 9: + case 8: tag = "Const"; is_unit = false; break; - case 10: + case 9: tag = "IndirectConst"; is_unit = false; break; - case 11: + case 10: tag = "Return"; is_unit = true; break; - case 12: + case 11: tag = "ForeignCall"; is_unit = false; break; - case 13: + case 12: tag = "Mov"; is_unit = false; break; - case 14: + case 13: tag = "ConditionalMov"; is_unit = false; break; - case 15: + case 14: tag = "Load"; is_unit = false; break; - case 16: + case 15: tag = "Store"; is_unit = false; break; - case 17: + case 16: tag = "BlackBox"; is_unit = false; break; - case 18: + case 17: tag = "Trap"; is_unit = false; break; - case 19: + case 18: tag = "Stop"; is_unit = false; break; @@ -2729,16 +2448,6 @@ struct BrilligOpcode { throw_or_abort("error converting into enum variant 'BrilligOpcode::Cast'"); } - value = v; - } else if (tag == "JumpIfNot") { - JumpIfNot v; - try { - o.via.map.ptr[0].val.convert(v); - } catch (const msgpack::type_error&) { - std::cerr << o << std::endl; - throw_or_abort("error converting into enum variant 'BrilligOpcode::JumpIfNot'"); - } - value = v; } else if (tag == "JumpIf") { JumpIf v; @@ -2910,10 +2619,10 @@ struct Witness { } }; -struct ConstantOrWitnessEnum { +struct FunctionInput { struct Constant { - std::string value; + std::vector value; friend bool operator==(const Constant&, const Constant&); std::vector bincodeSerialize() const; @@ -2954,9 +2663,9 @@ struct ConstantOrWitnessEnum { std::variant value; - friend bool operator==(const ConstantOrWitnessEnum&, const ConstantOrWitnessEnum&); + friend bool operator==(const FunctionInput&, const FunctionInput&); std::vector bincodeSerialize() const; - static ConstantOrWitnessEnum 
bincodeDeserialize(std::vector); + static FunctionInput bincodeDeserialize(std::vector); void msgpack_pack(auto& packer) const { @@ -2973,7 +2682,7 @@ struct ConstantOrWitnessEnum { is_unit = false; break; default: - throw_or_abort("unknown enum 'ConstantOrWitnessEnum' variant index: " + std::to_string(value.index())); + throw_or_abort("unknown enum 'FunctionInput' variant index: " + std::to_string(value.index())); } if (is_unit) { packer.pack(tag); @@ -2993,10 +2702,10 @@ struct ConstantOrWitnessEnum { if (o.type != msgpack::type::object_type::MAP && o.type != msgpack::type::object_type::STR) { std::cerr << o << std::endl; - throw_or_abort("expected MAP or STR for enum 'ConstantOrWitnessEnum'; got type " + std::to_string(o.type)); + throw_or_abort("expected MAP or STR for enum 'FunctionInput'; got type " + std::to_string(o.type)); } if (o.type == msgpack::type::object_type::MAP && o.via.map.size != 1) { - throw_or_abort("expected 1 entry for enum 'ConstantOrWitnessEnum'; got " + std::to_string(o.via.map.size)); + throw_or_abort("expected 1 entry for enum 'FunctionInput'; got " + std::to_string(o.via.map.size)); } std::string tag; try { @@ -3007,7 +2716,7 @@ struct ConstantOrWitnessEnum { } } catch (const msgpack::type_error&) { std::cerr << o << std::endl; - throw_or_abort("error converting tag to string for enum 'ConstantOrWitnessEnum'"); + throw_or_abort("error converting tag to string for enum 'FunctionInput'"); } if (tag == "Constant") { Constant v; @@ -3015,7 +2724,7 @@ struct ConstantOrWitnessEnum { o.via.map.ptr[0].val.convert(v); } catch (const msgpack::type_error&) { std::cerr << o << std::endl; - throw_or_abort("error converting into enum variant 'ConstantOrWitnessEnum::Constant'"); + throw_or_abort("error converting into enum variant 'FunctionInput::Constant'"); } value = v; @@ -3025,42 +2734,18 @@ struct ConstantOrWitnessEnum { o.via.map.ptr[0].val.convert(v); } catch (const msgpack::type_error&) { std::cerr << o << std::endl; - throw_or_abort("error 
converting into enum variant 'ConstantOrWitnessEnum::Witness'"); + throw_or_abort("error converting into enum variant 'FunctionInput::Witness'"); } value = v; } else { std::cerr << o << std::endl; - throw_or_abort("unknown 'ConstantOrWitnessEnum' enum variant: " + tag); + throw_or_abort("unknown 'FunctionInput' enum variant: " + tag); } } }; -struct FunctionInput { - Acir::ConstantOrWitnessEnum input; - uint32_t num_bits; - - friend bool operator==(const FunctionInput&, const FunctionInput&); - std::vector bincodeSerialize() const; - static FunctionInput bincodeDeserialize(std::vector); - - void msgpack_pack(auto& packer) const - { - packer.pack_map(2); - packer.pack(std::make_pair("input", input)); - packer.pack(std::make_pair("num_bits", num_bits)); - } - - void msgpack_unpack(msgpack::object const& o) - { - auto name = "FunctionInput"; - auto kvmap = Helpers::make_kvmap(o, name); - Helpers::conv_fld_from_kvmap(kvmap, name, "input", input, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "num_bits", num_bits, false); - } -}; - -struct BlackBoxFuncCall { +struct BlackBoxFuncCall { struct AES128Encrypt { std::vector inputs; @@ -3095,6 +2780,7 @@ struct BlackBoxFuncCall { struct AND { Acir::FunctionInput lhs; Acir::FunctionInput rhs; + uint32_t num_bits; Acir::Witness output; friend bool operator==(const AND&, const AND&); @@ -3103,9 +2789,10 @@ struct BlackBoxFuncCall { void msgpack_pack(auto& packer) const { - packer.pack_map(3); + packer.pack_map(4); packer.pack(std::make_pair("lhs", lhs)); packer.pack(std::make_pair("rhs", rhs)); + packer.pack(std::make_pair("num_bits", num_bits)); packer.pack(std::make_pair("output", output)); } @@ -3115,6 +2802,7 @@ struct BlackBoxFuncCall { auto kvmap = Helpers::make_kvmap(o, name); Helpers::conv_fld_from_kvmap(kvmap, name, "lhs", lhs, false); Helpers::conv_fld_from_kvmap(kvmap, name, "rhs", rhs, false); + Helpers::conv_fld_from_kvmap(kvmap, name, "num_bits", num_bits, false); Helpers::conv_fld_from_kvmap(kvmap, name, 
"output", output, false); } }; @@ -3122,6 +2810,7 @@ struct BlackBoxFuncCall { struct XOR { Acir::FunctionInput lhs; Acir::FunctionInput rhs; + uint32_t num_bits; Acir::Witness output; friend bool operator==(const XOR&, const XOR&); @@ -3130,9 +2819,10 @@ struct BlackBoxFuncCall { void msgpack_pack(auto& packer) const { - packer.pack_map(3); + packer.pack_map(4); packer.pack(std::make_pair("lhs", lhs)); packer.pack(std::make_pair("rhs", rhs)); + packer.pack(std::make_pair("num_bits", num_bits)); packer.pack(std::make_pair("output", output)); } @@ -3142,12 +2832,14 @@ struct BlackBoxFuncCall { auto kvmap = Helpers::make_kvmap(o, name); Helpers::conv_fld_from_kvmap(kvmap, name, "lhs", lhs, false); Helpers::conv_fld_from_kvmap(kvmap, name, "rhs", rhs, false); + Helpers::conv_fld_from_kvmap(kvmap, name, "num_bits", num_bits, false); Helpers::conv_fld_from_kvmap(kvmap, name, "output", output, false); } }; struct RANGE { Acir::FunctionInput input; + uint32_t num_bits; friend bool operator==(const RANGE&, const RANGE&); std::vector bincodeSerialize() const; @@ -3155,8 +2847,9 @@ struct BlackBoxFuncCall { void msgpack_pack(auto& packer) const { - packer.pack_map(1); + packer.pack_map(2); packer.pack(std::make_pair("input", input)); + packer.pack(std::make_pair("num_bits", num_bits)); } void msgpack_unpack(msgpack::object const& o) @@ -3164,6 +2857,7 @@ struct BlackBoxFuncCall { auto name = "RANGE"; auto kvmap = Helpers::make_kvmap(o, name); Helpers::conv_fld_from_kvmap(kvmap, name, "input", input, false); + Helpers::conv_fld_from_kvmap(kvmap, name, "num_bits", num_bits, false); } }; @@ -3220,6 +2914,7 @@ struct BlackBoxFuncCall { std::shared_ptr> public_key_y; std::shared_ptr> signature; std::shared_ptr> hashed_message; + Acir::FunctionInput predicate; Acir::Witness output; friend bool operator==(const EcdsaSecp256k1&, const EcdsaSecp256k1&); @@ -3228,11 +2923,12 @@ struct BlackBoxFuncCall { void msgpack_pack(auto& packer) const { - packer.pack_map(5); + 
packer.pack_map(6); packer.pack(std::make_pair("public_key_x", public_key_x)); packer.pack(std::make_pair("public_key_y", public_key_y)); packer.pack(std::make_pair("signature", signature)); packer.pack(std::make_pair("hashed_message", hashed_message)); + packer.pack(std::make_pair("predicate", predicate)); packer.pack(std::make_pair("output", output)); } @@ -3244,6 +2940,7 @@ struct BlackBoxFuncCall { Helpers::conv_fld_from_kvmap(kvmap, name, "public_key_y", public_key_y, false); Helpers::conv_fld_from_kvmap(kvmap, name, "signature", signature, false); Helpers::conv_fld_from_kvmap(kvmap, name, "hashed_message", hashed_message, false); + Helpers::conv_fld_from_kvmap(kvmap, name, "predicate", predicate, false); Helpers::conv_fld_from_kvmap(kvmap, name, "output", output, false); } }; @@ -3253,6 +2950,7 @@ struct BlackBoxFuncCall { std::shared_ptr> public_key_y; std::shared_ptr> signature; std::shared_ptr> hashed_message; + Acir::FunctionInput predicate; Acir::Witness output; friend bool operator==(const EcdsaSecp256r1&, const EcdsaSecp256r1&); @@ -3261,11 +2959,12 @@ struct BlackBoxFuncCall { void msgpack_pack(auto& packer) const { - packer.pack_map(5); + packer.pack_map(6); packer.pack(std::make_pair("public_key_x", public_key_x)); packer.pack(std::make_pair("public_key_y", public_key_y)); packer.pack(std::make_pair("signature", signature)); packer.pack(std::make_pair("hashed_message", hashed_message)); + packer.pack(std::make_pair("predicate", predicate)); packer.pack(std::make_pair("output", output)); } @@ -3277,6 +2976,7 @@ struct BlackBoxFuncCall { Helpers::conv_fld_from_kvmap(kvmap, name, "public_key_y", public_key_y, false); Helpers::conv_fld_from_kvmap(kvmap, name, "signature", signature, false); Helpers::conv_fld_from_kvmap(kvmap, name, "hashed_message", hashed_message, false); + Helpers::conv_fld_from_kvmap(kvmap, name, "predicate", predicate, false); Helpers::conv_fld_from_kvmap(kvmap, name, "output", output, false); } }; @@ -3284,6 +2984,7 @@ struct 
BlackBoxFuncCall { struct MultiScalarMul { std::vector points; std::vector scalars; + Acir::FunctionInput predicate; std::shared_ptr> outputs; friend bool operator==(const MultiScalarMul&, const MultiScalarMul&); @@ -3292,9 +2993,10 @@ struct BlackBoxFuncCall { void msgpack_pack(auto& packer) const { - packer.pack_map(3); + packer.pack_map(4); packer.pack(std::make_pair("points", points)); packer.pack(std::make_pair("scalars", scalars)); + packer.pack(std::make_pair("predicate", predicate)); packer.pack(std::make_pair("outputs", outputs)); } @@ -3304,6 +3006,7 @@ struct BlackBoxFuncCall { auto kvmap = Helpers::make_kvmap(o, name); Helpers::conv_fld_from_kvmap(kvmap, name, "points", points, false); Helpers::conv_fld_from_kvmap(kvmap, name, "scalars", scalars, false); + Helpers::conv_fld_from_kvmap(kvmap, name, "predicate", predicate, false); Helpers::conv_fld_from_kvmap(kvmap, name, "outputs", outputs, false); } }; @@ -3311,6 +3014,7 @@ struct BlackBoxFuncCall { struct EmbeddedCurveAdd { std::shared_ptr> input1; std::shared_ptr> input2; + Acir::FunctionInput predicate; std::shared_ptr> outputs; friend bool operator==(const EmbeddedCurveAdd&, const EmbeddedCurveAdd&); @@ -3319,9 +3023,10 @@ struct BlackBoxFuncCall { void msgpack_pack(auto& packer) const { - packer.pack_map(3); + packer.pack_map(4); packer.pack(std::make_pair("input1", input1)); packer.pack(std::make_pair("input2", input2)); + packer.pack(std::make_pair("predicate", predicate)); packer.pack(std::make_pair("outputs", outputs)); } @@ -3331,6 +3036,7 @@ struct BlackBoxFuncCall { auto kvmap = Helpers::make_kvmap(o, name); Helpers::conv_fld_from_kvmap(kvmap, name, "input1", input1, false); Helpers::conv_fld_from_kvmap(kvmap, name, "input2", input2, false); + Helpers::conv_fld_from_kvmap(kvmap, name, "predicate", predicate, false); Helpers::conv_fld_from_kvmap(kvmap, name, "outputs", outputs, false); } }; @@ -3365,6 +3071,7 @@ struct BlackBoxFuncCall { std::vector public_inputs; Acir::FunctionInput 
key_hash; uint32_t proof_type; + Acir::FunctionInput predicate; friend bool operator==(const RecursiveAggregation&, const RecursiveAggregation&); std::vector bincodeSerialize() const; @@ -3372,12 +3079,13 @@ struct BlackBoxFuncCall { void msgpack_pack(auto& packer) const { - packer.pack_map(5); + packer.pack_map(6); packer.pack(std::make_pair("verification_key", verification_key)); packer.pack(std::make_pair("proof", proof)); packer.pack(std::make_pair("public_inputs", public_inputs)); packer.pack(std::make_pair("key_hash", key_hash)); packer.pack(std::make_pair("proof_type", proof_type)); + packer.pack(std::make_pair("predicate", predicate)); } void msgpack_unpack(msgpack::object const& o) @@ -3389,172 +3097,13 @@ struct BlackBoxFuncCall { Helpers::conv_fld_from_kvmap(kvmap, name, "public_inputs", public_inputs, false); Helpers::conv_fld_from_kvmap(kvmap, name, "key_hash", key_hash, false); Helpers::conv_fld_from_kvmap(kvmap, name, "proof_type", proof_type, false); - } - }; - - struct BigIntAdd { - uint32_t lhs; - uint32_t rhs; - uint32_t output; - - friend bool operator==(const BigIntAdd&, const BigIntAdd&); - std::vector bincodeSerialize() const; - static BigIntAdd bincodeDeserialize(std::vector); - - void msgpack_pack(auto& packer) const - { - packer.pack_map(3); - packer.pack(std::make_pair("lhs", lhs)); - packer.pack(std::make_pair("rhs", rhs)); - packer.pack(std::make_pair("output", output)); - } - - void msgpack_unpack(msgpack::object const& o) - { - auto name = "BigIntAdd"; - auto kvmap = Helpers::make_kvmap(o, name); - Helpers::conv_fld_from_kvmap(kvmap, name, "lhs", lhs, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "rhs", rhs, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "output", output, false); - } - }; - - struct BigIntSub { - uint32_t lhs; - uint32_t rhs; - uint32_t output; - - friend bool operator==(const BigIntSub&, const BigIntSub&); - std::vector bincodeSerialize() const; - static BigIntSub bincodeDeserialize(std::vector); - - void 
msgpack_pack(auto& packer) const - { - packer.pack_map(3); - packer.pack(std::make_pair("lhs", lhs)); - packer.pack(std::make_pair("rhs", rhs)); - packer.pack(std::make_pair("output", output)); - } - - void msgpack_unpack(msgpack::object const& o) - { - auto name = "BigIntSub"; - auto kvmap = Helpers::make_kvmap(o, name); - Helpers::conv_fld_from_kvmap(kvmap, name, "lhs", lhs, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "rhs", rhs, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "output", output, false); - } - }; - - struct BigIntMul { - uint32_t lhs; - uint32_t rhs; - uint32_t output; - - friend bool operator==(const BigIntMul&, const BigIntMul&); - std::vector bincodeSerialize() const; - static BigIntMul bincodeDeserialize(std::vector); - - void msgpack_pack(auto& packer) const - { - packer.pack_map(3); - packer.pack(std::make_pair("lhs", lhs)); - packer.pack(std::make_pair("rhs", rhs)); - packer.pack(std::make_pair("output", output)); - } - - void msgpack_unpack(msgpack::object const& o) - { - auto name = "BigIntMul"; - auto kvmap = Helpers::make_kvmap(o, name); - Helpers::conv_fld_from_kvmap(kvmap, name, "lhs", lhs, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "rhs", rhs, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "output", output, false); - } - }; - - struct BigIntDiv { - uint32_t lhs; - uint32_t rhs; - uint32_t output; - - friend bool operator==(const BigIntDiv&, const BigIntDiv&); - std::vector bincodeSerialize() const; - static BigIntDiv bincodeDeserialize(std::vector); - - void msgpack_pack(auto& packer) const - { - packer.pack_map(3); - packer.pack(std::make_pair("lhs", lhs)); - packer.pack(std::make_pair("rhs", rhs)); - packer.pack(std::make_pair("output", output)); - } - - void msgpack_unpack(msgpack::object const& o) - { - auto name = "BigIntDiv"; - auto kvmap = Helpers::make_kvmap(o, name); - Helpers::conv_fld_from_kvmap(kvmap, name, "lhs", lhs, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "rhs", rhs, false); - 
Helpers::conv_fld_from_kvmap(kvmap, name, "output", output, false); - } - }; - - struct BigIntFromLeBytes { - std::vector inputs; - std::vector modulus; - uint32_t output; - - friend bool operator==(const BigIntFromLeBytes&, const BigIntFromLeBytes&); - std::vector bincodeSerialize() const; - static BigIntFromLeBytes bincodeDeserialize(std::vector); - - void msgpack_pack(auto& packer) const - { - packer.pack_map(3); - packer.pack(std::make_pair("inputs", inputs)); - packer.pack(std::make_pair("modulus", modulus)); - packer.pack(std::make_pair("output", output)); - } - - void msgpack_unpack(msgpack::object const& o) - { - auto name = "BigIntFromLeBytes"; - auto kvmap = Helpers::make_kvmap(o, name); - Helpers::conv_fld_from_kvmap(kvmap, name, "inputs", inputs, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "modulus", modulus, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "output", output, false); - } - }; - - struct BigIntToLeBytes { - uint32_t input; - std::vector outputs; - - friend bool operator==(const BigIntToLeBytes&, const BigIntToLeBytes&); - std::vector bincodeSerialize() const; - static BigIntToLeBytes bincodeDeserialize(std::vector); - - void msgpack_pack(auto& packer) const - { - packer.pack_map(2); - packer.pack(std::make_pair("input", input)); - packer.pack(std::make_pair("outputs", outputs)); - } - - void msgpack_unpack(msgpack::object const& o) - { - auto name = "BigIntToLeBytes"; - auto kvmap = Helpers::make_kvmap(o, name); - Helpers::conv_fld_from_kvmap(kvmap, name, "input", input, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "outputs", outputs, false); + Helpers::conv_fld_from_kvmap(kvmap, name, "predicate", predicate, false); } }; struct Poseidon2Permutation { std::vector inputs; std::vector outputs; - uint32_t len; friend bool operator==(const Poseidon2Permutation&, const Poseidon2Permutation&); std::vector bincodeSerialize() const; @@ -3562,10 +3111,9 @@ struct BlackBoxFuncCall { void msgpack_pack(auto& packer) const { - 
packer.pack_map(3); + packer.pack_map(2); packer.pack(std::make_pair("inputs", inputs)); packer.pack(std::make_pair("outputs", outputs)); - packer.pack(std::make_pair("len", len)); } void msgpack_unpack(msgpack::object const& o) @@ -3574,7 +3122,6 @@ struct BlackBoxFuncCall { auto kvmap = Helpers::make_kvmap(o, name); Helpers::conv_fld_from_kvmap(kvmap, name, "inputs", inputs, false); Helpers::conv_fld_from_kvmap(kvmap, name, "outputs", outputs, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "len", len, false); } }; @@ -3617,12 +3164,6 @@ struct BlackBoxFuncCall { EmbeddedCurveAdd, Keccakf1600, RecursiveAggregation, - BigIntAdd, - BigIntSub, - BigIntMul, - BigIntDiv, - BigIntFromLeBytes, - BigIntToLeBytes, Poseidon2Permutation, Sha256Compression> value; @@ -3686,34 +3227,10 @@ struct BlackBoxFuncCall { is_unit = false; break; case 12: - tag = "BigIntAdd"; - is_unit = false; - break; - case 13: - tag = "BigIntSub"; - is_unit = false; - break; - case 14: - tag = "BigIntMul"; - is_unit = false; - break; - case 15: - tag = "BigIntDiv"; - is_unit = false; - break; - case 16: - tag = "BigIntFromLeBytes"; - is_unit = false; - break; - case 17: - tag = "BigIntToLeBytes"; - is_unit = false; - break; - case 18: tag = "Poseidon2Permutation"; is_unit = false; break; - case 19: + case 13: tag = "Sha256Compression"; is_unit = false; break; @@ -3873,66 +3390,6 @@ struct BlackBoxFuncCall { throw_or_abort("error converting into enum variant 'BlackBoxFuncCall::RecursiveAggregation'"); } - value = v; - } else if (tag == "BigIntAdd") { - BigIntAdd v; - try { - o.via.map.ptr[0].val.convert(v); - } catch (const msgpack::type_error&) { - std::cerr << o << std::endl; - throw_or_abort("error converting into enum variant 'BlackBoxFuncCall::BigIntAdd'"); - } - - value = v; - } else if (tag == "BigIntSub") { - BigIntSub v; - try { - o.via.map.ptr[0].val.convert(v); - } catch (const msgpack::type_error&) { - std::cerr << o << std::endl; - throw_or_abort("error converting into enum variant 
'BlackBoxFuncCall::BigIntSub'"); - } - - value = v; - } else if (tag == "BigIntMul") { - BigIntMul v; - try { - o.via.map.ptr[0].val.convert(v); - } catch (const msgpack::type_error&) { - std::cerr << o << std::endl; - throw_or_abort("error converting into enum variant 'BlackBoxFuncCall::BigIntMul'"); - } - - value = v; - } else if (tag == "BigIntDiv") { - BigIntDiv v; - try { - o.via.map.ptr[0].val.convert(v); - } catch (const msgpack::type_error&) { - std::cerr << o << std::endl; - throw_or_abort("error converting into enum variant 'BlackBoxFuncCall::BigIntDiv'"); - } - - value = v; - } else if (tag == "BigIntFromLeBytes") { - BigIntFromLeBytes v; - try { - o.via.map.ptr[0].val.convert(v); - } catch (const msgpack::type_error&) { - std::cerr << o << std::endl; - throw_or_abort("error converting into enum variant 'BlackBoxFuncCall::BigIntFromLeBytes'"); - } - - value = v; - } else if (tag == "BigIntToLeBytes") { - BigIntToLeBytes v; - try { - o.via.map.ptr[0].val.convert(v); - } catch (const msgpack::type_error&) { - std::cerr << o << std::endl; - throw_or_abort("error converting into enum variant 'BlackBoxFuncCall::BigIntToLeBytes'"); - } - value = v; } else if (tag == "Poseidon2Permutation") { Poseidon2Permutation v; @@ -4106,9 +3563,9 @@ struct BlockType { }; struct Expression { - std::vector> mul_terms; - std::vector> linear_combinations; - std::string q_c; + std::vector, Acir::Witness, Acir::Witness>> mul_terms; + std::vector, Acir::Witness>> linear_combinations; + std::vector q_c; friend bool operator==(const Expression&, const Expression&); std::vector bincodeSerialize() const; @@ -4490,7 +3947,6 @@ struct Opcode { struct MemoryOp { Acir::BlockId block_id; Acir::MemOp op; - std::optional predicate; friend bool operator==(const MemoryOp&, const MemoryOp&); std::vector bincodeSerialize() const; @@ -4498,10 +3954,9 @@ struct Opcode { void msgpack_pack(auto& packer) const { - packer.pack_map(3); + packer.pack_map(2); packer.pack(std::make_pair("block_id", 
block_id)); packer.pack(std::make_pair("op", op)); - packer.pack(std::make_pair("predicate", predicate)); } void msgpack_unpack(msgpack::object const& o) @@ -4510,7 +3965,6 @@ struct Opcode { auto kvmap = Helpers::make_kvmap(o, name); Helpers::conv_fld_from_kvmap(kvmap, name, "block_id", block_id, false); Helpers::conv_fld_from_kvmap(kvmap, name, "op", op, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "predicate", predicate, true); } }; @@ -4891,150 +4345,41 @@ struct AssertionPayload { } }; -struct ExpressionWidth { +struct OpcodeLocation { - struct Unbounded { - friend bool operator==(const Unbounded&, const Unbounded&); + struct Acir { + uint64_t value; + + friend bool operator==(const Acir&, const Acir&); std::vector bincodeSerialize() const; - static Unbounded bincodeDeserialize(std::vector); + static Acir bincodeDeserialize(std::vector); - void msgpack_pack(auto& packer) const {} - void msgpack_unpack(msgpack::object const& o) {} + void msgpack_pack(auto& packer) const { packer.pack(value); } + + void msgpack_unpack(msgpack::object const& o) + { + try { + o.convert(value); + } catch (const msgpack::type_error&) { + std::cerr << o << std::endl; + throw_or_abort("error converting into newtype 'Acir'"); + } + } }; - struct Bounded { - uint64_t width; + struct Brillig { + uint64_t acir_index; + uint64_t brillig_index; - friend bool operator==(const Bounded&, const Bounded&); + friend bool operator==(const Brillig&, const Brillig&); std::vector bincodeSerialize() const; - static Bounded bincodeDeserialize(std::vector); + static Brillig bincodeDeserialize(std::vector); void msgpack_pack(auto& packer) const { - packer.pack_map(1); - packer.pack(std::make_pair("width", width)); - } - - void msgpack_unpack(msgpack::object const& o) - { - auto name = "Bounded"; - auto kvmap = Helpers::make_kvmap(o, name); - Helpers::conv_fld_from_kvmap(kvmap, name, "width", width, false); - } - }; - - std::variant value; - - friend bool operator==(const ExpressionWidth&, const 
ExpressionWidth&); - std::vector bincodeSerialize() const; - static ExpressionWidth bincodeDeserialize(std::vector); - - void msgpack_pack(auto& packer) const - { - std::string tag; - bool is_unit; - switch (value.index()) { - - case 0: - tag = "Unbounded"; - is_unit = true; - break; - case 1: - tag = "Bounded"; - is_unit = false; - break; - default: - throw_or_abort("unknown enum 'ExpressionWidth' variant index: " + std::to_string(value.index())); - } - if (is_unit) { - packer.pack(tag); - } else { - std::visit( - [&packer, tag](const auto& arg) { - std::map data; - data[tag] = msgpack::object(arg); - packer.pack(data); - }, - value); - } - } - - void msgpack_unpack(msgpack::object const& o) - { - - if (o.type != msgpack::type::object_type::MAP && o.type != msgpack::type::object_type::STR) { - std::cerr << o << std::endl; - throw_or_abort("expected MAP or STR for enum 'ExpressionWidth'; got type " + std::to_string(o.type)); - } - if (o.type == msgpack::type::object_type::MAP && o.via.map.size != 1) { - throw_or_abort("expected 1 entry for enum 'ExpressionWidth'; got " + std::to_string(o.via.map.size)); - } - std::string tag; - try { - if (o.type == msgpack::type::object_type::MAP) { - o.via.map.ptr[0].key.convert(tag); - } else { - o.convert(tag); - } - } catch (const msgpack::type_error&) { - std::cerr << o << std::endl; - throw_or_abort("error converting tag to string for enum 'ExpressionWidth'"); - } - if (tag == "Unbounded") { - Unbounded v; - value = v; - } else if (tag == "Bounded") { - Bounded v; - try { - o.via.map.ptr[0].val.convert(v); - } catch (const msgpack::type_error&) { - std::cerr << o << std::endl; - throw_or_abort("error converting into enum variant 'ExpressionWidth::Bounded'"); - } - - value = v; - } else { - std::cerr << o << std::endl; - throw_or_abort("unknown 'ExpressionWidth' enum variant: " + tag); - } - } -}; - -struct OpcodeLocation { - - struct Acir { - uint64_t value; - - friend bool operator==(const Acir&, const Acir&); - std::vector 
bincodeSerialize() const; - static Acir bincodeDeserialize(std::vector); - - void msgpack_pack(auto& packer) const { packer.pack(value); } - - void msgpack_unpack(msgpack::object const& o) - { - try { - o.convert(value); - } catch (const msgpack::type_error&) { - std::cerr << o << std::endl; - throw_or_abort("error converting into newtype 'Acir'"); - } - } - }; - - struct Brillig { - uint64_t acir_index; - uint64_t brillig_index; - - friend bool operator==(const Brillig&, const Brillig&); - std::vector bincodeSerialize() const; - static Brillig bincodeDeserialize(std::vector); - - void msgpack_pack(auto& packer) const - { - packer.pack_map(2); - packer.pack(std::make_pair("acir_index", acir_index)); - packer.pack(std::make_pair("brillig_index", brillig_index)); + packer.pack_map(2); + packer.pack(std::make_pair("acir_index", acir_index)); + packer.pack(std::make_pair("brillig_index", brillig_index)); } void msgpack_unpack(msgpack::object const& o) @@ -5151,9 +4496,9 @@ struct PublicInputs { }; struct Circuit { + std::string function_name; uint32_t current_witness_index; std::vector opcodes; - Acir::ExpressionWidth expression_width; std::vector private_parameters; Acir::PublicInputs public_parameters; Acir::PublicInputs return_values; @@ -5166,9 +4511,9 @@ struct Circuit { void msgpack_pack(auto& packer) const { packer.pack_map(7); + packer.pack(std::make_pair("function_name", function_name)); packer.pack(std::make_pair("current_witness_index", current_witness_index)); packer.pack(std::make_pair("opcodes", opcodes)); - packer.pack(std::make_pair("expression_width", expression_width)); packer.pack(std::make_pair("private_parameters", private_parameters)); packer.pack(std::make_pair("public_parameters", public_parameters)); packer.pack(std::make_pair("return_values", return_values)); @@ -5179,9 +4524,9 @@ struct Circuit { { auto name = "Circuit"; auto kvmap = Helpers::make_kvmap(o, name); + Helpers::conv_fld_from_kvmap(kvmap, name, "function_name", function_name, 
false); Helpers::conv_fld_from_kvmap(kvmap, name, "current_witness_index", current_witness_index, false); Helpers::conv_fld_from_kvmap(kvmap, name, "opcodes", opcodes, false); - Helpers::conv_fld_from_kvmap(kvmap, name, "expression_width", expression_width, false); Helpers::conv_fld_from_kvmap(kvmap, name, "private_parameters", private_parameters, false); Helpers::conv_fld_from_kvmap(kvmap, name, "public_parameters", public_parameters, false); Helpers::conv_fld_from_kvmap(kvmap, name, "return_values", return_values, false); @@ -5190,6 +4535,7 @@ struct Circuit { }; struct BrilligBytecode { + std::string function_name; std::vector bytecode; friend bool operator==(const BrilligBytecode&, const BrilligBytecode&); @@ -5198,7 +4544,8 @@ struct BrilligBytecode { void msgpack_pack(auto& packer) const { - packer.pack_map(1); + packer.pack_map(2); + packer.pack(std::make_pair("function_name", function_name)); packer.pack(std::make_pair("bytecode", bytecode)); } @@ -5206,6 +4553,7 @@ struct BrilligBytecode { { auto name = "BrilligBytecode"; auto kvmap = Helpers::make_kvmap(o, name); + Helpers::conv_fld_from_kvmap(kvmap, name, "function_name", function_name, false); Helpers::conv_fld_from_kvmap(kvmap, name, "bytecode", bytecode, false); } }; @@ -5255,6 +4603,115 @@ struct ProgramWithoutBrillig { } }; +struct ExpressionWidth { + + struct Unbounded { + friend bool operator==(const Unbounded&, const Unbounded&); + std::vector bincodeSerialize() const; + static Unbounded bincodeDeserialize(std::vector); + + void msgpack_pack(auto& packer) const {} + void msgpack_unpack(msgpack::object const& o) {} + }; + + struct Bounded { + uint64_t width; + + friend bool operator==(const Bounded&, const Bounded&); + std::vector bincodeSerialize() const; + static Bounded bincodeDeserialize(std::vector); + + void msgpack_pack(auto& packer) const + { + packer.pack_map(1); + packer.pack(std::make_pair("width", width)); + } + + void msgpack_unpack(msgpack::object const& o) + { + auto name = 
"Bounded"; + auto kvmap = Helpers::make_kvmap(o, name); + Helpers::conv_fld_from_kvmap(kvmap, name, "width", width, false); + } + }; + + std::variant value; + + friend bool operator==(const ExpressionWidth&, const ExpressionWidth&); + std::vector bincodeSerialize() const; + static ExpressionWidth bincodeDeserialize(std::vector); + + void msgpack_pack(auto& packer) const + { + std::string tag; + bool is_unit; + switch (value.index()) { + + case 0: + tag = "Unbounded"; + is_unit = true; + break; + case 1: + tag = "Bounded"; + is_unit = false; + break; + default: + throw_or_abort("unknown enum 'ExpressionWidth' variant index: " + std::to_string(value.index())); + } + if (is_unit) { + packer.pack(tag); + } else { + std::visit( + [&packer, tag](const auto& arg) { + std::map data; + data[tag] = msgpack::object(arg); + packer.pack(data); + }, + value); + } + } + + void msgpack_unpack(msgpack::object const& o) + { + + if (o.type != msgpack::type::object_type::MAP && o.type != msgpack::type::object_type::STR) { + std::cerr << o << std::endl; + throw_or_abort("expected MAP or STR for enum 'ExpressionWidth'; got type " + std::to_string(o.type)); + } + if (o.type == msgpack::type::object_type::MAP && o.via.map.size != 1) { + throw_or_abort("expected 1 entry for enum 'ExpressionWidth'; got " + std::to_string(o.via.map.size)); + } + std::string tag; + try { + if (o.type == msgpack::type::object_type::MAP) { + o.via.map.ptr[0].key.convert(tag); + } else { + o.convert(tag); + } + } catch (const msgpack::type_error&) { + std::cerr << o << std::endl; + throw_or_abort("error converting tag to string for enum 'ExpressionWidth'"); + } + if (tag == "Unbounded") { + Unbounded v; + value = v; + } else if (tag == "Bounded") { + Bounded v; + try { + o.via.map.ptr[0].val.convert(v); + } catch (const msgpack::type_error&) { + std::cerr << o << std::endl; + throw_or_abort("error converting into enum variant 'ExpressionWidth::Bounded'"); + } + + value = v; + } else { + std::cerr << o << 
std::endl; + throw_or_abort("unknown 'ExpressionWidth' enum variant: " + tag); + } + } +}; + } // end of namespace Acir namespace Acir { @@ -5279,10 +4736,9 @@ inline std::vector AssertionPayload::bincodeSerialize() const inline AssertionPayload AssertionPayload::bincodeDeserialize(std::vector input) { - const size_t input_size = input.size(); auto deserializer = serde::BincodeDeserializer(input); auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input_size) { + if (deserializer.get_buffer_offset() < input.size()) { throw_or_abort("Some input bytes were not read"); } return value; @@ -6459,6 +5915,9 @@ inline bool operator==(const BlackBoxFuncCall::AND& lhs, const BlackBoxFuncCall: if (!(lhs.rhs == rhs.rhs)) { return false; } + if (!(lhs.num_bits == rhs.num_bits)) { + return false; + } if (!(lhs.output == rhs.output)) { return false; } @@ -6491,6 +5950,7 @@ void serde::Serializable::serialize(const Acir::Bla { serde::Serializable::serialize(obj.lhs, serializer); serde::Serializable::serialize(obj.rhs, serializer); + serde::Serializable::serialize(obj.num_bits, serializer); serde::Serializable::serialize(obj.output, serializer); } @@ -6501,6 +5961,7 @@ Acir::BlackBoxFuncCall::AND serde::Deserializable:: Acir::BlackBoxFuncCall::AND obj; obj.lhs = serde::Deserializable::deserialize(deserializer); obj.rhs = serde::Deserializable::deserialize(deserializer); + obj.num_bits = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); return obj; } @@ -6515,6 +5976,9 @@ inline bool operator==(const BlackBoxFuncCall::XOR& lhs, const BlackBoxFuncCall: if (!(lhs.rhs == rhs.rhs)) { return false; } + if (!(lhs.num_bits == rhs.num_bits)) { + return false; + } if (!(lhs.output == rhs.output)) { return false; } @@ -6547,6 +6011,7 @@ void serde::Serializable::serialize(const Acir::Bla { serde::Serializable::serialize(obj.lhs, serializer); 
serde::Serializable::serialize(obj.rhs, serializer); + serde::Serializable::serialize(obj.num_bits, serializer); serde::Serializable::serialize(obj.output, serializer); } @@ -6557,6 +6022,7 @@ Acir::BlackBoxFuncCall::XOR serde::Deserializable:: Acir::BlackBoxFuncCall::XOR obj; obj.lhs = serde::Deserializable::deserialize(deserializer); obj.rhs = serde::Deserializable::deserialize(deserializer); + obj.num_bits = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); return obj; } @@ -6568,6 +6034,9 @@ inline bool operator==(const BlackBoxFuncCall::RANGE& lhs, const BlackBoxFuncCal if (!(lhs.input == rhs.input)) { return false; } + if (!(lhs.num_bits == rhs.num_bits)) { + return false; + } return true; } @@ -6596,6 +6065,7 @@ void serde::Serializable::serialize(const Acir::B Serializer& serializer) { serde::Serializable::serialize(obj.input, serializer); + serde::Serializable::serialize(obj.num_bits, serializer); } template <> @@ -6605,6 +6075,7 @@ Acir::BlackBoxFuncCall::RANGE serde::Deserializable::deserialize(deserializer); + obj.num_bits = serde::Deserializable::deserialize(deserializer); return obj; } @@ -6728,6 +6199,9 @@ inline bool operator==(const BlackBoxFuncCall::EcdsaSecp256k1& lhs, const BlackB if (!(lhs.hashed_message == rhs.hashed_message)) { return false; } + if (!(lhs.predicate == rhs.predicate)) { + return false; + } if (!(lhs.output == rhs.output)) { return false; } @@ -6762,6 +6236,7 @@ void serde::Serializable::serialize( serde::Serializable::serialize(obj.public_key_y, serializer); serde::Serializable::serialize(obj.signature, serializer); serde::Serializable::serialize(obj.hashed_message, serializer); + serde::Serializable::serialize(obj.predicate, serializer); serde::Serializable::serialize(obj.output, serializer); } @@ -6775,6 +6250,7 @@ Acir::BlackBoxFuncCall::EcdsaSecp256k1 serde::Deserializable::deserialize(deserializer); obj.signature = 
serde::Deserializable::deserialize(deserializer); obj.hashed_message = serde::Deserializable::deserialize(deserializer); + obj.predicate = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); return obj; } @@ -6795,7 +6271,10 @@ inline bool operator==(const BlackBoxFuncCall::EcdsaSecp256r1& lhs, const BlackB if (!(lhs.hashed_message == rhs.hashed_message)) { return false; } - if (!(lhs.output == rhs.output)) { + if (!(lhs.predicate == rhs.predicate)) { + return false; + } + if (!(lhs.output == rhs.output)) { return false; } return true; @@ -6829,6 +6308,7 @@ void serde::Serializable::serialize( serde::Serializable::serialize(obj.public_key_y, serializer); serde::Serializable::serialize(obj.signature, serializer); serde::Serializable::serialize(obj.hashed_message, serializer); + serde::Serializable::serialize(obj.predicate, serializer); serde::Serializable::serialize(obj.output, serializer); } @@ -6842,6 +6322,7 @@ Acir::BlackBoxFuncCall::EcdsaSecp256r1 serde::Deserializable::deserialize(deserializer); obj.signature = serde::Deserializable::deserialize(deserializer); obj.hashed_message = serde::Deserializable::deserialize(deserializer); + obj.predicate = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); return obj; } @@ -6856,6 +6337,9 @@ inline bool operator==(const BlackBoxFuncCall::MultiScalarMul& lhs, const BlackB if (!(lhs.scalars == rhs.scalars)) { return false; } + if (!(lhs.predicate == rhs.predicate)) { + return false; + } if (!(lhs.outputs == rhs.outputs)) { return false; } @@ -6888,6 +6372,7 @@ void serde::Serializable::serialize( { serde::Serializable::serialize(obj.points, serializer); serde::Serializable::serialize(obj.scalars, serializer); + serde::Serializable::serialize(obj.predicate, serializer); serde::Serializable::serialize(obj.outputs, serializer); } @@ -6899,6 +6384,7 @@ Acir::BlackBoxFuncCall::MultiScalarMul 
serde::Deserializable::deserialize(deserializer); obj.scalars = serde::Deserializable::deserialize(deserializer); + obj.predicate = serde::Deserializable::deserialize(deserializer); obj.outputs = serde::Deserializable::deserialize(deserializer); return obj; } @@ -6913,6 +6399,9 @@ inline bool operator==(const BlackBoxFuncCall::EmbeddedCurveAdd& lhs, const Blac if (!(lhs.input2 == rhs.input2)) { return false; } + if (!(lhs.predicate == rhs.predicate)) { + return false; + } if (!(lhs.outputs == rhs.outputs)) { return false; } @@ -6946,6 +6435,7 @@ void serde::Serializable::serialize( { serde::Serializable::serialize(obj.input1, serializer); serde::Serializable::serialize(obj.input2, serializer); + serde::Serializable::serialize(obj.predicate, serializer); serde::Serializable::serialize(obj.outputs, serializer); } @@ -6957,6 +6447,7 @@ Acir::BlackBoxFuncCall::EmbeddedCurveAdd serde::Deserializable::deserialize(deserializer); obj.input2 = serde::Deserializable::deserialize(deserializer); + obj.predicate = serde::Deserializable::deserialize(deserializer); obj.outputs = serde::Deserializable::deserialize(deserializer); return obj; } @@ -7033,6 +6524,9 @@ inline bool operator==(const BlackBoxFuncCall::RecursiveAggregation& lhs, if (!(lhs.proof_type == rhs.proof_type)) { return false; } + if (!(lhs.predicate == rhs.predicate)) { + return false; + } return true; } @@ -7066,6 +6560,7 @@ void serde::Serializable::serializ serde::Serializable::serialize(obj.public_inputs, serializer); serde::Serializable::serialize(obj.key_hash, serializer); serde::Serializable::serialize(obj.proof_type, serializer); + serde::Serializable::serialize(obj.predicate, serializer); } template <> @@ -7079,93 +6574,36 @@ Acir::BlackBoxFuncCall::RecursiveAggregation serde::Deserializable< obj.public_inputs = serde::Deserializable::deserialize(deserializer); obj.key_hash = serde::Deserializable::deserialize(deserializer); obj.proof_type = serde::Deserializable::deserialize(deserializer); + 
obj.predicate = serde::Deserializable::deserialize(deserializer); return obj; } namespace Acir { -inline bool operator==(const BlackBoxFuncCall::BigIntAdd& lhs, const BlackBoxFuncCall::BigIntAdd& rhs) -{ - if (!(lhs.lhs == rhs.lhs)) { - return false; - } - if (!(lhs.rhs == rhs.rhs)) { - return false; - } - if (!(lhs.output == rhs.output)) { - return false; - } - return true; -} - -inline std::vector BlackBoxFuncCall::BigIntAdd::bincodeSerialize() const -{ - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline BlackBoxFuncCall::BigIntAdd BlackBoxFuncCall::BigIntAdd::bincodeDeserialize(std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Acir - -template <> -template -void serde::Serializable::serialize(const Acir::BlackBoxFuncCall::BigIntAdd& obj, - Serializer& serializer) -{ - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); - serde::Serializable::serialize(obj.output, serializer); -} - -template <> -template -Acir::BlackBoxFuncCall::BigIntAdd serde::Deserializable::deserialize( - Deserializer& deserializer) -{ - Acir::BlackBoxFuncCall::BigIntAdd obj; - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); - return obj; -} - -namespace Acir { - -inline bool operator==(const BlackBoxFuncCall::BigIntSub& lhs, const BlackBoxFuncCall::BigIntSub& rhs) +inline bool operator==(const BlackBoxFuncCall::Poseidon2Permutation& lhs, + const BlackBoxFuncCall::Poseidon2Permutation& rhs) { - if (!(lhs.lhs == rhs.lhs)) { - return false; - } - if 
(!(lhs.rhs == rhs.rhs)) { + if (!(lhs.inputs == rhs.inputs)) { return false; } - if (!(lhs.output == rhs.output)) { + if (!(lhs.outputs == rhs.outputs)) { return false; } return true; } -inline std::vector BlackBoxFuncCall::BigIntSub::bincodeSerialize() const +inline std::vector BlackBoxFuncCall::Poseidon2Permutation::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } -inline BlackBoxFuncCall::BigIntSub BlackBoxFuncCall::BigIntSub::bincodeDeserialize(std::vector input) +inline BlackBoxFuncCall::Poseidon2Permutation BlackBoxFuncCall::Poseidon2Permutation::bincodeDeserialize( + std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw_or_abort("Some input bytes were not read"); } @@ -7176,53 +6614,52 @@ inline BlackBoxFuncCall::BigIntSub BlackBoxFuncCall::BigIntSub::bincodeDeseriali template <> template -void serde::Serializable::serialize(const Acir::BlackBoxFuncCall::BigIntSub& obj, - Serializer& serializer) +void serde::Serializable::serialize( + const Acir::BlackBoxFuncCall::Poseidon2Permutation& obj, Serializer& serializer) { - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); - serde::Serializable::serialize(obj.output, serializer); + serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.outputs, serializer); } template <> template -Acir::BlackBoxFuncCall::BigIntSub serde::Deserializable::deserialize( - Deserializer& deserializer) +Acir::BlackBoxFuncCall::Poseidon2Permutation serde::Deserializable< + Acir::BlackBoxFuncCall::Poseidon2Permutation>::deserialize(Deserializer& deserializer) { - 
Acir::BlackBoxFuncCall::BigIntSub obj; - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); + Acir::BlackBoxFuncCall::Poseidon2Permutation obj; + obj.inputs = serde::Deserializable::deserialize(deserializer); + obj.outputs = serde::Deserializable::deserialize(deserializer); return obj; } namespace Acir { -inline bool operator==(const BlackBoxFuncCall::BigIntMul& lhs, const BlackBoxFuncCall::BigIntMul& rhs) +inline bool operator==(const BlackBoxFuncCall::Sha256Compression& lhs, const BlackBoxFuncCall::Sha256Compression& rhs) { - if (!(lhs.lhs == rhs.lhs)) { + if (!(lhs.inputs == rhs.inputs)) { return false; } - if (!(lhs.rhs == rhs.rhs)) { + if (!(lhs.hash_values == rhs.hash_values)) { return false; } - if (!(lhs.output == rhs.output)) { + if (!(lhs.outputs == rhs.outputs)) { return false; } return true; } -inline std::vector BlackBoxFuncCall::BigIntMul::bincodeSerialize() const +inline std::vector BlackBoxFuncCall::Sha256Compression::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } -inline BlackBoxFuncCall::BigIntMul BlackBoxFuncCall::BigIntMul::bincodeDeserialize(std::vector input) +inline BlackBoxFuncCall::Sha256Compression BlackBoxFuncCall::Sha256Compression::bincodeDeserialize( + std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw_or_abort("Some input bytes were not read"); } @@ -7233,53 +6670,47 @@ inline BlackBoxFuncCall::BigIntMul BlackBoxFuncCall::BigIntMul::bincodeDeseriali template <> template -void serde::Serializable::serialize(const 
Acir::BlackBoxFuncCall::BigIntMul& obj, - Serializer& serializer) +void serde::Serializable::serialize( + const Acir::BlackBoxFuncCall::Sha256Compression& obj, Serializer& serializer) { - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); - serde::Serializable::serialize(obj.output, serializer); + serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.hash_values, serializer); + serde::Serializable::serialize(obj.outputs, serializer); } template <> template -Acir::BlackBoxFuncCall::BigIntMul serde::Deserializable::deserialize( +Acir::BlackBoxFuncCall::Sha256Compression serde::Deserializable::deserialize( Deserializer& deserializer) { - Acir::BlackBoxFuncCall::BigIntMul obj; - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); + Acir::BlackBoxFuncCall::Sha256Compression obj; + obj.inputs = serde::Deserializable::deserialize(deserializer); + obj.hash_values = serde::Deserializable::deserialize(deserializer); + obj.outputs = serde::Deserializable::deserialize(deserializer); return obj; } namespace Acir { -inline bool operator==(const BlackBoxFuncCall::BigIntDiv& lhs, const BlackBoxFuncCall::BigIntDiv& rhs) +inline bool operator==(const BlackBoxOp& lhs, const BlackBoxOp& rhs) { - if (!(lhs.lhs == rhs.lhs)) { - return false; - } - if (!(lhs.rhs == rhs.rhs)) { - return false; - } - if (!(lhs.output == rhs.output)) { + if (!(lhs.value == rhs.value)) { return false; } return true; } -inline std::vector BlackBoxFuncCall::BigIntDiv::bincodeSerialize() const +inline std::vector BlackBoxOp::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } -inline BlackBoxFuncCall::BigIntDiv 
BlackBoxFuncCall::BigIntDiv::bincodeDeserialize(std::vector input) +inline BlackBoxOp BlackBoxOp::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw_or_abort("Some input bytes were not read"); } @@ -7290,89 +6721,35 @@ inline BlackBoxFuncCall::BigIntDiv BlackBoxFuncCall::BigIntDiv::bincodeDeseriali template <> template -void serde::Serializable::serialize(const Acir::BlackBoxFuncCall::BigIntDiv& obj, - Serializer& serializer) +void serde::Serializable::serialize(const Acir::BlackBoxOp& obj, Serializer& serializer) { - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); - serde::Serializable::serialize(obj.output, serializer); + serializer.increase_container_depth(); + serde::Serializable::serialize(obj.value, serializer); + serializer.decrease_container_depth(); } template <> template -Acir::BlackBoxFuncCall::BigIntDiv serde::Deserializable::deserialize( - Deserializer& deserializer) +Acir::BlackBoxOp serde::Deserializable::deserialize(Deserializer& deserializer) { - Acir::BlackBoxFuncCall::BigIntDiv obj; - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); + deserializer.increase_container_depth(); + Acir::BlackBoxOp obj; + obj.value = serde::Deserializable::deserialize(deserializer); + deserializer.decrease_container_depth(); return obj; } namespace Acir { -inline bool operator==(const BlackBoxFuncCall::BigIntFromLeBytes& lhs, const BlackBoxFuncCall::BigIntFromLeBytes& rhs) +inline bool operator==(const BlackBoxOp::AES128Encrypt& lhs, const BlackBoxOp::AES128Encrypt& rhs) { if (!(lhs.inputs == rhs.inputs)) { return false; } - if (!(lhs.modulus 
== rhs.modulus)) { - return false; - } - if (!(lhs.output == rhs.output)) { + if (!(lhs.iv == rhs.iv)) { return false; } - return true; -} - -inline std::vector BlackBoxFuncCall::BigIntFromLeBytes::bincodeSerialize() const -{ - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline BlackBoxFuncCall::BigIntFromLeBytes BlackBoxFuncCall::BigIntFromLeBytes::bincodeDeserialize( - std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Acir - -template <> -template -void serde::Serializable::serialize( - const Acir::BlackBoxFuncCall::BigIntFromLeBytes& obj, Serializer& serializer) -{ - serde::Serializable::serialize(obj.inputs, serializer); - serde::Serializable::serialize(obj.modulus, serializer); - serde::Serializable::serialize(obj.output, serializer); -} - -template <> -template -Acir::BlackBoxFuncCall::BigIntFromLeBytes serde::Deserializable::deserialize( - Deserializer& deserializer) -{ - Acir::BlackBoxFuncCall::BigIntFromLeBytes obj; - obj.inputs = serde::Deserializable::deserialize(deserializer); - obj.modulus = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); - return obj; -} - -namespace Acir { - -inline bool operator==(const BlackBoxFuncCall::BigIntToLeBytes& lhs, const BlackBoxFuncCall::BigIntToLeBytes& rhs) -{ - if (!(lhs.input == rhs.input)) { + if (!(lhs.key == rhs.key)) { return false; } if (!(lhs.outputs == rhs.outputs)) { @@ -7381,18 +6758,17 @@ inline bool operator==(const BlackBoxFuncCall::BigIntToLeBytes& lhs, const Black return true; } -inline std::vector BlackBoxFuncCall::BigIntToLeBytes::bincodeSerialize() const +inline std::vector 
BlackBoxOp::AES128Encrypt::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } -inline BlackBoxFuncCall::BigIntToLeBytes BlackBoxFuncCall::BigIntToLeBytes::bincodeDeserialize( - std::vector input) +inline BlackBoxOp::AES128Encrypt BlackBoxOp::AES128Encrypt::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw_or_abort("Some input bytes were not read"); } @@ -7403,53 +6779,52 @@ inline BlackBoxFuncCall::BigIntToLeBytes BlackBoxFuncCall::BigIntToLeBytes::binc template <> template -void serde::Serializable::serialize( - const Acir::BlackBoxFuncCall::BigIntToLeBytes& obj, Serializer& serializer) +void serde::Serializable::serialize(const Acir::BlackBoxOp::AES128Encrypt& obj, + Serializer& serializer) { - serde::Serializable::serialize(obj.input, serializer); + serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.iv, serializer); + serde::Serializable::serialize(obj.key, serializer); serde::Serializable::serialize(obj.outputs, serializer); } template <> template -Acir::BlackBoxFuncCall::BigIntToLeBytes serde::Deserializable::deserialize( +Acir::BlackBoxOp::AES128Encrypt serde::Deserializable::deserialize( Deserializer& deserializer) { - Acir::BlackBoxFuncCall::BigIntToLeBytes obj; - obj.input = serde::Deserializable::deserialize(deserializer); + Acir::BlackBoxOp::AES128Encrypt obj; + obj.inputs = serde::Deserializable::deserialize(deserializer); + obj.iv = serde::Deserializable::deserialize(deserializer); + obj.key = serde::Deserializable::deserialize(deserializer); obj.outputs = serde::Deserializable::deserialize(deserializer); return 
obj; } namespace Acir { -inline bool operator==(const BlackBoxFuncCall::Poseidon2Permutation& lhs, - const BlackBoxFuncCall::Poseidon2Permutation& rhs) +inline bool operator==(const BlackBoxOp::Blake2s& lhs, const BlackBoxOp::Blake2s& rhs) { - if (!(lhs.inputs == rhs.inputs)) { - return false; - } - if (!(lhs.outputs == rhs.outputs)) { + if (!(lhs.message == rhs.message)) { return false; } - if (!(lhs.len == rhs.len)) { + if (!(lhs.output == rhs.output)) { return false; } return true; } -inline std::vector BlackBoxFuncCall::Poseidon2Permutation::bincodeSerialize() const +inline std::vector BlackBoxOp::Blake2s::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } -inline BlackBoxFuncCall::Poseidon2Permutation BlackBoxFuncCall::Poseidon2Permutation::bincodeDeserialize( - std::vector input) +inline BlackBoxOp::Blake2s BlackBoxOp::Blake2s::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw_or_abort("Some input bytes were not read"); } @@ -7460,625 +6835,28 @@ inline BlackBoxFuncCall::Poseidon2Permutation BlackBoxFuncCall::Poseidon2Permuta template <> template -void serde::Serializable::serialize( - const Acir::BlackBoxFuncCall::Poseidon2Permutation& obj, Serializer& serializer) +void serde::Serializable::serialize(const Acir::BlackBoxOp::Blake2s& obj, + Serializer& serializer) { - serde::Serializable::serialize(obj.inputs, serializer); - serde::Serializable::serialize(obj.outputs, serializer); - serde::Serializable::serialize(obj.len, serializer); + serde::Serializable::serialize(obj.message, serializer); + serde::Serializable::serialize(obj.output, serializer); } template <> 
template -Acir::BlackBoxFuncCall::Poseidon2Permutation serde::Deserializable< - Acir::BlackBoxFuncCall::Poseidon2Permutation>::deserialize(Deserializer& deserializer) +Acir::BlackBoxOp::Blake2s serde::Deserializable::deserialize(Deserializer& deserializer) { - Acir::BlackBoxFuncCall::Poseidon2Permutation obj; - obj.inputs = serde::Deserializable::deserialize(deserializer); - obj.outputs = serde::Deserializable::deserialize(deserializer); - obj.len = serde::Deserializable::deserialize(deserializer); - return obj; -} - -namespace Acir { - -inline bool operator==(const BlackBoxFuncCall::Sha256Compression& lhs, const BlackBoxFuncCall::Sha256Compression& rhs) -{ - if (!(lhs.inputs == rhs.inputs)) { - return false; - } - if (!(lhs.hash_values == rhs.hash_values)) { - return false; - } - if (!(lhs.outputs == rhs.outputs)) { - return false; - } - return true; -} - -inline std::vector BlackBoxFuncCall::Sha256Compression::bincodeSerialize() const -{ - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline BlackBoxFuncCall::Sha256Compression BlackBoxFuncCall::Sha256Compression::bincodeDeserialize( - std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Acir - -template <> -template -void serde::Serializable::serialize( - const Acir::BlackBoxFuncCall::Sha256Compression& obj, Serializer& serializer) -{ - serde::Serializable::serialize(obj.inputs, serializer); - serde::Serializable::serialize(obj.hash_values, serializer); - serde::Serializable::serialize(obj.outputs, serializer); -} - -template <> -template -Acir::BlackBoxFuncCall::Sha256Compression serde::Deserializable::deserialize( - Deserializer& deserializer) -{ - 
Acir::BlackBoxFuncCall::Sha256Compression obj; - obj.inputs = serde::Deserializable::deserialize(deserializer); - obj.hash_values = serde::Deserializable::deserialize(deserializer); - obj.outputs = serde::Deserializable::deserialize(deserializer); - return obj; -} - -namespace Acir { - -inline bool operator==(const BlackBoxOp& lhs, const BlackBoxOp& rhs) -{ - if (!(lhs.value == rhs.value)) { - return false; - } - return true; -} - -inline std::vector BlackBoxOp::bincodeSerialize() const -{ - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline BlackBoxOp BlackBoxOp::bincodeDeserialize(std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Acir - -template <> -template -void serde::Serializable::serialize(const Acir::BlackBoxOp& obj, Serializer& serializer) -{ - serializer.increase_container_depth(); - serde::Serializable::serialize(obj.value, serializer); - serializer.decrease_container_depth(); -} - -template <> -template -Acir::BlackBoxOp serde::Deserializable::deserialize(Deserializer& deserializer) -{ - deserializer.increase_container_depth(); - Acir::BlackBoxOp obj; - obj.value = serde::Deserializable::deserialize(deserializer); - deserializer.decrease_container_depth(); - return obj; -} - -namespace Acir { - -inline bool operator==(const BlackBoxOp::AES128Encrypt& lhs, const BlackBoxOp::AES128Encrypt& rhs) -{ - if (!(lhs.inputs == rhs.inputs)) { - return false; - } - if (!(lhs.iv == rhs.iv)) { - return false; - } - if (!(lhs.key == rhs.key)) { - return false; - } - if (!(lhs.outputs == rhs.outputs)) { - return false; - } - return true; -} - -inline std::vector BlackBoxOp::AES128Encrypt::bincodeSerialize() const -{ - auto 
serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline BlackBoxOp::AES128Encrypt BlackBoxOp::AES128Encrypt::bincodeDeserialize(std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Acir - -template <> -template -void serde::Serializable::serialize(const Acir::BlackBoxOp::AES128Encrypt& obj, - Serializer& serializer) -{ - serde::Serializable::serialize(obj.inputs, serializer); - serde::Serializable::serialize(obj.iv, serializer); - serde::Serializable::serialize(obj.key, serializer); - serde::Serializable::serialize(obj.outputs, serializer); -} - -template <> -template -Acir::BlackBoxOp::AES128Encrypt serde::Deserializable::deserialize( - Deserializer& deserializer) -{ - Acir::BlackBoxOp::AES128Encrypt obj; - obj.inputs = serde::Deserializable::deserialize(deserializer); - obj.iv = serde::Deserializable::deserialize(deserializer); - obj.key = serde::Deserializable::deserialize(deserializer); - obj.outputs = serde::Deserializable::deserialize(deserializer); - return obj; -} - -namespace Acir { - -inline bool operator==(const BlackBoxOp::Blake2s& lhs, const BlackBoxOp::Blake2s& rhs) -{ - if (!(lhs.message == rhs.message)) { - return false; - } - if (!(lhs.output == rhs.output)) { - return false; - } - return true; -} - -inline std::vector BlackBoxOp::Blake2s::bincodeSerialize() const -{ - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline BlackBoxOp::Blake2s BlackBoxOp::Blake2s::bincodeDeserialize(std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = 
serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Acir - -template <> -template -void serde::Serializable::serialize(const Acir::BlackBoxOp::Blake2s& obj, - Serializer& serializer) -{ - serde::Serializable::serialize(obj.message, serializer); - serde::Serializable::serialize(obj.output, serializer); -} - -template <> -template -Acir::BlackBoxOp::Blake2s serde::Deserializable::deserialize(Deserializer& deserializer) -{ - Acir::BlackBoxOp::Blake2s obj; - obj.message = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); - return obj; -} - -namespace Acir { - -inline bool operator==(const BlackBoxOp::Blake3& lhs, const BlackBoxOp::Blake3& rhs) -{ - if (!(lhs.message == rhs.message)) { - return false; - } - if (!(lhs.output == rhs.output)) { - return false; - } - return true; -} - -inline std::vector BlackBoxOp::Blake3::bincodeSerialize() const -{ - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline BlackBoxOp::Blake3 BlackBoxOp::Blake3::bincodeDeserialize(std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Acir - -template <> -template -void serde::Serializable::serialize(const Acir::BlackBoxOp::Blake3& obj, - Serializer& serializer) -{ - serde::Serializable::serialize(obj.message, serializer); - serde::Serializable::serialize(obj.output, serializer); -} - -template <> -template -Acir::BlackBoxOp::Blake3 serde::Deserializable::deserialize(Deserializer& deserializer) -{ - Acir::BlackBoxOp::Blake3 obj; - 
obj.message = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); - return obj; -} - -namespace Acir { - -inline bool operator==(const BlackBoxOp::Keccakf1600& lhs, const BlackBoxOp::Keccakf1600& rhs) -{ - if (!(lhs.input == rhs.input)) { - return false; - } - if (!(lhs.output == rhs.output)) { - return false; - } - return true; -} - -inline std::vector BlackBoxOp::Keccakf1600::bincodeSerialize() const -{ - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline BlackBoxOp::Keccakf1600 BlackBoxOp::Keccakf1600::bincodeDeserialize(std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Acir - -template <> -template -void serde::Serializable::serialize(const Acir::BlackBoxOp::Keccakf1600& obj, - Serializer& serializer) -{ - serde::Serializable::serialize(obj.input, serializer); - serde::Serializable::serialize(obj.output, serializer); -} - -template <> -template -Acir::BlackBoxOp::Keccakf1600 serde::Deserializable::deserialize( - Deserializer& deserializer) -{ - Acir::BlackBoxOp::Keccakf1600 obj; - obj.input = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); - return obj; -} - -namespace Acir { - -inline bool operator==(const BlackBoxOp::EcdsaSecp256k1& lhs, const BlackBoxOp::EcdsaSecp256k1& rhs) -{ - if (!(lhs.hashed_msg == rhs.hashed_msg)) { - return false; - } - if (!(lhs.public_key_x == rhs.public_key_x)) { - return false; - } - if (!(lhs.public_key_y == rhs.public_key_y)) { - return false; - } - if (!(lhs.signature == rhs.signature)) { - return false; - } - if (!(lhs.result == rhs.result)) { - return 
false; - } - return true; -} - -inline std::vector BlackBoxOp::EcdsaSecp256k1::bincodeSerialize() const -{ - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline BlackBoxOp::EcdsaSecp256k1 BlackBoxOp::EcdsaSecp256k1::bincodeDeserialize(std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Acir - -template <> -template -void serde::Serializable::serialize(const Acir::BlackBoxOp::EcdsaSecp256k1& obj, - Serializer& serializer) -{ - serde::Serializable::serialize(obj.hashed_msg, serializer); - serde::Serializable::serialize(obj.public_key_x, serializer); - serde::Serializable::serialize(obj.public_key_y, serializer); - serde::Serializable::serialize(obj.signature, serializer); - serde::Serializable::serialize(obj.result, serializer); -} - -template <> -template -Acir::BlackBoxOp::EcdsaSecp256k1 serde::Deserializable::deserialize( - Deserializer& deserializer) -{ - Acir::BlackBoxOp::EcdsaSecp256k1 obj; - obj.hashed_msg = serde::Deserializable::deserialize(deserializer); - obj.public_key_x = serde::Deserializable::deserialize(deserializer); - obj.public_key_y = serde::Deserializable::deserialize(deserializer); - obj.signature = serde::Deserializable::deserialize(deserializer); - obj.result = serde::Deserializable::deserialize(deserializer); - return obj; -} - -namespace Acir { - -inline bool operator==(const BlackBoxOp::EcdsaSecp256r1& lhs, const BlackBoxOp::EcdsaSecp256r1& rhs) -{ - if (!(lhs.hashed_msg == rhs.hashed_msg)) { - return false; - } - if (!(lhs.public_key_x == rhs.public_key_x)) { - return false; - } - if (!(lhs.public_key_y == rhs.public_key_y)) { - return false; - } - if (!(lhs.signature == rhs.signature)) { - return 
false; - } - if (!(lhs.result == rhs.result)) { - return false; - } - return true; -} - -inline std::vector BlackBoxOp::EcdsaSecp256r1::bincodeSerialize() const -{ - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline BlackBoxOp::EcdsaSecp256r1 BlackBoxOp::EcdsaSecp256r1::bincodeDeserialize(std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Acir - -template <> -template -void serde::Serializable::serialize(const Acir::BlackBoxOp::EcdsaSecp256r1& obj, - Serializer& serializer) -{ - serde::Serializable::serialize(obj.hashed_msg, serializer); - serde::Serializable::serialize(obj.public_key_x, serializer); - serde::Serializable::serialize(obj.public_key_y, serializer); - serde::Serializable::serialize(obj.signature, serializer); - serde::Serializable::serialize(obj.result, serializer); -} - -template <> -template -Acir::BlackBoxOp::EcdsaSecp256r1 serde::Deserializable::deserialize( - Deserializer& deserializer) -{ - Acir::BlackBoxOp::EcdsaSecp256r1 obj; - obj.hashed_msg = serde::Deserializable::deserialize(deserializer); - obj.public_key_x = serde::Deserializable::deserialize(deserializer); - obj.public_key_y = serde::Deserializable::deserialize(deserializer); - obj.signature = serde::Deserializable::deserialize(deserializer); - obj.result = serde::Deserializable::deserialize(deserializer); - return obj; -} - -namespace Acir { - -inline bool operator==(const BlackBoxOp::MultiScalarMul& lhs, const BlackBoxOp::MultiScalarMul& rhs) -{ - if (!(lhs.points == rhs.points)) { - return false; - } - if (!(lhs.scalars == rhs.scalars)) { - return false; - } - if (!(lhs.outputs == rhs.outputs)) { - return false; - } - return true; -} - 
-inline std::vector BlackBoxOp::MultiScalarMul::bincodeSerialize() const -{ - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline BlackBoxOp::MultiScalarMul BlackBoxOp::MultiScalarMul::bincodeDeserialize(std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Acir - -template <> -template -void serde::Serializable::serialize(const Acir::BlackBoxOp::MultiScalarMul& obj, - Serializer& serializer) -{ - serde::Serializable::serialize(obj.points, serializer); - serde::Serializable::serialize(obj.scalars, serializer); - serde::Serializable::serialize(obj.outputs, serializer); -} - -template <> -template -Acir::BlackBoxOp::MultiScalarMul serde::Deserializable::deserialize( - Deserializer& deserializer) -{ - Acir::BlackBoxOp::MultiScalarMul obj; - obj.points = serde::Deserializable::deserialize(deserializer); - obj.scalars = serde::Deserializable::deserialize(deserializer); - obj.outputs = serde::Deserializable::deserialize(deserializer); - return obj; -} - -namespace Acir { - -inline bool operator==(const BlackBoxOp::EmbeddedCurveAdd& lhs, const BlackBoxOp::EmbeddedCurveAdd& rhs) -{ - if (!(lhs.input1_x == rhs.input1_x)) { - return false; - } - if (!(lhs.input1_y == rhs.input1_y)) { - return false; - } - if (!(lhs.input1_infinite == rhs.input1_infinite)) { - return false; - } - if (!(lhs.input2_x == rhs.input2_x)) { - return false; - } - if (!(lhs.input2_y == rhs.input2_y)) { - return false; - } - if (!(lhs.input2_infinite == rhs.input2_infinite)) { - return false; - } - if (!(lhs.result == rhs.result)) { - return false; - } - return true; -} - -inline std::vector BlackBoxOp::EmbeddedCurveAdd::bincodeSerialize() const -{ - auto 
serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline BlackBoxOp::EmbeddedCurveAdd BlackBoxOp::EmbeddedCurveAdd::bincodeDeserialize(std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Acir - -template <> -template -void serde::Serializable::serialize(const Acir::BlackBoxOp::EmbeddedCurveAdd& obj, - Serializer& serializer) -{ - serde::Serializable::serialize(obj.input1_x, serializer); - serde::Serializable::serialize(obj.input1_y, serializer); - serde::Serializable::serialize(obj.input1_infinite, serializer); - serde::Serializable::serialize(obj.input2_x, serializer); - serde::Serializable::serialize(obj.input2_y, serializer); - serde::Serializable::serialize(obj.input2_infinite, serializer); - serde::Serializable::serialize(obj.result, serializer); -} - -template <> -template -Acir::BlackBoxOp::EmbeddedCurveAdd serde::Deserializable::deserialize( - Deserializer& deserializer) -{ - Acir::BlackBoxOp::EmbeddedCurveAdd obj; - obj.input1_x = serde::Deserializable::deserialize(deserializer); - obj.input1_y = serde::Deserializable::deserialize(deserializer); - obj.input1_infinite = serde::Deserializable::deserialize(deserializer); - obj.input2_x = serde::Deserializable::deserialize(deserializer); - obj.input2_y = serde::Deserializable::deserialize(deserializer); - obj.input2_infinite = serde::Deserializable::deserialize(deserializer); - obj.result = serde::Deserializable::deserialize(deserializer); + Acir::BlackBoxOp::Blake2s obj; + obj.message = serde::Deserializable::deserialize(deserializer); + obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Acir { -inline bool operator==(const 
BlackBoxOp::BigIntAdd& lhs, const BlackBoxOp::BigIntAdd& rhs) +inline bool operator==(const BlackBoxOp::Blake3& lhs, const BlackBoxOp::Blake3& rhs) { - if (!(lhs.lhs == rhs.lhs)) { - return false; - } - if (!(lhs.rhs == rhs.rhs)) { + if (!(lhs.message == rhs.message)) { return false; } if (!(lhs.output == rhs.output)) { @@ -8087,17 +6865,17 @@ inline bool operator==(const BlackBoxOp::BigIntAdd& lhs, const BlackBoxOp::BigIn return true; } -inline std::vector BlackBoxOp::BigIntAdd::bincodeSerialize() const +inline std::vector BlackBoxOp::Blake3::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } -inline BlackBoxOp::BigIntAdd BlackBoxOp::BigIntAdd::bincodeDeserialize(std::vector input) +inline BlackBoxOp::Blake3 BlackBoxOp::Blake3::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw_or_abort("Some input bytes were not read"); } @@ -8108,33 +6886,28 @@ inline BlackBoxOp::BigIntAdd BlackBoxOp::BigIntAdd::bincodeDeserialize(std::vect template <> template -void serde::Serializable::serialize(const Acir::BlackBoxOp::BigIntAdd& obj, - Serializer& serializer) +void serde::Serializable::serialize(const Acir::BlackBoxOp::Blake3& obj, + Serializer& serializer) { - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); + serde::Serializable::serialize(obj.message, serializer); serde::Serializable::serialize(obj.output, serializer); } template <> template -Acir::BlackBoxOp::BigIntAdd serde::Deserializable::deserialize(Deserializer& deserializer) +Acir::BlackBoxOp::Blake3 serde::Deserializable::deserialize(Deserializer& deserializer) 
{ - Acir::BlackBoxOp::BigIntAdd obj; - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); + Acir::BlackBoxOp::Blake3 obj; + obj.message = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Acir { -inline bool operator==(const BlackBoxOp::BigIntSub& lhs, const BlackBoxOp::BigIntSub& rhs) +inline bool operator==(const BlackBoxOp::Keccakf1600& lhs, const BlackBoxOp::Keccakf1600& rhs) { - if (!(lhs.lhs == rhs.lhs)) { - return false; - } - if (!(lhs.rhs == rhs.rhs)) { + if (!(lhs.input == rhs.input)) { return false; } if (!(lhs.output == rhs.output)) { @@ -8143,17 +6916,17 @@ inline bool operator==(const BlackBoxOp::BigIntSub& lhs, const BlackBoxOp::BigIn return true; } -inline std::vector BlackBoxOp::BigIntSub::bincodeSerialize() const +inline std::vector BlackBoxOp::Keccakf1600::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } -inline BlackBoxOp::BigIntSub BlackBoxOp::BigIntSub::bincodeDeserialize(std::vector input) +inline BlackBoxOp::Keccakf1600 BlackBoxOp::Keccakf1600::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw_or_abort("Some input bytes were not read"); } @@ -8164,52 +6937,57 @@ inline BlackBoxOp::BigIntSub BlackBoxOp::BigIntSub::bincodeDeserialize(std::vect template <> template -void serde::Serializable::serialize(const Acir::BlackBoxOp::BigIntSub& obj, - Serializer& serializer) +void serde::Serializable::serialize(const Acir::BlackBoxOp::Keccakf1600& obj, + Serializer& serializer) { - 
serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); + serde::Serializable::serialize(obj.input, serializer); serde::Serializable::serialize(obj.output, serializer); } template <> template -Acir::BlackBoxOp::BigIntSub serde::Deserializable::deserialize(Deserializer& deserializer) +Acir::BlackBoxOp::Keccakf1600 serde::Deserializable::deserialize( + Deserializer& deserializer) { - Acir::BlackBoxOp::BigIntSub obj; - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); + Acir::BlackBoxOp::Keccakf1600 obj; + obj.input = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Acir { -inline bool operator==(const BlackBoxOp::BigIntMul& lhs, const BlackBoxOp::BigIntMul& rhs) +inline bool operator==(const BlackBoxOp::EcdsaSecp256k1& lhs, const BlackBoxOp::EcdsaSecp256k1& rhs) { - if (!(lhs.lhs == rhs.lhs)) { + if (!(lhs.hashed_msg == rhs.hashed_msg)) { return false; } - if (!(lhs.rhs == rhs.rhs)) { + if (!(lhs.public_key_x == rhs.public_key_x)) { return false; } - if (!(lhs.output == rhs.output)) { + if (!(lhs.public_key_y == rhs.public_key_y)) { + return false; + } + if (!(lhs.signature == rhs.signature)) { + return false; + } + if (!(lhs.result == rhs.result)) { return false; } return true; } -inline std::vector BlackBoxOp::BigIntMul::bincodeSerialize() const +inline std::vector BlackBoxOp::EcdsaSecp256k1::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } -inline BlackBoxOp::BigIntMul BlackBoxOp::BigIntMul::bincodeDeserialize(std::vector input) +inline BlackBoxOp::EcdsaSecp256k1 BlackBoxOp::EcdsaSecp256k1::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - 
auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw_or_abort("Some input bytes were not read"); } @@ -8220,52 +6998,63 @@ inline BlackBoxOp::BigIntMul BlackBoxOp::BigIntMul::bincodeDeserialize(std::vect template <> template -void serde::Serializable::serialize(const Acir::BlackBoxOp::BigIntMul& obj, - Serializer& serializer) +void serde::Serializable::serialize(const Acir::BlackBoxOp::EcdsaSecp256k1& obj, + Serializer& serializer) { - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); - serde::Serializable::serialize(obj.output, serializer); + serde::Serializable::serialize(obj.hashed_msg, serializer); + serde::Serializable::serialize(obj.public_key_x, serializer); + serde::Serializable::serialize(obj.public_key_y, serializer); + serde::Serializable::serialize(obj.signature, serializer); + serde::Serializable::serialize(obj.result, serializer); } template <> template -Acir::BlackBoxOp::BigIntMul serde::Deserializable::deserialize(Deserializer& deserializer) +Acir::BlackBoxOp::EcdsaSecp256k1 serde::Deserializable::deserialize( + Deserializer& deserializer) { - Acir::BlackBoxOp::BigIntMul obj; - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); + Acir::BlackBoxOp::EcdsaSecp256k1 obj; + obj.hashed_msg = serde::Deserializable::deserialize(deserializer); + obj.public_key_x = serde::Deserializable::deserialize(deserializer); + obj.public_key_y = serde::Deserializable::deserialize(deserializer); + obj.signature = serde::Deserializable::deserialize(deserializer); + obj.result = serde::Deserializable::deserialize(deserializer); return obj; } namespace Acir { -inline bool operator==(const BlackBoxOp::BigIntDiv& lhs, const BlackBoxOp::BigIntDiv& rhs) 
+inline bool operator==(const BlackBoxOp::EcdsaSecp256r1& lhs, const BlackBoxOp::EcdsaSecp256r1& rhs) { - if (!(lhs.lhs == rhs.lhs)) { + if (!(lhs.hashed_msg == rhs.hashed_msg)) { return false; } - if (!(lhs.rhs == rhs.rhs)) { + if (!(lhs.public_key_x == rhs.public_key_x)) { return false; } - if (!(lhs.output == rhs.output)) { + if (!(lhs.public_key_y == rhs.public_key_y)) { + return false; + } + if (!(lhs.signature == rhs.signature)) { + return false; + } + if (!(lhs.result == rhs.result)) { return false; } return true; } -inline std::vector BlackBoxOp::BigIntDiv::bincodeSerialize() const +inline std::vector BlackBoxOp::EcdsaSecp256r1::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } -inline BlackBoxOp::BigIntDiv BlackBoxOp::BigIntDiv::bincodeDeserialize(std::vector input) +inline BlackBoxOp::EcdsaSecp256r1 BlackBoxOp::EcdsaSecp256r1::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw_or_abort("Some input bytes were not read"); } @@ -8276,52 +7065,57 @@ inline BlackBoxOp::BigIntDiv BlackBoxOp::BigIntDiv::bincodeDeserialize(std::vect template <> template -void serde::Serializable::serialize(const Acir::BlackBoxOp::BigIntDiv& obj, - Serializer& serializer) +void serde::Serializable::serialize(const Acir::BlackBoxOp::EcdsaSecp256r1& obj, + Serializer& serializer) { - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); - serde::Serializable::serialize(obj.output, serializer); + serde::Serializable::serialize(obj.hashed_msg, serializer); + serde::Serializable::serialize(obj.public_key_x, serializer); + 
serde::Serializable::serialize(obj.public_key_y, serializer); + serde::Serializable::serialize(obj.signature, serializer); + serde::Serializable::serialize(obj.result, serializer); } template <> template -Acir::BlackBoxOp::BigIntDiv serde::Deserializable::deserialize(Deserializer& deserializer) +Acir::BlackBoxOp::EcdsaSecp256r1 serde::Deserializable::deserialize( + Deserializer& deserializer) { - Acir::BlackBoxOp::BigIntDiv obj; - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); + Acir::BlackBoxOp::EcdsaSecp256r1 obj; + obj.hashed_msg = serde::Deserializable::deserialize(deserializer); + obj.public_key_x = serde::Deserializable::deserialize(deserializer); + obj.public_key_y = serde::Deserializable::deserialize(deserializer); + obj.signature = serde::Deserializable::deserialize(deserializer); + obj.result = serde::Deserializable::deserialize(deserializer); return obj; } namespace Acir { -inline bool operator==(const BlackBoxOp::BigIntFromLeBytes& lhs, const BlackBoxOp::BigIntFromLeBytes& rhs) +inline bool operator==(const BlackBoxOp::MultiScalarMul& lhs, const BlackBoxOp::MultiScalarMul& rhs) { - if (!(lhs.inputs == rhs.inputs)) { + if (!(lhs.points == rhs.points)) { return false; } - if (!(lhs.modulus == rhs.modulus)) { + if (!(lhs.scalars == rhs.scalars)) { return false; } - if (!(lhs.output == rhs.output)) { + if (!(lhs.outputs == rhs.outputs)) { return false; } return true; } -inline std::vector BlackBoxOp::BigIntFromLeBytes::bincodeSerialize() const +inline std::vector BlackBoxOp::MultiScalarMul::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } -inline BlackBoxOp::BigIntFromLeBytes BlackBoxOp::BigIntFromLeBytes::bincodeDeserialize(std::vector input) +inline 
BlackBoxOp::MultiScalarMul BlackBoxOp::MultiScalarMul::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw_or_abort("Some input bytes were not read"); } @@ -8332,50 +7126,65 @@ inline BlackBoxOp::BigIntFromLeBytes BlackBoxOp::BigIntFromLeBytes::bincodeDeser template <> template -void serde::Serializable::serialize(const Acir::BlackBoxOp::BigIntFromLeBytes& obj, - Serializer& serializer) +void serde::Serializable::serialize(const Acir::BlackBoxOp::MultiScalarMul& obj, + Serializer& serializer) { - serde::Serializable::serialize(obj.inputs, serializer); - serde::Serializable::serialize(obj.modulus, serializer); - serde::Serializable::serialize(obj.output, serializer); + serde::Serializable::serialize(obj.points, serializer); + serde::Serializable::serialize(obj.scalars, serializer); + serde::Serializable::serialize(obj.outputs, serializer); } template <> template -Acir::BlackBoxOp::BigIntFromLeBytes serde::Deserializable::deserialize( +Acir::BlackBoxOp::MultiScalarMul serde::Deserializable::deserialize( Deserializer& deserializer) { - Acir::BlackBoxOp::BigIntFromLeBytes obj; - obj.inputs = serde::Deserializable::deserialize(deserializer); - obj.modulus = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); + Acir::BlackBoxOp::MultiScalarMul obj; + obj.points = serde::Deserializable::deserialize(deserializer); + obj.scalars = serde::Deserializable::deserialize(deserializer); + obj.outputs = serde::Deserializable::deserialize(deserializer); return obj; } namespace Acir { -inline bool operator==(const BlackBoxOp::BigIntToLeBytes& lhs, const BlackBoxOp::BigIntToLeBytes& rhs) +inline bool operator==(const BlackBoxOp::EmbeddedCurveAdd& lhs, const BlackBoxOp::EmbeddedCurveAdd& rhs) { 
- if (!(lhs.input == rhs.input)) { + if (!(lhs.input1_x == rhs.input1_x)) { return false; } - if (!(lhs.output == rhs.output)) { + if (!(lhs.input1_y == rhs.input1_y)) { + return false; + } + if (!(lhs.input1_infinite == rhs.input1_infinite)) { + return false; + } + if (!(lhs.input2_x == rhs.input2_x)) { + return false; + } + if (!(lhs.input2_y == rhs.input2_y)) { + return false; + } + if (!(lhs.input2_infinite == rhs.input2_infinite)) { + return false; + } + if (!(lhs.result == rhs.result)) { return false; } return true; } -inline std::vector BlackBoxOp::BigIntToLeBytes::bincodeSerialize() const +inline std::vector BlackBoxOp::EmbeddedCurveAdd::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } -inline BlackBoxOp::BigIntToLeBytes BlackBoxOp::BigIntToLeBytes::bincodeDeserialize(std::vector input) +inline BlackBoxOp::EmbeddedCurveAdd BlackBoxOp::EmbeddedCurveAdd::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw_or_abort("Some input bytes were not read"); } @@ -8386,21 +7195,31 @@ inline BlackBoxOp::BigIntToLeBytes BlackBoxOp::BigIntToLeBytes::bincodeDeseriali template <> template -void serde::Serializable::serialize(const Acir::BlackBoxOp::BigIntToLeBytes& obj, - Serializer& serializer) +void serde::Serializable::serialize(const Acir::BlackBoxOp::EmbeddedCurveAdd& obj, + Serializer& serializer) { - serde::Serializable::serialize(obj.input, serializer); - serde::Serializable::serialize(obj.output, serializer); + serde::Serializable::serialize(obj.input1_x, serializer); + serde::Serializable::serialize(obj.input1_y, serializer); + 
serde::Serializable::serialize(obj.input1_infinite, serializer); + serde::Serializable::serialize(obj.input2_x, serializer); + serde::Serializable::serialize(obj.input2_y, serializer); + serde::Serializable::serialize(obj.input2_infinite, serializer); + serde::Serializable::serialize(obj.result, serializer); } template <> template -Acir::BlackBoxOp::BigIntToLeBytes serde::Deserializable::deserialize( +Acir::BlackBoxOp::EmbeddedCurveAdd serde::Deserializable::deserialize( Deserializer& deserializer) { - Acir::BlackBoxOp::BigIntToLeBytes obj; - obj.input = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); + Acir::BlackBoxOp::EmbeddedCurveAdd obj; + obj.input1_x = serde::Deserializable::deserialize(deserializer); + obj.input1_y = serde::Deserializable::deserialize(deserializer); + obj.input1_infinite = serde::Deserializable::deserialize(deserializer); + obj.input2_x = serde::Deserializable::deserialize(deserializer); + obj.input2_y = serde::Deserializable::deserialize(deserializer); + obj.input2_infinite = serde::Deserializable::deserialize(deserializer); + obj.result = serde::Deserializable::deserialize(deserializer); return obj; } @@ -8414,9 +7233,6 @@ inline bool operator==(const BlackBoxOp::Poseidon2Permutation& lhs, const BlackB if (!(lhs.output == rhs.output)) { return false; } - if (!(lhs.len == rhs.len)) { - return false; - } return true; } @@ -8446,7 +7262,6 @@ void serde::Serializable::serialize( { serde::Serializable::serialize(obj.message, serializer); serde::Serializable::serialize(obj.output, serializer); - serde::Serializable::serialize(obj.len, serializer); } template <> @@ -8457,7 +7272,6 @@ Acir::BlackBoxOp::Poseidon2Permutation serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); - obj.len = serde::Deserializable::deserialize(deserializer); return obj; } @@ -8811,6 +7625,9 @@ namespace Acir { inline bool operator==(const 
BrilligBytecode& lhs, const BrilligBytecode& rhs) { + if (!(lhs.function_name == rhs.function_name)) { + return false; + } if (!(lhs.bytecode == rhs.bytecode)) { return false; } @@ -8841,6 +7658,7 @@ template void serde::Serializable::serialize(const Acir::BrilligBytecode& obj, Serializer& serializer) { serializer.increase_container_depth(); + serde::Serializable::serialize(obj.function_name, serializer); serde::Serializable::serialize(obj.bytecode, serializer); serializer.decrease_container_depth(); } @@ -8851,6 +7669,7 @@ Acir::BrilligBytecode serde::Deserializable::deserialize( { deserializer.increase_container_depth(); Acir::BrilligBytecode obj; + obj.function_name = serde::Deserializable::deserialize(deserializer); obj.bytecode = serde::Deserializable::deserialize(deserializer); deserializer.decrease_container_depth(); return obj; @@ -9245,66 +8064,10 @@ inline std::vector BrilligOpcode::Not::bincodeSerialize() const return std::move(serializer).bytes(); } -inline BrilligOpcode::Not BrilligOpcode::Not::bincodeDeserialize(std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Acir - -template <> -template -void serde::Serializable::serialize(const Acir::BrilligOpcode::Not& obj, - Serializer& serializer) -{ - serde::Serializable::serialize(obj.destination, serializer); - serde::Serializable::serialize(obj.source, serializer); - serde::Serializable::serialize(obj.bit_size, serializer); -} - -template <> -template -Acir::BrilligOpcode::Not serde::Deserializable::deserialize(Deserializer& deserializer) -{ - Acir::BrilligOpcode::Not obj; - obj.destination = serde::Deserializable::deserialize(deserializer); - obj.source = serde::Deserializable::deserialize(deserializer); - obj.bit_size = 
serde::Deserializable::deserialize(deserializer); - return obj; -} - -namespace Acir { - -inline bool operator==(const BrilligOpcode::Cast& lhs, const BrilligOpcode::Cast& rhs) -{ - if (!(lhs.destination == rhs.destination)) { - return false; - } - if (!(lhs.source == rhs.source)) { - return false; - } - if (!(lhs.bit_size == rhs.bit_size)) { - return false; - } - return true; -} - -inline std::vector BrilligOpcode::Cast::bincodeSerialize() const -{ - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline BrilligOpcode::Cast BrilligOpcode::Cast::bincodeDeserialize(std::vector input) +inline BrilligOpcode::Not BrilligOpcode::Not::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw_or_abort("Some input bytes were not read"); } @@ -9315,8 +8078,8 @@ inline BrilligOpcode::Cast BrilligOpcode::Cast::bincodeDeserialize(std::vector template -void serde::Serializable::serialize(const Acir::BrilligOpcode::Cast& obj, - Serializer& serializer) +void serde::Serializable::serialize(const Acir::BrilligOpcode::Not& obj, + Serializer& serializer) { serde::Serializable::serialize(obj.destination, serializer); serde::Serializable::serialize(obj.source, serializer); @@ -9325,9 +8088,9 @@ void serde::Serializable::serialize(const Acir::Brill template <> template -Acir::BrilligOpcode::Cast serde::Deserializable::deserialize(Deserializer& deserializer) +Acir::BrilligOpcode::Not serde::Deserializable::deserialize(Deserializer& deserializer) { - Acir::BrilligOpcode::Cast obj; + Acir::BrilligOpcode::Not obj; obj.destination = serde::Deserializable::deserialize(deserializer); obj.source = serde::Deserializable::deserialize(deserializer); obj.bit_size = 
serde::Deserializable::deserialize(deserializer); @@ -9336,28 +8099,31 @@ Acir::BrilligOpcode::Cast serde::Deserializable::dese namespace Acir { -inline bool operator==(const BrilligOpcode::JumpIfNot& lhs, const BrilligOpcode::JumpIfNot& rhs) +inline bool operator==(const BrilligOpcode::Cast& lhs, const BrilligOpcode::Cast& rhs) { - if (!(lhs.condition == rhs.condition)) { + if (!(lhs.destination == rhs.destination)) { return false; } - if (!(lhs.location == rhs.location)) { + if (!(lhs.source == rhs.source)) { + return false; + } + if (!(lhs.bit_size == rhs.bit_size)) { return false; } return true; } -inline std::vector BrilligOpcode::JumpIfNot::bincodeSerialize() const +inline std::vector BrilligOpcode::Cast::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } -inline BrilligOpcode::JumpIfNot BrilligOpcode::JumpIfNot::bincodeDeserialize(std::vector input) +inline BrilligOpcode::Cast BrilligOpcode::Cast::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw_or_abort("Some input bytes were not read"); } @@ -9368,21 +8134,22 @@ inline BrilligOpcode::JumpIfNot BrilligOpcode::JumpIfNot::bincodeDeserialize(std template <> template -void serde::Serializable::serialize(const Acir::BrilligOpcode::JumpIfNot& obj, - Serializer& serializer) +void serde::Serializable::serialize(const Acir::BrilligOpcode::Cast& obj, + Serializer& serializer) { - serde::Serializable::serialize(obj.condition, serializer); - serde::Serializable::serialize(obj.location, serializer); + serde::Serializable::serialize(obj.destination, serializer); + serde::Serializable::serialize(obj.source, serializer); + 
serde::Serializable::serialize(obj.bit_size, serializer); } template <> template -Acir::BrilligOpcode::JumpIfNot serde::Deserializable::deserialize( - Deserializer& deserializer) +Acir::BrilligOpcode::Cast serde::Deserializable::deserialize(Deserializer& deserializer) { - Acir::BrilligOpcode::JumpIfNot obj; - obj.condition = serde::Deserializable::deserialize(deserializer); - obj.location = serde::Deserializable::deserialize(deserializer); + Acir::BrilligOpcode::Cast obj; + obj.destination = serde::Deserializable::deserialize(deserializer); + obj.source = serde::Deserializable::deserialize(deserializer); + obj.bit_size = serde::Deserializable::deserialize(deserializer); return obj; } @@ -10307,13 +9074,13 @@ namespace Acir { inline bool operator==(const Circuit& lhs, const Circuit& rhs) { - if (!(lhs.current_witness_index == rhs.current_witness_index)) { + if (!(lhs.function_name == rhs.function_name)) { return false; } - if (!(lhs.opcodes == rhs.opcodes)) { + if (!(lhs.current_witness_index == rhs.current_witness_index)) { return false; } - if (!(lhs.expression_width == rhs.expression_width)) { + if (!(lhs.opcodes == rhs.opcodes)) { return false; } if (!(lhs.private_parameters == rhs.private_parameters)) { @@ -10355,9 +9122,9 @@ template void serde::Serializable::serialize(const Acir::Circuit& obj, Serializer& serializer) { serializer.increase_container_depth(); + serde::Serializable::serialize(obj.function_name, serializer); serde::Serializable::serialize(obj.current_witness_index, serializer); serde::Serializable::serialize(obj.opcodes, serializer); - serde::Serializable::serialize(obj.expression_width, serializer); serde::Serializable::serialize(obj.private_parameters, serializer); serde::Serializable::serialize(obj.public_parameters, serializer); serde::Serializable::serialize(obj.return_values, serializer); @@ -10371,9 +9138,9 @@ Acir::Circuit serde::Deserializable::deserialize(Deserializer& de { deserializer.increase_container_depth(); Acir::Circuit obj; + 
obj.function_name = serde::Deserializable::deserialize(deserializer); obj.current_witness_index = serde::Deserializable::deserialize(deserializer); obj.opcodes = serde::Deserializable::deserialize(deserializer); - obj.expression_width = serde::Deserializable::deserialize(deserializer); obj.private_parameters = serde::Deserializable::deserialize(deserializer); obj.public_parameters = serde::Deserializable::deserialize(deserializer); obj.return_values = serde::Deserializable::deserialize(deserializer); @@ -10384,150 +9151,6 @@ Acir::Circuit serde::Deserializable::deserialize(Deserializer& de namespace Acir { -inline bool operator==(const ConstantOrWitnessEnum& lhs, const ConstantOrWitnessEnum& rhs) -{ - if (!(lhs.value == rhs.value)) { - return false; - } - return true; -} - -inline std::vector ConstantOrWitnessEnum::bincodeSerialize() const -{ - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline ConstantOrWitnessEnum ConstantOrWitnessEnum::bincodeDeserialize(std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Acir - -template <> -template -void serde::Serializable::serialize(const Acir::ConstantOrWitnessEnum& obj, - Serializer& serializer) -{ - serializer.increase_container_depth(); - serde::Serializable::serialize(obj.value, serializer); - serializer.decrease_container_depth(); -} - -template <> -template -Acir::ConstantOrWitnessEnum serde::Deserializable::deserialize(Deserializer& deserializer) -{ - deserializer.increase_container_depth(); - Acir::ConstantOrWitnessEnum obj; - obj.value = serde::Deserializable::deserialize(deserializer); - deserializer.decrease_container_depth(); - return obj; -} - -namespace Acir { - 
-inline bool operator==(const ConstantOrWitnessEnum::Constant& lhs, const ConstantOrWitnessEnum::Constant& rhs) -{ - if (!(lhs.value == rhs.value)) { - return false; - } - return true; -} - -inline std::vector ConstantOrWitnessEnum::Constant::bincodeSerialize() const -{ - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline ConstantOrWitnessEnum::Constant ConstantOrWitnessEnum::Constant::bincodeDeserialize(std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Acir - -template <> -template -void serde::Serializable::serialize( - const Acir::ConstantOrWitnessEnum::Constant& obj, Serializer& serializer) -{ - serde::Serializable::serialize(obj.value, serializer); -} - -template <> -template -Acir::ConstantOrWitnessEnum::Constant serde::Deserializable::deserialize( - Deserializer& deserializer) -{ - Acir::ConstantOrWitnessEnum::Constant obj; - obj.value = serde::Deserializable::deserialize(deserializer); - return obj; -} - -namespace Acir { - -inline bool operator==(const ConstantOrWitnessEnum::Witness& lhs, const ConstantOrWitnessEnum::Witness& rhs) -{ - if (!(lhs.value == rhs.value)) { - return false; - } - return true; -} - -inline std::vector ConstantOrWitnessEnum::Witness::bincodeSerialize() const -{ - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline ConstantOrWitnessEnum::Witness ConstantOrWitnessEnum::Witness::bincodeDeserialize(std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < 
input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Acir - -template <> -template -void serde::Serializable::serialize( - const Acir::ConstantOrWitnessEnum::Witness& obj, Serializer& serializer) -{ - serde::Serializable::serialize(obj.value, serializer); -} - -template <> -template -Acir::ConstantOrWitnessEnum::Witness serde::Deserializable::deserialize( - Deserializer& deserializer) -{ - Acir::ConstantOrWitnessEnum::Witness obj; - obj.value = serde::Deserializable::deserialize(deserializer); - return obj; -} - -namespace Acir { - inline bool operator==(const Expression& lhs, const Expression& rhs) { if (!(lhs.mul_terms == rhs.mul_terms)) { @@ -10870,10 +9493,7 @@ namespace Acir { inline bool operator==(const FunctionInput& lhs, const FunctionInput& rhs) { - if (!(lhs.input == rhs.input)) { - return false; - } - if (!(lhs.num_bits == rhs.num_bits)) { + if (!(lhs.value == rhs.value)) { return false; } return true; @@ -10903,8 +9523,7 @@ template void serde::Serializable::serialize(const Acir::FunctionInput& obj, Serializer& serializer) { serializer.increase_container_depth(); - serde::Serializable::serialize(obj.input, serializer); - serde::Serializable::serialize(obj.num_bits, serializer); + serde::Serializable::serialize(obj.value, serializer); serializer.decrease_container_depth(); } @@ -10914,14 +9533,107 @@ Acir::FunctionInput serde::Deserializable::deserialize(Dese { deserializer.increase_container_depth(); Acir::FunctionInput obj; - obj.input = serde::Deserializable::deserialize(deserializer); - obj.num_bits = serde::Deserializable::deserialize(deserializer); + obj.value = serde::Deserializable::deserialize(deserializer); deserializer.decrease_container_depth(); return obj; } namespace Acir { +inline bool operator==(const FunctionInput::Constant& lhs, const FunctionInput::Constant& rhs) +{ + if (!(lhs.value == rhs.value)) { + return false; + } + return true; +} + +inline std::vector 
FunctionInput::Constant::bincodeSerialize() const +{ + auto serializer = serde::BincodeSerializer(); + serde::Serializable::serialize(*this, serializer); + return std::move(serializer).bytes(); +} + +inline FunctionInput::Constant FunctionInput::Constant::bincodeDeserialize(std::vector input) +{ + auto deserializer = serde::BincodeDeserializer(input); + auto value = serde::Deserializable::deserialize(deserializer); + if (deserializer.get_buffer_offset() < input.size()) { + throw_or_abort("Some input bytes were not read"); + } + return value; +} + +} // end of namespace Acir + +template <> +template +void serde::Serializable::serialize(const Acir::FunctionInput::Constant& obj, + Serializer& serializer) +{ + serde::Serializable::serialize(obj.value, serializer); +} + +template <> +template +Acir::FunctionInput::Constant serde::Deserializable::deserialize( + Deserializer& deserializer) +{ + Acir::FunctionInput::Constant obj; + obj.value = serde::Deserializable::deserialize(deserializer); + return obj; +} + +namespace Acir { + +inline bool operator==(const FunctionInput::Witness& lhs, const FunctionInput::Witness& rhs) +{ + if (!(lhs.value == rhs.value)) { + return false; + } + return true; +} + +inline std::vector FunctionInput::Witness::bincodeSerialize() const +{ + auto serializer = serde::BincodeSerializer(); + serde::Serializable::serialize(*this, serializer); + return std::move(serializer).bytes(); +} + +inline FunctionInput::Witness FunctionInput::Witness::bincodeDeserialize(std::vector input) +{ + auto deserializer = serde::BincodeDeserializer(input); + auto value = serde::Deserializable::deserialize(deserializer); + if (deserializer.get_buffer_offset() < input.size()) { + throw_or_abort("Some input bytes were not read"); + } + return value; +} + +} // end of namespace Acir + +template <> +template +void serde::Serializable::serialize(const Acir::FunctionInput::Witness& obj, + Serializer& serializer) +{ + serde::Serializable::serialize(obj.value, serializer); 
+} + +template <> +template +Acir::FunctionInput::Witness serde::Deserializable::deserialize( + Deserializer& deserializer) +{ + Acir::FunctionInput::Witness obj; + obj.value = serde::Deserializable::deserialize(deserializer); + return obj; +} + +namespace Acir { + inline bool operator==(const HeapArray& lhs, const HeapArray& rhs) { if (!(lhs.pointer == rhs.pointer)) { @@ -11862,9 +10574,6 @@ inline bool operator==(const Opcode::MemoryOp& lhs, const Opcode::MemoryOp& rhs) if (!(lhs.op == rhs.op)) { return false; } - if (!(lhs.predicate == rhs.predicate)) { - return false; - } return true; } @@ -11893,7 +10602,6 @@ void serde::Serializable::serialize(const Acir::Opcode:: { serde::Serializable::serialize(obj.block_id, serializer); serde::Serializable::serialize(obj.op, serializer); - serde::Serializable::serialize(obj.predicate, serializer); } template <> @@ -11903,7 +10611,6 @@ Acir::Opcode::MemoryOp serde::Deserializable::deserializ Acir::Opcode::MemoryOp obj; obj.block_id = serde::Deserializable::deserialize(deserializer); obj.op = serde::Deserializable::deserialize(deserializer); - obj.predicate = serde::Deserializable::deserialize(deserializer); return obj; } diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/serde/witness_stack.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/serde/witness_stack.hpp index 1ceecb90ce6e..c8ecef95b07b 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/serde/witness_stack.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/serde/witness_stack.hpp @@ -75,7 +75,7 @@ struct Witness { }; struct WitnessMap { - std::map value; + std::map> value; friend bool operator==(const WitnessMap&, const WitnessMap&); std::vector bincodeSerialize() const; diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/sha256_constraint.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/sha256_constraint.test.cpp index b029be066289..ef3951bf750f 100644 --- 
a/barretenberg/cpp/src/barretenberg/dsl/acir_format/sha256_constraint.test.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/sha256_constraint.test.cpp @@ -37,30 +37,7 @@ TEST_F(Sha256Tests, TestSha256Compression) .varnum = 34, .num_acir_opcodes = 1, .public_inputs = {}, - .logic_constraints = {}, - .range_constraints = {}, - .aes128_constraints = {}, .sha256_compression = { sha256_compression }, - - .ecdsa_k1_constraints = {}, - .ecdsa_r1_constraints = {}, - .blake2s_constraints = {}, - .blake3_constraints = {}, - .keccak_permutations = {}, - .poseidon2_constraints = {}, - .multi_scalar_mul_constraints = {}, - .ec_add_constraints = {}, - .honk_recursion_constraints = {}, - .avm_recursion_constraints = {}, - .ivc_recursion_constraints = {}, - .bigint_from_le_bytes_constraints = {}, - .bigint_to_le_bytes_constraints = {}, - .bigint_operations = {}, - .assert_equalities = {}, - .poly_triple_constraints = {}, - .quad_constraints = {}, - .big_quad_constraints = {}, - .block_constraints = {}, .original_opcode_indices = create_empty_original_opcode_indices(), }; mock_opcode_indices(constraint_system); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/utils.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/utils.hpp new file mode 100644 index 000000000000..791bf927f677 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/utils.hpp @@ -0,0 +1,84 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } +// ===================== + +#pragma once + +#include "barretenberg/dsl/acir_format/acir_format.hpp" +#include "barretenberg/stdlib/primitives/field/field.hpp" +#include + +namespace acir_format { + +using namespace bb; + +/** + * @brief Generate builder variables from witness indices. 
This function is useful when receiving the indices of the + * witness from ACIR. + * + * @tparam Builder + * @param builder + * @param witness_indices + * @return std::vector> + */ +template +static std::vector> fields_from_witnesses(Builder& builder, + std::span witness_indices) +{ + std::vector> result; + result.reserve(witness_indices.size()); + for (const auto& idx : witness_indices) { + result.emplace_back(stdlib::field_t::from_witness_index(&builder, idx)); + } + return result; +} + +/** + * @brief Append values to a witness vector and track their indices. + * + * @details This function is useful in mocking situations, when we need to add dummy variables to a builder. + * @tparam T + * @param witness + * @param input + * @return std::vector + */ +template +std::vector add_to_witness_and_track_indices(WitnessVector& witness, std::span input) +{ + std::vector indices; + indices.reserve(input.size()); + auto witness_idx = static_cast(witness.size()); + for (const auto& value : input) { + witness.push_back(bb::fr(value)); + indices.push_back(witness_idx++); + } + return indices; +}; + +/** + * @brief Append values to a witness vector and track their indices. + * + * @details This function is useful in mocking situations, when we need to add dummy variables to a builder. 
+ * + * @tparam T + * @tparam N + * @param witness + * @param input + * @return std::array + */ +template +std::array add_to_witness_and_track_indices(WitnessVector& witness, std::span input) +{ + std::array indices; + auto witness_idx = static_cast(witness.size()); + size_t idx = 0; + for (const auto& value : input) { + witness.push_back(bb::fr(value)); + indices[idx++] = witness_idx++; + } + return indices; +}; + +} // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/witness_constant.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/witness_constant.hpp index ecfcca619f37..9bc8ccac0f3a 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/witness_constant.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/witness_constant.hpp @@ -5,7 +5,6 @@ // ===================== #pragma once -#include "barretenberg/dsl/acir_format/ecdsa_secp256k1.hpp" #include "barretenberg/serialize/msgpack.hpp" #include "barretenberg/stdlib/primitives/field/field.hpp" #include "barretenberg/stdlib/primitives/group/cycle_group.hpp" @@ -26,6 +25,15 @@ template struct WitnessOrConstant { .is_constant = false, }; } + + static WitnessOrConstant from_constant(FF value) + { + return WitnessOrConstant{ + .index = 0, + .value = value, + .is_constant = true, + }; + } }; template @@ -45,4 +53,4 @@ bb::stdlib::cycle_group to_grumpkin_point(const WitnessOrConstant& bool has_valid_witness_assignments, Builder& builder); -} // namespace acir_format \ No newline at end of file +} // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp index b396b7a07f71..d87cbddf6c13 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp @@ -15,7 +15,7 @@ #include "barretenberg/common/throw_or_abort.hpp" #include "barretenberg/common/zip_view.hpp" #include 
"barretenberg/dsl/acir_format/acir_format.hpp" -#include "barretenberg/dsl/acir_format/ivc_recursion_constraint.hpp" +#include "barretenberg/dsl/acir_format/pg_recursion_constraint.hpp" #include "barretenberg/honk/execution_trace/mega_execution_trace.hpp" #include "barretenberg/serialize/msgpack.hpp" @@ -47,9 +47,9 @@ WASM_EXPORT void acir_prove_and_verify_ultra_honk(uint8_t const* acir_vec, uint8 auto builder = acir_format::create_circuit(program, metadata); - auto proving_key = std::make_shared>(builder); - auto verification_key = std::make_shared(proving_key->get_precomputed()); - UltraProver prover{ proving_key, verification_key }; + auto prover_instance = std::make_shared>(builder); + auto verification_key = std::make_shared(prover_instance->get_precomputed()); + UltraProver prover{ prover_instance, verification_key }; auto proof = prover.construct_proof(); UltraVerifier verifier{ verification_key }; @@ -69,9 +69,9 @@ WASM_EXPORT void acir_prove_and_verify_mega_honk(uint8_t const* acir_vec, uint8_ auto builder = acir_format::create_circuit(program, metadata); - auto proving_key = std::make_shared>(builder); - auto verification_key = std::make_shared(proving_key->get_precomputed()); - MegaProver prover{ proving_key, verification_key }; + auto prover_instance = std::make_shared>(builder); + auto verification_key = std::make_shared(prover_instance->get_precomputed()); + MegaProver prover{ prover_instance, verification_key }; auto proof = prover.construct_proof(); MegaVerifier verifier{ verification_key }; @@ -132,11 +132,11 @@ WASM_EXPORT void acir_prove_ultra_zk_honk(uint8_t const* acir_vec, acir_format::witness_buf_to_witness_data(from_buffer>(witness_vec)) }; auto builder = acir_format::create_circuit(program, metadata); - auto proving_key = std::make_shared>(builder); + auto prover_instance = std::make_shared>(builder); auto verification_key = std::make_shared(from_buffer(vk_buf)); - return UltraZKProver(proving_key, verification_key); + return 
UltraZKProver(prover_instance, verification_key); }(); auto proof = prover.construct_proof(); @@ -157,10 +157,10 @@ WASM_EXPORT void acir_prove_ultra_keccak_honk(uint8_t const* acir_vec, }; auto builder = acir_format::create_circuit(program, metadata); - auto proving_key = std::make_shared>(builder); + auto prover_instance = std::make_shared>(builder); auto verification_key = std::make_shared( from_buffer(vk_buf)); - return UltraKeccakProver(proving_key, verification_key); + return UltraKeccakProver(prover_instance, verification_key); }(); auto proof = prover.construct_proof(); *out = to_heap_buffer(to_buffer(proof)); @@ -180,10 +180,10 @@ WASM_EXPORT void acir_prove_ultra_keccak_zk_honk(uint8_t const* acir_vec, }; auto builder = acir_format::create_circuit(program, metadata); - auto proving_key = std::make_shared>(builder); + auto prover_instance = std::make_shared>(builder); auto verification_key = std::make_shared( from_buffer(vk_buf)); - return UltraKeccakZKProver(proving_key, verification_key); + return UltraKeccakZKProver(prover_instance, verification_key); }(); auto proof = prover.construct_proof(); *out = to_heap_buffer(to_buffer(proof)); @@ -317,53 +317,53 @@ WASM_EXPORT void acir_verify_ultra_starknet_zk_honk([[maybe_unused]] uint8_t con WASM_EXPORT void acir_write_vk_ultra_honk(uint8_t const* acir_vec, uint8_t** out) { - using DeciderProvingKey = DeciderProvingKey_; + using ProverInstance = ProverInstance_; using VerificationKey = UltraFlavor::VerificationKey; // lambda to free the builder - DeciderProvingKey proving_key = [&] { + ProverInstance prover_instance = [&] { const acir_format::ProgramMetadata metadata{ .honk_recursion = 1 }; acir_format::AcirProgram program{ acir_format::circuit_buf_to_acir_format( from_buffer>(acir_vec)) }; auto builder = acir_format::create_circuit(program, metadata); - return DeciderProvingKey(builder); + return ProverInstance(builder); }(); - VerificationKey vk(proving_key.get_precomputed()); + VerificationKey 
vk(prover_instance.get_precomputed()); vinfo("Constructed UltraHonk verification key"); *out = to_heap_buffer(to_buffer(vk)); } WASM_EXPORT void acir_write_vk_ultra_keccak_honk(uint8_t const* acir_vec, uint8_t** out) { - using DeciderProvingKey = DeciderProvingKey_; + using ProverInstance = ProverInstance_; using VerificationKey = UltraKeccakFlavor::VerificationKey; // lambda to free the builder - DeciderProvingKey proving_key = [&] { + ProverInstance prover_instance = [&] { const acir_format::ProgramMetadata metadata{ .honk_recursion = 1 }; acir_format::AcirProgram program{ acir_format::circuit_buf_to_acir_format( from_buffer>(acir_vec)) }; auto builder = acir_format::create_circuit(program, metadata); - return DeciderProvingKey(builder); + return ProverInstance(builder); }(); - VerificationKey vk(proving_key.get_precomputed()); + VerificationKey vk(prover_instance.get_precomputed()); vinfo("Constructed UltraKeccakHonk verification key"); *out = to_heap_buffer(to_buffer(vk)); } WASM_EXPORT void acir_write_vk_ultra_keccak_zk_honk(uint8_t const* acir_vec, uint8_t** out) { - using DeciderProvingKey = DeciderProvingKey_; + using ProverInstance = ProverInstance_; using VerificationKey = UltraKeccakZKFlavor::VerificationKey; // lambda to free the builder - DeciderProvingKey proving_key = [&] { + ProverInstance prover_instance = [&] { const acir_format::ProgramMetadata metadata{ .honk_recursion = 1 }; acir_format::AcirProgram program{ acir_format::circuit_buf_to_acir_format( from_buffer>(acir_vec)) }; auto builder = acir_format::create_circuit(program, metadata); - return DeciderProvingKey(builder); + return ProverInstance(builder); }(); - VerificationKey vk(proving_key.get_precomputed()); + VerificationKey vk(prover_instance.get_precomputed()); vinfo("Constructed UltraKeccakZKHonk verification key"); *out = to_heap_buffer(to_buffer(vk)); } @@ -372,18 +372,18 @@ WASM_EXPORT void acir_write_vk_ultra_starknet_honk([[maybe_unused]] uint8_t cons [[maybe_unused]] uint8_t** 
out) { #ifdef STARKNET_GARAGA_FLAVORS - using DeciderProvingKey = DeciderProvingKey_; + using ProverInstance = ProverInstance_; using VerificationKey = UltraStarknetFlavor::VerificationKey; // lambda to free the builder - DeciderProvingKey proving_key = [&] { + ProverInstance prover_instance = [&] { const acir_format::ProgramMetadata metadata{ .honk_recursion = 1 }; acir_format::AcirProgram program{ acir_format::circuit_buf_to_acir_format( from_buffer>(acir_vec)) }; auto builder = acir_format::create_circuit(program, metadata); - return DeciderProvingKey(builder); + return ProverInstance(builder); }(); - VerificationKey vk(proving_key.get_precomputed()); + VerificationKey vk(prover_instance.get_precomputed()); vinfo("Constructed UltraStarknetHonk verification key"); *out = to_heap_buffer(to_buffer(vk)); #else @@ -395,18 +395,18 @@ WASM_EXPORT void acir_write_vk_ultra_starknet_zk_honk([[maybe_unused]] uint8_t c [[maybe_unused]] uint8_t** out) { #ifdef STARKNET_GARAGA_FLAVORS - using DeciderProvingKey = DeciderProvingKey_; + using ProverInstance = ProverInstance_; using VerificationKey = UltraStarknetZKFlavor::VerificationKey; // lambda to free the builder - DeciderProvingKey proving_key = [&] { + ProverInstance prover_instance = [&] { const acir_format::ProgramMetadata metadata{ .honk_recursion = 1 }; acir_format::AcirProgram program{ acir_format::circuit_buf_to_acir_format( from_buffer>(acir_vec)) }; auto builder = acir_format::create_circuit(program, metadata); - return DeciderProvingKey(builder); + return ProverInstance(builder); }(); - VerificationKey vk(proving_key.get_precomputed()); + VerificationKey vk(prover_instance.get_precomputed()); vinfo("Constructed UltraStarknetZKHonk verification key"); *out = to_heap_buffer(to_buffer(vk)); #else diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.hpp index e9b503094ccb..facbf0ca781e 100644 --- 
a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.hpp @@ -44,10 +44,10 @@ WASM_EXPORT void acir_init_verification_key(in_ptr acir_composer_ptr); WASM_EXPORT void acir_get_verification_key(in_ptr acir_composer_ptr, uint8_t** out); -WASM_EXPORT void acir_get_proving_key(in_ptr acir_composer_ptr, - uint8_t const* acir_vec, - bool const* recursive, - uint8_t** out); +WASM_EXPORT void acir_get_prover_instance(in_ptr acir_composer_ptr, + uint8_t const* acir_vec, + bool const* recursive, + uint8_t** out); WASM_EXPORT void acir_verify_proof(in_ptr acir_composer_ptr, uint8_t const* proof_buf, bool* result); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_contract.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_contract.hpp index 935c691bf7be..2605aedb37a2 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_contract.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_contract.hpp @@ -15,7 +15,7 @@ static const char HONK_CONTRACT_SOURCE[] = R"( pragma solidity ^0.8.27; interface IVerifier { - function verify(bytes calldata _proof, bytes32[] calldata _publicInputs) external view returns (bool); + function verify(bytes calldata _proof, bytes32[] calldata _publicInputs) external returns (bool); } type Fr is uint256; @@ -494,10 +494,10 @@ library TranscriptLib { pure returns (Fr[CONST_PROOF_SIZE_LOG_N] memory gateChallenges, Fr nextPreviousChallenge) { - for (uint256 i = 0; i < logN; i++) { - previousChallenge = FrLib.fromBytes32(keccak256(abi.encodePacked(Fr.unwrap(previousChallenge)))); - Fr unused; - (gateChallenges[i], unused) = splitChallenge(previousChallenge); + previousChallenge = FrLib.fromBytes32(keccak256(abi.encodePacked(Fr.unwrap(previousChallenge)))); + (gateChallenges[0],) = splitChallenge(previousChallenge); + for (uint256 i = 1; i < logN; i++) { + gateChallenges[i] = gateChallenges[i - 1] * gateChallenges[i - 1]; } 
nextPreviousChallenge = previousChallenge; } @@ -674,6 +674,7 @@ library RelationsLib { accumulateNnfRelation(purportedEvaluations, evaluations, powPartialEval); accumulatePoseidonExternalRelation(purportedEvaluations, evaluations, powPartialEval); accumulatePoseidonInternalRelation(purportedEvaluations, evaluations, powPartialEval); + // batch the subrelations with the alpha challenges to obtain the full honk relation accumulator = scaleAndBatchSubrelations(evaluations, alphas); } @@ -1051,7 +1052,7 @@ library RelationsLib { ap.index_delta = wire(p, WIRE.W_L_SHIFT) - wire(p, WIRE.W_L); ap.record_delta = wire(p, WIRE.W_4_SHIFT) - wire(p, WIRE.W_4); - ap.index_is_monotonically_increasing = ap.index_delta * ap.index_delta - ap.index_delta; // deg 2 + ap.index_is_monotonically_increasing = ap.index_delta * (ap.index_delta - Fr.wrap(1)); // deg 2 ap.adjacent_values_match_if_adjacent_indices_match = (ap.index_delta * MINUS_ONE + ONE) * ap.record_delta; // deg 2 @@ -1082,7 +1083,7 @@ library RelationsLib { * with a WRITE operation. */ Fr access_type = (wire(p, WIRE.W_4) - ap.partial_record_check); // will be 0 or 1 for honest Prover; deg 1 or 4 - ap.access_check = access_type * access_type - access_type; // check value is 0 or 1; deg 2 or 8 + ap.access_check = access_type * (access_type - Fr.wrap(1)); // check value is 0 or 1; deg 2 or 8 // reverse order we could re-use `ap.partial_record_check` 1 - ((w3' * eta + w2') * eta + w1') * eta // deg 1 or 4 @@ -1256,7 +1257,7 @@ library RelationsLib { function accumulatePoseidonExternalRelation( Fr[NUMBER_OF_ENTITIES] memory p, Fr[NUMBER_OF_SUBRELATIONS] memory evals, - Fr domainSep // i guess this is the scaling factor? 
+ Fr domainSep ) internal pure { PoseidonExternalParams memory ep; @@ -1354,7 +1355,7 @@ library RelationsLib { Fr[NUMBER_OF_SUBRELATIONS] memory evaluations, Fr[NUMBER_OF_ALPHAS] memory subrelationChallenges ) internal pure returns (Fr accumulator) { - accumulator = accumulator + evaluations[0]; + accumulator = evaluations[0]; for (uint256 i = 1; i < NUMBER_OF_SUBRELATIONS; ++i) { accumulator = accumulator + evaluations[i] * subrelationChallenges[i - 1]; @@ -1421,10 +1422,9 @@ library CommitmentSchemeLib { ); // Divide by the denominator batchedEvalRoundAcc = batchedEvalRoundAcc * (challengePower * (ONE - u) + u).invert(); - if (i <= logSize) { - batchedEvalAccumulator = batchedEvalRoundAcc; - foldPosEvaluations[i - 1] = batchedEvalRoundAcc; - } + + batchedEvalAccumulator = batchedEvalRoundAcc; + foldPosEvaluations[i - 1] = batchedEvalRoundAcc; } return foldPosEvaluations; } @@ -1789,6 +1789,8 @@ abstract contract BaseHonkVerifier is IVerifier { return sumcheckVerified && shpleminiVerified; // Boolean condition not required - nice for vanity :) } + uint256 constant PERMUTATION_ARGUMENT_VALUE_SEPARATOR = 1 << 28; + function computePublicInputDelta( bytes32[] memory publicInputs, Fr[PAIRING_POINTS_SIZE] memory pairingPointObject, @@ -1799,7 +1801,7 @@ abstract contract BaseHonkVerifier is IVerifier { Fr numerator = ONE; Fr denominator = ONE; - Fr numeratorAcc = gamma + (beta * FrLib.from($N + offset)); + Fr numeratorAcc = gamma + (beta * FrLib.from(PERMUTATION_ARGUMENT_VALUE_SEPARATOR + offset)); Fr denominatorAcc = gamma - (beta * FrLib.from(offset + 1)); { @@ -2071,32 +2073,29 @@ abstract contract BaseHonkVerifier is IVerifier { // Compute Shplonk constant term contributions from Aₗ(± r^{2ˡ}) for l = 1, ..., m-1; // Compute scalar multipliers for each fold commitment for (uint256 i = 0; i < $LOG_N - 1; ++i) { - bool dummy_round = i >= ($LOG_N - 1); - - if (!dummy_round) { - // Update inverted denominators - mem.posInvertedDenominator = (tp.shplonkZ - 
powers_of_evaluation_challenge[i + 1]).invert(); - mem.negInvertedDenominator = (tp.shplonkZ + powers_of_evaluation_challenge[i + 1]).invert(); - - // Compute the scalar multipliers for Aₗ(± r^{2ˡ}) and [Aₗ] - mem.scalingFactorPos = mem.batchingChallenge * mem.posInvertedDenominator; - mem.scalingFactorNeg = mem.batchingChallenge * tp.shplonkNu * mem.negInvertedDenominator; - // [Aₗ] is multiplied by -v^{2l}/(z-r^{2^l}) - v^{2l+1} /(z+ r^{2^l}) - scalars[NUMBER_UNSHIFTED + 1 + i] = mem.scalingFactorNeg.neg() + mem.scalingFactorPos.neg(); - - // Accumulate the const term contribution given by - // v^{2l} * Aₗ(r^{2ˡ}) /(z-r^{2^l}) + v^{2l+1} * Aₗ(-r^{2ˡ}) /(z+ r^{2^l}) - Fr accumContribution = mem.scalingFactorNeg * proof.geminiAEvaluations[i + 1]; - accumContribution = accumContribution + mem.scalingFactorPos * foldPosEvaluations[i + 1]; - mem.constantTermAccumulator = mem.constantTermAccumulator + accumContribution; - // Update the running power of v - mem.batchingChallenge = mem.batchingChallenge * tp.shplonkNu * tp.shplonkNu; - } + // Update inverted denominators + mem.posInvertedDenominator = (tp.shplonkZ - powers_of_evaluation_challenge[i + 1]).invert(); + mem.negInvertedDenominator = (tp.shplonkZ + powers_of_evaluation_challenge[i + 1]).invert(); + + // Compute the scalar multipliers for Aₗ(± r^{2ˡ}) and [Aₗ] + mem.scalingFactorPos = mem.batchingChallenge * mem.posInvertedDenominator; + mem.scalingFactorNeg = mem.batchingChallenge * tp.shplonkNu * mem.negInvertedDenominator; + // [Aₗ] is multiplied by -v^{2l}/(z-r^{2^l}) - v^{2l+1} /(z+ r^{2^l}) + scalars[NUMBER_UNSHIFTED + 1 + i] = mem.scalingFactorNeg.neg() + mem.scalingFactorPos.neg(); + + // Accumulate the const term contribution given by + // v^{2l} * Aₗ(r^{2ˡ}) /(z-r^{2^l}) + v^{2l+1} * Aₗ(-r^{2ˡ}) /(z+ r^{2^l}) + Fr accumContribution = mem.scalingFactorNeg * proof.geminiAEvaluations[i + 1]; + + accumContribution = accumContribution + mem.scalingFactorPos * foldPosEvaluations[i + 1]; + 
mem.constantTermAccumulator = mem.constantTermAccumulator + accumContribution; + // Update the running power of v + mem.batchingChallenge = mem.batchingChallenge * tp.shplonkNu * tp.shplonkNu; commitments[NUMBER_UNSHIFTED + 1 + i] = proof.geminiFoldComms[i]; } - // Finalise the batch opening claim + // Finalize the batch opening claim commitments[NUMBER_UNSHIFTED + $LOG_N] = Honk.G1Point({x: 1, y: 2}); scalars[NUMBER_UNSHIFTED + $LOG_N] = mem.constantTermAccumulator; diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_optimized_contract.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_optimized_contract.hpp new file mode 100644 index 000000000000..de1249a771c6 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_optimized_contract.hpp @@ -0,0 +1,3586 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } +// ===================== + +#pragma once +#include +#include +#include +#include +#include + +// Complete implementation of generate_offsets.py converted to C++ +inline std::string generate_memory_offsets(int log_n) +{ + const int BATCHED_RELATION_PARTIAL_LENGTH = 8; + const int NUMBER_OF_SUBRELATIONS = 28; + const int NUMBER_OF_ALPHAS = NUMBER_OF_SUBRELATIONS - 1; + const int START_POINTER = 0x1000; + const int SCRATCH_SPACE_POINTER = 0x100; + const int BARYCENTRIC_DOMAIN_SIZE = 8; + + std::ostringstream out; + + // Helper lambdas + auto print_header_centered = [&](const std::string& text) { + const std::string top = "/*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/"; + const std::string bottom = "/*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/"; + size_t width = static_cast(top.length()) - 4; // exclude /* and */ + std::string centered = + "/*" + std::string(static_cast((width - text.length()) / 
2), ' ') + text + + std::string(static_cast(width - text.length() - (width - text.length()) / 2), ' ') + "*/"; + out << "\n" << top << "\n" << centered << "\n" << bottom << "\n"; + }; + + auto print_loc = [&](int pointer, const std::string& name) { + out << "uint256 internal constant " << name << " = " << std::showbase << std::hex << pointer << ";\n"; + }; + + auto print_fr = print_loc; + + auto print_g1 = [&](int pointer, const std::string& name) { + print_loc(pointer, name + "_X_LOC"); + print_loc(pointer + 32, name + "_Y_LOC"); + }; + + // Data arrays from Python script + const std::vector vk_fr = { "VK_CIRCUIT_SIZE_LOC", + "VK_NUM_PUBLIC_INPUTS_LOC", + "VK_PUB_INPUTS_OFFSET_LOC" }; + + const std::vector vk_g1 = { "Q_M", + "Q_C", + "Q_L", + "Q_R", + "Q_O", + "Q_4", + "Q_LOOKUP", + "Q_ARITH", + "Q_DELTA_RANGE", + "Q_ELLIPTIC", + "Q_MEMORY", + "Q_NNF", + "Q_POSEIDON_2_EXTERNAL", + "Q_POSEIDON_2_INTERNAL", + "SIGMA_1", + "SIGMA_2", + "SIGMA_3", + "SIGMA_4", + "ID_1", + "ID_2", + "ID_3", + "ID_4", + "TABLE_1", + "TABLE_2", + "TABLE_3", + "TABLE_4", + "LAGRANGE_FIRST", + "LAGRANGE_LAST" }; + + const std::vector proof_fr = { "PROOF_CIRCUIT_SIZE", + "PROOF_NUM_PUBLIC_INPUTS", + "PROOF_PUB_INPUTS_OFFSET" }; + + const std::vector pairing_points = { "PAIRING_POINT_0", "PAIRING_POINT_1", "PAIRING_POINT_2", + "PAIRING_POINT_3", "PAIRING_POINT_4", "PAIRING_POINT_5", + "PAIRING_POINT_6", "PAIRING_POINT_7", "PAIRING_POINT_8", + "PAIRING_POINT_9", "PAIRING_POINT_10", "PAIRING_POINT_11", + "PAIRING_POINT_12", "PAIRING_POINT_13", "PAIRING_POINT_14", + "PAIRING_POINT_15" }; + + const std::vector proof_g1 = { + "W_L", "W_R", "W_O", "LOOKUP_READ_COUNTS", "LOOKUP_READ_TAGS", "W_4", "LOOKUP_INVERSES", "Z_PERM" + }; + + const std::vector entities = { "QM", + "QC", + "QL", + "QR", + "QO", + "Q4", + "QLOOKUP", + "QARITH", + "QRANGE", + "QELLIPTIC", + "QMEMORY", + "QNNF", + "QPOSEIDON2_EXTERNAL", + "QPOSEIDON2_INTERNAL", + "SIGMA1", + "SIGMA2", + "SIGMA3", + "SIGMA4", + "ID1", + "ID2", + 
"ID3", + "ID4", + "TABLE1", + "TABLE2", + "TABLE3", + "TABLE4", + "LAGRANGE_FIRST", + "LAGRANGE_LAST", + "W1", + "W2", + "W3", + "W4", + "Z_PERM", + "LOOKUP_INVERSES", + "LOOKUP_READ_COUNTS", + "LOOKUP_READ_TAGS", + "W1_SHIFT", + "W2_SHIFT", + "W3_SHIFT", + "W4_SHIFT", + "Z_PERM_SHIFT" }; + + const std::vector challenges = { "ETA", + "ETA_TWO", + "ETA_THREE", + "BETA", + "GAMMA", + "RHO", + "GEMINI_R", + "SHPLONK_NU", + "SHPLONK_Z", + "PUBLIC_INPUTS_DELTA_NUMERATOR", + "PUBLIC_INPUTS_DELTA_DENOMINATOR" }; + + const std::vector subrelation_intermediates = { "AUX_NON_NATIVE_FIELD_IDENTITY", + "AUX_LIMB_ACCUMULATOR_IDENTITY", + "AUX_RAM_CONSISTENCY_CHECK_IDENTITY", + "AUX_ROM_CONSISTENCY_CHECK_IDENTITY", + "AUX_MEMORY_CHECK_IDENTITY" }; + + const std::vector general_intermediates = { "FINAL_ROUND_TARGET_LOC", "POW_PARTIAL_EVALUATION_LOC" }; + + int pointer = START_POINTER; + + // VK INDICIES + print_header_centered("VK INDICIES"); + for (const auto& item : vk_fr) { + print_fr(pointer, item); + pointer += 32; + } + for (const auto& item : vk_g1) { + print_g1(pointer, item); + pointer += 64; + } + + // PROOF INDICIES + print_header_centered("PROOF INDICIES"); + for (const auto& item : pairing_points) { + print_fr(pointer, item); + pointer += 32; + } + for (const auto& item : proof_g1) { + print_g1(pointer, item); + pointer += 64; + } + + // SUMCHECK UNIVARIATES + print_header_centered("PROOF INDICIES - SUMCHECK UNIVARIATES"); + for (int size = 0; size < log_n; ++size) { + for (int relation_len = 0; relation_len < BATCHED_RELATION_PARTIAL_LENGTH; ++relation_len) { + std::string name = + "SUMCHECK_UNIVARIATE_" + std::to_string(size) + "_" + std::to_string(relation_len) + "_LOC"; + print_fr(pointer, name); + pointer += 32; + } + } + + // SUMCHECK EVALUATIONS + print_header_centered("PROOF INDICIES - SUMCHECK EVALUATIONS"); + for (const auto& entity : entities) { + print_fr(pointer, entity + "_EVAL_LOC"); + pointer += 32; + } + + // SHPLEMINI - GEMINI FOLDING COMMS + 
print_header_centered("PROOF INDICIES - GEMINI FOLDING COMMS"); + for (int size = 0; size < log_n - 1; ++size) { + print_g1(pointer, "GEMINI_FOLD_UNIVARIATE_" + std::to_string(size)); + pointer += 64; + } + + // GEMINI FOLDING EVALUATIONS + print_header_centered("PROOF INDICIES - GEMINI FOLDING EVALUATIONS"); + for (int size = 0; size < log_n; ++size) { + print_fr(pointer, "GEMINI_A_EVAL_" + std::to_string(size)); + pointer += 32; + } + print_g1(pointer, "SHPLONK_Q"); + pointer += 64; + print_g1(pointer, "KZG_QUOTIENT"); + pointer += 64; + + print_header_centered("PROOF INDICIES - COMPLETE"); + + // CHALLENGES + print_header_centered("CHALLENGES"); + for (const auto& chall : challenges) { + print_fr(pointer, chall + "_CHALLENGE"); + pointer += 32; + } + for (int alpha = 0; alpha < NUMBER_OF_ALPHAS; ++alpha) { + print_fr(pointer, "ALPHA_CHALLENGE_" + std::to_string(alpha)); + pointer += 32; + } + for (int gate = 0; gate < log_n; ++gate) { + print_fr(pointer, "GATE_CHALLENGE_" + std::to_string(gate)); + pointer += 32; + } + for (int sum_u = 0; sum_u < log_n; ++sum_u) { + print_fr(pointer, "SUM_U_CHALLENGE_" + std::to_string(sum_u)); + pointer += 32; + } + print_header_centered("CHALLENGES - COMPLETE"); + + // RUNTIME MEMORY + print_header_centered("SUMCHECK - RUNTIME MEMORY"); + print_header_centered("SUMCHECK - RUNTIME MEMORY - BARYCENTRIC"); + + // Barycentric domain (uses scratch space) + int bary_pointer = SCRATCH_SPACE_POINTER; + for (int i = 0; i < BARYCENTRIC_DOMAIN_SIZE; ++i) { + print_fr(bary_pointer, "BARYCENTRIC_LAGRANGE_DENOMINATOR_" + std::to_string(i) + "_LOC"); + bary_pointer += 32; + } + for (int i = 0; i < log_n; ++i) { + for (int j = 0; j < BARYCENTRIC_DOMAIN_SIZE; ++j) { + print_fr(bary_pointer, + "BARYCENTRIC_DENOMINATOR_INVERSES_" + std::to_string(i) + "_" + std::to_string(j) + "_LOC"); + bary_pointer += 32; + } + } + print_header_centered("SUMCHECK - RUNTIME MEMORY - BARYCENTRIC COMPLETE"); + + // SUBRELATION EVALUATIONS + 
print_header_centered("SUMCHECK - RUNTIME MEMORY - SUBRELATION EVALUATIONS"); + for (int i = 0; i < NUMBER_OF_SUBRELATIONS; ++i) { + print_fr(pointer, "SUBRELATION_EVAL_" + std::to_string(i) + "_LOC"); + pointer += 32; + } + print_header_centered("SUMCHECK - RUNTIME MEMORY - SUBRELATION EVALUATIONS COMPLETE"); + + // SUBRELATION INTERMEDIATES + print_header_centered("SUMCHECK - RUNTIME MEMORY - SUBRELATION INTERMEDIATES"); + for (const auto& item : general_intermediates) { + print_fr(pointer, item); + pointer += 32; + } + for (const auto& item : subrelation_intermediates) { + print_fr(pointer, item); + pointer += 32; + } + print_header_centered("SUMCHECK - RUNTIME MEMORY - COMPLETE"); + + // SHPLEMINI RUNTIME MEMORY + print_header_centered("SHPLEMINI - RUNTIME MEMORY"); + print_header_centered("SHPLEMINI - POWERS OF EVALUATION CHALLENGE"); + out << "/// {{ UNROLL_SECTION_START POWERS_OF_EVALUATION_CHALLENGE }}\n"; + for (int i = 0; i < log_n; ++i) { + print_fr(pointer, "POWERS_OF_EVALUATION_CHALLENGE_" + std::to_string(i) + "_LOC"); + pointer += 32; + } + out << "/// {{ UNROLL_SECTION_END POWERS_OF_EVALUATION_CHALLENGE }}\n"; + print_header_centered("SHPLEMINI - POWERS OF EVALUATION CHALLENGE COMPLETE"); + + // BATCH SCALARS + print_header_centered("SHPLEMINI - RUNTIME MEMORY - BATCH SCALARS"); + const int BATCH_SIZE = 69; + for (int i = 0; i < BATCH_SIZE; ++i) { + print_fr(pointer, "BATCH_SCALAR_" + std::to_string(i) + "_LOC"); + pointer += 32; + } + print_header_centered("SHPLEMINI - RUNTIME MEMORY - BATCH SCALARS COMPLETE"); + + // INVERSIONS + print_header_centered("SHPLEMINI - RUNTIME MEMORY - INVERSIONS"); + + // Inverted gemini denominators + int inv_pointer = SCRATCH_SPACE_POINTER; + for (int i = 0; i < log_n + 1; ++i) { + print_fr(inv_pointer, "INVERTED_GEMINI_DENOMINATOR_" + std::to_string(i) + "_LOC"); + inv_pointer += 32; + } + + // Batched evaluation accumulator inversions + for (int i = 0; i < log_n; ++i) { + print_fr(inv_pointer, 
"BATCH_EVALUATION_ACCUMULATOR_INVERSION_" + std::to_string(i) + "_LOC"); + inv_pointer += 32; + } + + out << "\n"; + print_fr(inv_pointer, "BATCHED_EVALUATION_LOC"); + inv_pointer += 32; + print_fr(inv_pointer, "CONSTANT_TERM_ACCUMULATOR_LOC"); + inv_pointer += 32; + + out << "\n"; + print_fr(inv_pointer, "POS_INVERTED_DENOMINATOR"); + inv_pointer += 32; + print_fr(inv_pointer, "NEG_INVERTED_DENOMINATOR"); + inv_pointer += 32; + + out << "\n"; + out << "// LOG_N challenge pow minus u\n"; + for (int i = 0; i < log_n; ++i) { + print_fr(inv_pointer, "INVERTED_CHALLENEGE_POW_MINUS_U_" + std::to_string(i) + "_LOC"); + inv_pointer += 32; + } + + out << "\n"; + out << "// LOG_N pos_inverted_off\n"; + for (int i = 0; i < log_n; ++i) { + print_fr(inv_pointer, "POS_INVERTED_DENOM_" + std::to_string(i) + "_LOC"); + inv_pointer += 32; + } + + out << "\n"; + out << "// LOG_N neg_inverted_off\n"; + for (int i = 0; i < log_n; ++i) { + print_fr(inv_pointer, "NEG_INVERTED_DENOM_" + std::to_string(i) + "_LOC"); + inv_pointer += 32; + } + + out << "\n"; + for (int i = 0; i < log_n; ++i) { + print_fr(inv_pointer, "FOLD_POS_EVALUATIONS_" + std::to_string(i) + "_LOC"); + inv_pointer += 32; + } + + print_header_centered("SHPLEMINI RUNTIME MEMORY - INVERSIONS - COMPLETE"); + print_header_centered("SHPLEMINI RUNTIME MEMORY - COMPLETE"); + + out << "\n"; + print_fr(pointer, "LATER_SCRATCH_SPACE"); + pointer += 32; + + // Temporary space + print_header_centered("Temporary space"); + for (int i = 0; i < 3 * log_n; ++i) { + print_fr(pointer, "TEMP_" + std::to_string(i) + "_LOC"); + pointer += 32; + } + print_header_centered("Temporary space - COMPLETE"); + + // Scratch space aliases + out << "\n"; + out << "// Aliases for scratch space\n"; + out << "// TODO: work out the stack scheduling for these\n"; + print_fr(0x00, "CHALL_POW_LOC"); + print_fr(0x20, "SUMCHECK_U_LOC"); + print_fr(0x40, "GEMINI_A_LOC"); + out << "\n"; + print_fr(0x00, "SS_POS_INV_DENOM_LOC"); + print_fr(0x20, 
"SS_NEG_INV_DENOM_LOC"); + print_fr(0x40, "SS_GEMINI_EVALS_LOC"); + + // EC aliases + out << "\n\n"; + out << "// Aliases\n"; + out << "// Aliases for wire values (Elliptic curve gadget)\n"; + print_header_centered("SUMCHECK - MEMORY ALIASES"); + + return out.str(); +} + +// Source code for the Ultrahonk Solidity verifier. +// It's expected that the AcirComposer will inject a library which will load the verification key into memory. +// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays) +static const char HONK_CONTRACT_OPT_SOURCE[] = R"( +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2022 Aztec +pragma solidity ^0.8.27; + +interface IVerifier { + function verify(bytes calldata _proof, bytes32[] calldata _publicInputs) external view returns (bool); +} + + + +uint256 constant NUMBER_OF_SUBRELATIONS = 28; +uint256 constant BATCHED_RELATION_PARTIAL_LENGTH = 8; +uint256 constant ZK_BATCHED_RELATION_PARTIAL_LENGTH = 9; +uint256 constant NUMBER_OF_ENTITIES = 41; +uint256 constant NUMBER_UNSHIFTED = 36; +uint256 constant NUMBER_TO_BE_SHIFTED = 5; +uint256 constant PAIRING_POINTS_SIZE = 16; + +uint256 constant VK_HASH = {{ VK_HASH }}; +uint256 constant CIRCUIT_SIZE = {{ CIRCUIT_SIZE }}; +uint256 constant LOG_N = {{ LOG_CIRCUIT_SIZE }}; +uint256 constant NUMBER_PUBLIC_INPUTS = {{ NUM_PUBLIC_INPUTS }}; +uint256 constant REAL_NUMBER_PUBLIC_INPUTS = {{ NUM_PUBLIC_INPUTS }} - 16; +uint256 constant PUBLIC_INPUTS_OFFSET = 1; +// LOG_N * 8 +uint256 constant NUMBER_OF_BARYCENTRIC_INVERSES = {{ NUMBER_OF_BARYCENTRIC_INVERSES }}; + +error PUBLIC_INPUT_TOO_LARGE(); +error SUMCHECK_FAILED(); +error PAIRING_FAILED(); +error BATCH_ACCUMULATION_FAILED(); +error MODEXP_FAILED(); + +contract HonkVerifier is IVerifier { + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* SLAB ALLOCATION */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + /** + * We manually manage memory within this optimised implementation + * Memory is loaded into a large slab 
that is ordered in the following way + * + * // TODO: ranges + * ** + */ + + // {{ SECTION_START MEMORY_LAYOUT }} + // {{ SECTION_END MEMORY_LAYOUT }} + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* SUMCHECK - MEMORY ALIASES */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + uint256 internal constant EC_X_1 = W2_EVAL_LOC; + uint256 internal constant EC_Y_1 = W3_EVAL_LOC; + uint256 internal constant EC_X_2 = W1_SHIFT_EVAL_LOC; + uint256 internal constant EC_Y_2 = W4_SHIFT_EVAL_LOC; + uint256 internal constant EC_Y_3 = W3_SHIFT_EVAL_LOC; + uint256 internal constant EC_X_3 = W2_SHIFT_EVAL_LOC; + + // Aliases for selectors (Elliptic curve gadget) + uint256 internal constant EC_Q_SIGN = QL_EVAL_LOC; + uint256 internal constant EC_Q_IS_DOUBLE = QM_EVAL_LOC; + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* CONSTANTS */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + uint256 internal constant GRUMPKIN_CURVE_B_PARAMETER_NEGATED = 17; // -(-17) + + // Auxiliary relation constants + // In the Non Native Field Arithmetic Relation, large field elements are broken up into 4 LIMBs of 68 `LIMB_SIZE` bits each. + uint256 internal constant LIMB_SIZE = 0x100000000000000000; // 2<<68 + + // In the Delta Range Check Relation, there is a range checking relation that can validate 14-bit range checks with only 1 + // extra relation in the execution trace. + // For large range checks, we decompose them into a collection of 14-bit range checks. 
+ uint256 internal constant SUBLIMB_SHIFT = 0x4000; // 2<<14 + + // Poseidon2 internal constants + // https://github.com/HorizenLabs/poseidon2/blob/main/poseidon2_rust_params.sage - derivation code + uint256 internal constant POS_INTERNAL_MATRIX_D_0 = + 0x10dc6e9c006ea38b04b1e03b4bd9490c0d03f98929ca1d7fb56821fd19d3b6e7; + uint256 internal constant POS_INTERNAL_MATRIX_D_1 = + 0x0c28145b6a44df3e0149b3d0a30b3bb599df9756d4dd9b84a86b38cfb45a740b; + uint256 internal constant POS_INTERNAL_MATRIX_D_2 = + 0x00544b8338791518b2c7645a50392798b21f75bb60e3596170067d00141cac15; + uint256 internal constant POS_INTERNAL_MATRIX_D_3 = + 0x222c01175718386f2e2e82eb122789e352e105a3b8fa852613bc534433ee428b; + + // Constants inspecting proof components + uint256 internal constant NUMBER_OF_UNSHIFTED_ENTITIES = 36; + // Shifted columns are columes that are duplicates of existing columns but right-shifted by 1 + uint256 internal constant NUMBER_OF_SHIFTED_ENTITIES = 5; + uint256 internal constant TOTAL_NUMBER_OF_ENTITIES = 41; + + // Constants for performing batch multiplication + uint256 internal constant ACCUMULATOR = 0x00; + uint256 internal constant ACCUMULATOR_2 = 0x40; + uint256 internal constant G1_LOCATION = 0x60; + uint256 internal constant G1_Y_LOCATION = 0x80; + uint256 internal constant SCALAR_LOCATION = 0xa0; + + uint256 internal constant LOWER_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF; + + // Group order + uint256 internal constant Q = 21888242871839275222246405745257275088696311157297823662689037894645226208583; // EC group order + + // Field order constants + // -1/2 mod p + uint256 internal constant NEG_HALF_MODULO_P = 0x183227397098d014dc2822db40c0ac2e9419f4243cdcb848a1f0fac9f8000000; + uint256 internal constant P = 21888242871839275222246405745257275088548364400416034343698204186575808495617; + uint256 internal constant P_SUB_1 = 21888242871839275222246405745257275088548364400416034343698204186575808495616; + uint256 internal constant P_SUB_2 = 
21888242871839275222246405745257275088548364400416034343698204186575808495615; + uint256 internal constant P_SUB_3 = 21888242871839275222246405745257275088548364400416034343698204186575808495614; + uint256 internal constant P_SUB_4 = 21888242871839275222246405745257275088548364400416034343698204186575808495613; + uint256 internal constant P_SUB_5 = 21888242871839275222246405745257275088548364400416034343698204186575808495612; + uint256 internal constant P_SUB_6 = 21888242871839275222246405745257275088548364400416034343698204186575808495611; + uint256 internal constant P_SUB_7 = 21888242871839275222246405745257275088548364400416034343698204186575808495610; + + // Barycentric evaluation constants + uint256 internal constant BARYCENTRIC_LAGRANGE_DENOMINATOR_0 = + 0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593efffec51; + uint256 internal constant BARYCENTRIC_LAGRANGE_DENOMINATOR_1 = + 0x00000000000000000000000000000000000000000000000000000000000002d0; + uint256 internal constant BARYCENTRIC_LAGRANGE_DENOMINATOR_2 = + 0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593efffff11; + uint256 internal constant BARYCENTRIC_LAGRANGE_DENOMINATOR_3 = + 0x0000000000000000000000000000000000000000000000000000000000000090; + uint256 internal constant BARYCENTRIC_LAGRANGE_DENOMINATOR_4 = + 0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593efffff71; + uint256 internal constant BARYCENTRIC_LAGRANGE_DENOMINATOR_5 = + 0x00000000000000000000000000000000000000000000000000000000000000f0; + uint256 internal constant BARYCENTRIC_LAGRANGE_DENOMINATOR_6 = + 0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593effffd31; + uint256 internal constant BARYCENTRIC_LAGRANGE_DENOMINATOR_7 = + 0x00000000000000000000000000000000000000000000000000000000000013b0; + + // Constants for computing public input delta + uint256 constant PERMUTATION_ARGUMENT_VALUE_SEPARATOR = 1 << 28; + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* ERRORS */ + 
/*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + uint256 internal constant PUBLIC_INPUT_TOO_LARGE_SELECTOR = 0x803bff7c; + uint256 internal constant SUMCHECK_FAILED_SELECTOR = 0x7d06dd7fa; + uint256 internal constant PAIRING_FAILED_SELECTOR = 0xd71fd2634; + uint256 internal constant BATCH_ACCUMULATION_FAILED_SELECTOR = 0xfef01a9a4; + uint256 internal constant MODEXP_FAILED_SELECTOR = 0xf442f1632; + uint256 internal constant PROOF_POINT_NOT_ON_CURVE_SELECTOR = 0x661e012dec; + + constructor() {} + + function verify(bytes calldata, /*proof*/ bytes32[] calldata /*public_inputs*/ ) + public + view + override + returns (bool) + { + // Load the proof from calldata in one large chunk + assembly { + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* LOAD VERIFCATION KEY */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + // Write the verification key into memory + // + // Although defined at the top of the file, it is used towards the end of the algorithm when batching in the commitment scheme. 
+ function loadVk() { + mstore(Q_L_X_LOC, {{ Q_L_X_LOC }}) + mstore(Q_L_Y_LOC, {{ Q_L_Y_LOC }}) + mstore(Q_R_X_LOC, {{ Q_R_X_LOC }}) + mstore(Q_R_Y_LOC, {{ Q_R_Y_LOC }}) + mstore(Q_O_X_LOC, {{ Q_O_X_LOC }}) + mstore(Q_O_Y_LOC, {{ Q_O_Y_LOC }}) + mstore(Q_4_X_LOC, {{ Q_4_X_LOC }}) + mstore(Q_4_Y_LOC, {{ Q_4_Y_LOC }}) + mstore(Q_M_X_LOC, {{ Q_M_X_LOC }}) + mstore(Q_M_Y_LOC, {{ Q_M_Y_LOC }}) + mstore(Q_C_X_LOC, {{ Q_C_X_LOC }}) + mstore(Q_C_Y_LOC, {{ Q_C_Y_LOC }}) + mstore(Q_LOOKUP_X_LOC, {{ Q_LOOKUP_X_LOC }}) + mstore(Q_LOOKUP_Y_LOC, {{ Q_LOOKUP_Y_LOC }}) + mstore(Q_ARITH_X_LOC, {{ Q_ARITH_X_LOC }}) + mstore(Q_ARITH_Y_LOC, {{ Q_ARITH_Y_LOC }}) + mstore(Q_DELTA_RANGE_X_LOC, {{ Q_DELTA_RANGE_X_LOC }}) + mstore(Q_DELTA_RANGE_Y_LOC, {{ Q_DELTA_RANGE_Y_LOC }}) + mstore(Q_ELLIPTIC_X_LOC, {{ Q_ELLIPTIC_X_LOC }}) + mstore(Q_ELLIPTIC_Y_LOC, {{ Q_ELLIPTIC_Y_LOC }}) + mstore(Q_MEMORY_X_LOC, {{ Q_MEMORY_X_LOC }}) + mstore(Q_MEMORY_Y_LOC, {{ Q_MEMORY_Y_LOC }}) + mstore(Q_NNF_X_LOC, {{ Q_NNF_X_LOC }}) + mstore(Q_NNF_Y_LOC, {{ Q_NNF_Y_LOC }}) + mstore(Q_POSEIDON_2_EXTERNAL_X_LOC, {{ Q_POSEIDON_2_EXTERNAL_X_LOC }}) + mstore(Q_POSEIDON_2_EXTERNAL_Y_LOC, {{ Q_POSEIDON_2_EXTERNAL_Y_LOC }}) + mstore(Q_POSEIDON_2_INTERNAL_X_LOC, {{ Q_POSEIDON_2_INTERNAL_X_LOC }}) + mstore(Q_POSEIDON_2_INTERNAL_Y_LOC, {{ Q_POSEIDON_2_INTERNAL_Y_LOC }}) + mstore(SIGMA_1_X_LOC, {{ SIGMA_1_X_LOC }}) + mstore(SIGMA_1_Y_LOC, {{ SIGMA_1_Y_LOC }}) + mstore(SIGMA_2_X_LOC, {{ SIGMA_2_X_LOC }}) + mstore(SIGMA_2_Y_LOC, {{ SIGMA_2_Y_LOC }}) + mstore(SIGMA_3_X_LOC, {{ SIGMA_3_X_LOC }}) + mstore(SIGMA_3_Y_LOC, {{ SIGMA_3_Y_LOC }}) + mstore(SIGMA_4_X_LOC, {{ SIGMA_4_X_LOC }}) + mstore(SIGMA_4_Y_LOC, {{ SIGMA_4_Y_LOC }}) + mstore(TABLE_1_X_LOC, {{ TABLE_1_X_LOC }}) + mstore(TABLE_1_Y_LOC, {{ TABLE_1_Y_LOC }}) + mstore(TABLE_2_X_LOC, {{ TABLE_2_X_LOC }}) + mstore(TABLE_2_Y_LOC, {{ TABLE_2_Y_LOC }}) + mstore(TABLE_3_X_LOC, {{ TABLE_3_X_LOC }}) + mstore(TABLE_3_Y_LOC, {{ TABLE_3_Y_LOC }}) + mstore(TABLE_4_X_LOC, {{ 
TABLE_4_X_LOC }}) + mstore(TABLE_4_Y_LOC, {{ TABLE_4_Y_LOC }}) + mstore(ID_1_X_LOC, {{ ID_1_X_LOC }}) + mstore(ID_1_Y_LOC, {{ ID_1_Y_LOC }}) + mstore(ID_2_X_LOC, {{ ID_2_X_LOC }}) + mstore(ID_2_Y_LOC, {{ ID_2_Y_LOC }}) + mstore(ID_3_X_LOC, {{ ID_3_X_LOC }}) + mstore(ID_3_Y_LOC, {{ ID_3_Y_LOC }}) + mstore(ID_4_X_LOC, {{ ID_4_X_LOC }}) + mstore(ID_4_Y_LOC, {{ ID_4_Y_LOC }}) + mstore(LAGRANGE_FIRST_X_LOC, {{ LAGRANGE_FIRST_X_LOC }}) + mstore(LAGRANGE_FIRST_Y_LOC, {{ LAGRANGE_FIRST_Y_LOC }}) + mstore(LAGRANGE_LAST_X_LOC, {{ LAGRANGE_LAST_X_LOC }}) + mstore(LAGRANGE_LAST_Y_LOC, {{ LAGRANGE_LAST_Y_LOC }}) + } + + // Prime field order - placing on the stack + let p := P + + { + let proof_ptr := add(calldataload(0x04), 0x24) + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* GENERATE CHALLENGES */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + /* + * Proof points (affine coordinates) in the proof are in the following format, where offset is + * the offset in the entire proof until the first bit of the x coordinate + * offset + 0x00: x + * offset + 0x20: y + */ + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* GENERATE ETA CHALLENGE */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + /* Eta challenge participants + * - circuit size + * - number of public inputs + * - public inputs offset + * - w1 + * - w2 + * - w3 + * + * Where circuit size, number of public inputs and public inputs offset are all 32 byte values + * and w1,w2,w3 are all proof points values + */ + + mstore(0x00, VK_HASH) + + let public_inputs_start := add(calldataload(0x24), 0x24) + let public_inputs_size := mul(REAL_NUMBER_PUBLIC_INPUTS, 0x20) + + // Copy the public inputs into the eta buffer + calldatacopy(0x20, public_inputs_start, public_inputs_size) + + // Copy Pairing points into eta buffer + let public_inputs_end := add(0x20, public_inputs_size) + + calldatacopy(public_inputs_end, proof_ptr, 0x200) + + // 0x20 * 8 
= 0x100 + // End of public inputs + pairing point + calldatacopy(add(0x220, public_inputs_size), add(proof_ptr, 0x200), 0x100) + + // 0x2e0 = 1 * 32 bytes + 3 * 64 bytes for (w1,w2,w3) + 0x200 for pairing points + let eta_input_length := add(0x2e0, public_inputs_size) + + let prev_challenge := mod(keccak256(0x00, eta_input_length), p) + mstore(0x00, prev_challenge) + + let eta := and(prev_challenge, LOWER_128_MASK) + let etaTwo := shr(128, prev_challenge) + + mstore(ETA_CHALLENGE, eta) + mstore(ETA_TWO_CHALLENGE, etaTwo) + + prev_challenge := mod(keccak256(0x00, 0x20), p) + + mstore(0x00, prev_challenge) + let eta_three := and(prev_challenge, LOWER_128_MASK) + mstore(ETA_THREE_CHALLENGE, eta_three) + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* LOAD PROOF INTO MEMORY */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + // As all of our proof points are written in contiguous parts of memory, we call use a single + // calldatacopy to place all of our proof into the correct memory regions + // We copy the entire proof into memory as we must hash each proof section for challenge + // evaluation + // The last item in the proof, and the first item in the proof (pairing point 0) + let proof_size := sub(ETA_CHALLENGE, PAIRING_POINT_0) + + calldatacopy(PAIRING_POINT_0, proof_ptr, proof_size) + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* GENERATE BETA and GAMMAA CHALLENGE */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + + // Generate Beta and Gamma Chalenges + // - prevChallenge + // - LOOKUP_READ_COUNTS + // - LOOKUP_READ_TAGS + // - W4 + mcopy(0x20, LOOKUP_READ_COUNTS_X_LOC, 0xc0) + + prev_challenge := mod(keccak256(0x00, 0xe0), p) + mstore(0x00, prev_challenge) + let beta := and(prev_challenge, LOWER_128_MASK) + let gamma := shr(128, prev_challenge) + + mstore(BETA_CHALLENGE, beta) + mstore(GAMMA_CHALLENGE, gamma) + + 
/*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* ALPHA CHALLENGES */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + // Generate Alpha challenges - non-linearise the gate contributions + // + // There are 26 total subrelations in this honk relation, we do not need to non linearise the first sub relation. + // There are 25 total gate contributions, a gate contribution is analogous to + // a custom gate, it is an expression which must evaluate to zero for each + // row in the constraint matrix + // + // If we do not non-linearise sub relations, then sub relations which rely + // on the same wire will interact with each other's sums. + + mcopy(0x20, LOOKUP_INVERSES_X_LOC, 0x80) + + prev_challenge := mod(keccak256(0x00, 0xa0), p) + mstore(0x00, prev_challenge) + let alpha_0 := and(prev_challenge, LOWER_128_MASK) + let alpha_1 := shr(128, prev_challenge) + mstore(ALPHA_CHALLENGE_0, alpha_0) + mstore(ALPHA_CHALLENGE_1, alpha_1) + + // For number of alphas / 2 ( 26 /2 ) + let alpha_off_set := ALPHA_CHALLENGE_2 + for {} lt(alpha_off_set, ALPHA_CHALLENGE_26) {} { + prev_challenge := mod(keccak256(0x00, 0x20), p) + mstore(0x00, prev_challenge) + + let alpha_even := and(prev_challenge, LOWER_128_MASK) + let alpha_odd := shr(128, prev_challenge) + + mstore(alpha_off_set, alpha_even) + mstore(add(alpha_off_set, 0x20), alpha_odd) + + alpha_off_set := add(alpha_off_set, 0x40) + } + + // The final alpha challenge + prev_challenge := mod(keccak256(0x00, 0x20), p) + mstore(0x00, prev_challenge) + + let alpha_26 := and(prev_challenge, LOWER_128_MASK) + mstore(ALPHA_CHALLENGE_26, alpha_26) + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* GATE CHALLENGES */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + + // Store the first gate challenge + prev_challenge := mod(keccak256(0x00, 0x20), p) + mstore(0x00, prev_challenge) + let gate_challenge := and(prev_challenge, LOWER_128_MASK) + 
mstore(GATE_CHALLENGE_0, gate_challenge) + + let gate_off := GATE_CHALLENGE_1 + for {} lt(gate_off, SUM_U_CHALLENGE_0) {} { + let prev := mload(sub(gate_off, 0x20)) + + mstore(gate_off, mulmod(prev, prev, p)) + gate_off := add(gate_off, 0x20) + } + + // Sumcheck Univariate challenges + // The algebraic relations of the Honk protocol are max degree-7. + // To prove satifiability, we multiply the relation by a random (POW) polynomial. We do this as we want all of our relations + // to be zero on every row - not for the sum of the relations to be zero. (Which is all sumcheck can do without this modification) + // + // As a result, in every round of sumcheck, the prover sends an degree-8 univariate polynomial. + // The sumcheck univariate challenge produces a challenge for each round of sumcheck, hashing the prev_challenge with + // a hash of the degree 8 univariate polynomial provided by the prover. + // + // 8 points are sent as it is enough to uniquely identify the polynomial + let read_off := SUMCHECK_UNIVARIATE_0_0_LOC + let write_off := SUM_U_CHALLENGE_0 + for {} lt(read_off, QM_EVAL_LOC) {} { + // Increase by 20 * batched relation length (8) + // 0x20 * 0x8 = 0x100 + mcopy(0x20, read_off, 0x100) + + // Hash 0x100 + 0x20 (prev hash) = 0x120 + prev_challenge := mod(keccak256(0x00, 0x120), p) + mstore(0x00, prev_challenge) + + let sumcheck_u_challenge := and(prev_challenge, LOWER_128_MASK) + mstore(write_off, sumcheck_u_challenge) + + // Progress read / write pointers + read_off := add(read_off, 0x100) + write_off := add(write_off, 0x20) + } + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* RHO CHALLENGES */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + // The RHO challenge is the hash of the evaluations of all of the wire values + // As per usual, it includes the previous challenge + // Evaluations of the following wires and their shifts (for relevant wires): + // - QM + // - QC + // - Q1 (QL) + // - Q2 (QR) + // - Q3 
(QO) + // - Q4 + // - QLOOKUP + // - QARITH + // - QRANGE + // - QELLIPTIC + // - QMEMORY + // - QNNF (NNF = Non Native Field) + // - QPOSEIDON2_EXTERNAL + // - QPOSEIDON2_INTERNAL + // - SIGMA1 + // - SIGMA2 + // - SIGMA3 + // - SIGMA4 + // - ID1 + // - ID2 + // - ID3 + // - ID4 + // - TABLE1 + // - TABLE2 + // - TABLE3 + // - TABLE4 + // - W1 (WL) + // - W2 (WR) + // - W3 (WO) + // - W4 + // - Z_PERM + // - LOOKUP_INVERSES + // - LOOKUP_READ_COUNTS + // - LOOKUP_READ_TAGS + // - W1_SHIFT + // - W2_SHIFT + // - W3_SHIFT + // - W4_SHIFT + // - Z_PERM_SHIFT + // + // Hash of all of the above evaluations + // Number of bytes to copy = 0x20 * NUMBER_OF_ENTITIES (41) = 0x520 + mcopy(0x20, QM_EVAL_LOC, 0x520) + prev_challenge := mod(keccak256(0x00, 0x540), p) + mstore(0x00, prev_challenge) + + let rho := and(prev_challenge, LOWER_128_MASK) + + mstore(RHO_CHALLENGE, rho) + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* GEMINI R CHALLENGE */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + // The Gemini R challenge contains a of all of commitments to all of the univariates + // evaluated in the Gemini Protocol + // So for multivariate polynomials in l variables, we will hash l - 1 commitments. 
+ // For this implementation, we have logN number of of rounds and thus logN - 1 committments + // The format of these commitments are proof points, which are explained above + // 0x40 * (logN - 1) + + mcopy(0x20, GEMINI_FOLD_UNIVARIATE_0_X_LOC, {{ GEMINI_FOLD_UNIVARIATE_LENGTH }}) + + prev_challenge := mod(keccak256(0x00, {{ GEMINI_FOLD_UNIVARIATE_HASH_LENGTH }}), p) + mstore(0x00, prev_challenge) + + let geminiR := and(prev_challenge, LOWER_128_MASK) + + mstore(GEMINI_R_CHALLENGE, geminiR) + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* SHPLONK NU CHALLENGE */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + // The shplonk nu challenge hashes the evaluations of the above gemini univariates + // 0x20 * logN = 0x20 * 15 = 0x1e0 + + mcopy(0x20, GEMINI_A_EVAL_0, {{ GEMINI_EVALS_LENGTH }}) + prev_challenge := mod(keccak256(0x00, {{ GEMINI_EVALS_HASH_LENGTH }}), p) + mstore(0x00, prev_challenge) + + let shplonkNu := and(prev_challenge, LOWER_128_MASK) + mstore(SHPLONK_NU_CHALLENGE, shplonkNu) + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* SHPLONK Z CHALLENGE */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + // Generate Shplonk Z + // Hash of the single shplonk Q commitment + mcopy(0x20, SHPLONK_Q_X_LOC, 0x40) + prev_challenge := mod(keccak256(0x00, 0x60), p) + + let shplonkZ := and(prev_challenge, LOWER_128_MASK) + mstore(SHPLONK_Z_CHALLENGE, shplonkZ) + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* CHALLENGES COMPLETE */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + } + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* PUBLIC INPUT DELTA */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + /** + * Generate public inputa delta + * + * The public inputs delta leverages plonk's copy constraints in order to + * evaluate public inputs. 
+ * + * For each row of the execution trace, the prover will calculate the following value + * There are 4 witness wires, 4 id wires and 4 sigma wires in this instantiation of the proof system + * So there will be 4 groups of wires (w_i, id_i and sigma_i) + * + * (w_0 + β(id_0) + γ) * ∏(w_1 + β(id_1) + γ) * ∏(w_2 + β(id_2) + γ) * ∏(w_3 + β(id_3) + γ) + * ∏------------------------------------------------------------------------------------------ * public_inputs_delta + * (w_0 + β(σ_0) + γ) * ∏(w_1 + β(σ_1) + γ) * ∏(w_2 + β(σ_2) + γ) * ∏(w_3 + β(σ_3) + γ) + * + * The above product is accumulated for all rows in the trace. + * + * The above equation enforces that for each cell in the trace, if the id and sigma pair are equal, then the + * witness value in that cell is equal. + * + * We extra terms to add to this product that correspond to public input values. + * + * The values of id_i and σ_i polynomials are related to a generalized PLONK permutation argument, in the original paper, there + * were no id_i polynomials. + * + * These are required under the multilinear setting as we cannot use cosets of the roots of unity to represent unique sets, rather + * we just use polynomials that include unique values. In implementation, id_0 can be {0 .. n} and id_1 can be {n .. 2n} and so forth. 
+ * + */ + { + let beta := mload(BETA_CHALLENGE) + let gamma := mload(GAMMA_CHALLENGE) + let pub_off := PUBLIC_INPUTS_OFFSET + + let numerator_value := 1 + let denominator_value := 1 + + let p_clone := p // move p to the front of the stack + + // Assume offset is less than p + // numerator_acc = gamma + (beta * (PERMUTATION_ARGUMENT_VALUE_SEPARATOR + offset)) + let numerator_acc := + addmod(gamma, mulmod(beta, add(PERMUTATION_ARGUMENT_VALUE_SEPARATOR, pub_off), p_clone), p_clone) + // demonimator_acc = gamma - (beta * (offset + 1)) + let beta_x_off := mulmod(beta, add(pub_off, 1), p_clone) + let denominator_acc := addmod(gamma, sub(p_clone, beta_x_off), p_clone) + + let valid_inputs := true + // Load the starting point of the public inputs (jump over the selector and the length of public inputs [0x24]) + let public_inputs_ptr := add(calldataload(0x24), 0x24) + + // endpoint_ptr = public_inputs_ptr + num_inputs * 0x20. // every public input is 0x20 bytes + let endpoint_ptr := add(public_inputs_ptr, mul(REAL_NUMBER_PUBLIC_INPUTS, 0x20)) + + for {} lt(public_inputs_ptr, endpoint_ptr) { public_inputs_ptr := add(public_inputs_ptr, 0x20) } { + // Get public inputs from calldata + let input := calldataload(public_inputs_ptr) + + valid_inputs := and(valid_inputs, lt(input, p_clone)) + + numerator_value := mulmod(numerator_value, addmod(numerator_acc, input, p_clone), p_clone) + denominator_value := mulmod(denominator_value, addmod(denominator_acc, input, p_clone), p_clone) + + numerator_acc := addmod(numerator_acc, beta, p_clone) + denominator_acc := addmod(denominator_acc, sub(p_clone, beta), p_clone) + } + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* PUBLIC INPUT DELTA - Pairing points accum */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + // Pairing points contribution to public inputs delta + let pairing_points_ptr := PAIRING_POINT_0 + for {} lt(pairing_points_ptr, W_L_X_LOC) { pairing_points_ptr := 
add(pairing_points_ptr, 0x20) } { + let input := mload(pairing_points_ptr) + + numerator_value := mulmod(numerator_value, addmod(numerator_acc, input, p_clone), p_clone) + denominator_value := mulmod(denominator_value, addmod(denominator_acc, input, p_clone), p_clone) + + numerator_acc := addmod(numerator_acc, beta, p_clone) + denominator_acc := addmod(denominator_acc, sub(p_clone, beta), p_clone) + } + + // Revert if not all public inputs are field elements (i.e. < p) + if iszero(valid_inputs) { + mstore(0x00, PUBLIC_INPUT_TOO_LARGE_SELECTOR) + revert(0x00, 0x04) + } + + mstore(PUBLIC_INPUTS_DELTA_NUMERATOR_CHALLENGE, numerator_value) + mstore(PUBLIC_INPUTS_DELTA_DENOMINATOR_CHALLENGE, denominator_value) + + // TODO: batch with barycentric inverses + let dom_inverse := 0 + { + mstore(0, 0x20) + mstore(0x20, 0x20) + mstore(0x40, 0x20) + mstore(0x60, denominator_value) + mstore(0x80, P_SUB_2) + mstore(0xa0, p) + if iszero(staticcall(gas(), 0x05, 0x00, 0xc0, 0x00, 0x20)) { + mstore(0x00, MODEXP_FAILED_SELECTOR) + revert(0x00, 0x04) + } + // 1 / (0 . 1 . 2 . 3 . 4 . 5 . 6 . 7) + dom_inverse := mload(0x00) + } + // Calculate the public inputs delta + mstore(PUBLIC_INPUTS_DELTA_NUMERATOR_CHALLENGE, mulmod(numerator_value, dom_inverse, p)) + } + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* PUBLIC INPUT DELTA - complete */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* SUMCHECK */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + // + // Sumcheck is used to prove that every relation 0 on each row of the witness. + // + // Given each of the columns of our trace is a multilinear polynomial 𝑃1,…,𝑃𝑁∈𝔽[𝑋0,…,𝑋𝑑−1]. 
We run sumcheck over the polynomial + // + // 𝐹̃ (𝑋0,…,𝑋𝑑−1)=𝑝𝑜𝑤𝛽(𝑋0,…,𝑋𝑑−1)⋅𝐹(𝑃1(𝑋0,…,𝑋𝑑−1),…,𝑃𝑁(𝑋0,…,𝑋𝑑−1)) + // + // The Pow polynomial is a random polynomial that allows us to ceritify that the relations sum to 0 on each row of the witness, + // rather than the entire sum just targeting 0. + // + // Each polynomial P in our implementation are the polys in the proof and the verification key. (W_1, W_2, W_3, W_4, Z_PERM, etc....) + // + // We start with a LOG_N variate multilinear polynomial, each round fixes a variable to a challenge value. + // Each round the prover sends a round univariate poly, since the degree of our honk relations is 7 + the pow polynomial the prover + // sends a degree-8 univariate on each round. + // This is sent efficiently by sending 8 values, enough to represent a unique polynomial. + // Barycentric evaluation is used to evaluate the polynomial at any point on the domain, given these 8 unique points. + // + // In the sumcheck protocol, the target sum for each round is the sum of the round univariate evaluated on 0 and 1. + // 𝜎𝑖=?𝑆̃ 𝑖(0)+𝑆̃ 𝑖(1) + // This is efficiently checked as S(0) and S(1) are sent by the prover as values of the round univariate. + // + // We compute the next challenge by evaluating the round univariate at a random challenge value. + // 𝜎𝑖+1←𝑆̃ 𝑖(𝑢𝑖) + // This evaluation is performed via barycentric evaluation. + // + // Once we have reduced the multilinear polynomials into single dimensional polys, we check the entire sumcheck relation matches the target sum. + // + // Below this is composed of 8 relations: + // 1. Arithmetic relation - constrains arithmetic + // 2. Permutaiton Relation - efficiently encodes copy constraints + // 3. Log Derivative Lookup Relation - used for lookup operations + // 4. Delta Range Relation - used for efficient range checks + // 5. Memory Relation - used for efficient memory operations + // 6. NNF Relation - used for efficient Non Native Field operations + // 7. 
Poseidon2 External Relation - used for efficient in-circuit hashing + // 8. Poseidon2 Internal Relation - used for efficient in-circuit hashing + // + // These are batched together and evaluated at the same time using the alpha challenges. + // + { + // We write the barycentric domain values into memory + // These are written once per program execution, and reused across all + // sumcheck rounds + mstore(BARYCENTRIC_LAGRANGE_DENOMINATOR_0_LOC, BARYCENTRIC_LAGRANGE_DENOMINATOR_0) + mstore(BARYCENTRIC_LAGRANGE_DENOMINATOR_1_LOC, BARYCENTRIC_LAGRANGE_DENOMINATOR_1) + mstore(BARYCENTRIC_LAGRANGE_DENOMINATOR_2_LOC, BARYCENTRIC_LAGRANGE_DENOMINATOR_2) + mstore(BARYCENTRIC_LAGRANGE_DENOMINATOR_3_LOC, BARYCENTRIC_LAGRANGE_DENOMINATOR_3) + mstore(BARYCENTRIC_LAGRANGE_DENOMINATOR_4_LOC, BARYCENTRIC_LAGRANGE_DENOMINATOR_4) + mstore(BARYCENTRIC_LAGRANGE_DENOMINATOR_5_LOC, BARYCENTRIC_LAGRANGE_DENOMINATOR_5) + mstore(BARYCENTRIC_LAGRANGE_DENOMINATOR_6_LOC, BARYCENTRIC_LAGRANGE_DENOMINATOR_6) + mstore(BARYCENTRIC_LAGRANGE_DENOMINATOR_7_LOC, BARYCENTRIC_LAGRANGE_DENOMINATOR_7) + + // Compute the target sums for each round of sumcheck + { + // This requires the barycentric inverses to be computed for each round + // Write all of the non inverted barycentric denominators into memory + let accumulator := 1 + let temp := LATER_SCRATCH_SPACE + let bary_centric_inverses_off := BARYCENTRIC_DENOMINATOR_INVERSES_0_0_LOC + { + let round_challenge_off := SUM_U_CHALLENGE_0 + for { let round := 0 } lt(round, LOG_N) { round := add(round, 1) } { + let round_challenge := mload(round_challenge_off) + let bary_lagrange_denominator_off := BARYCENTRIC_LAGRANGE_DENOMINATOR_0_LOC + + // Unrolled as this loop as it only has 8 iterations + { + let bary_lagrange_denominator := mload(bary_lagrange_denominator_off) + let pre_inv := + mulmod( + bary_lagrange_denominator, + addmod(round_challenge, p, p), // sub(p, 0) = p + p + ) + mstore(bary_centric_inverses_off, pre_inv) + temp := add(temp, 0x20) + 
mstore(temp, accumulator) + accumulator := mulmod(accumulator, pre_inv, p) + + // increase offsets + bary_lagrange_denominator_off := add(bary_lagrange_denominator_off, 0x20) + bary_centric_inverses_off := add(bary_centric_inverses_off, 0x20) + + // barycentric_index = 1 + bary_lagrange_denominator := mload(bary_lagrange_denominator_off) + pre_inv := mulmod(bary_lagrange_denominator, addmod(round_challenge, P_SUB_1, p), p) + mstore(bary_centric_inverses_off, pre_inv) + temp := add(temp, 0x20) + mstore(temp, accumulator) + accumulator := mulmod(accumulator, pre_inv, p) + + // increase offsets + bary_lagrange_denominator_off := add(bary_lagrange_denominator_off, 0x20) + bary_centric_inverses_off := add(bary_centric_inverses_off, 0x20) + + // barycentric_index = 2 + bary_lagrange_denominator := mload(bary_lagrange_denominator_off) + pre_inv := mulmod(bary_lagrange_denominator, addmod(round_challenge, P_SUB_2, p), p) + mstore(bary_centric_inverses_off, pre_inv) + temp := add(temp, 0x20) + mstore(temp, accumulator) + accumulator := mulmod(accumulator, pre_inv, p) + + // increase offsets + bary_lagrange_denominator_off := add(bary_lagrange_denominator_off, 0x20) + bary_centric_inverses_off := add(bary_centric_inverses_off, 0x20) + + // barycentric_index = 3 + bary_lagrange_denominator := mload(bary_lagrange_denominator_off) + pre_inv := mulmod(bary_lagrange_denominator, addmod(round_challenge, P_SUB_3, p), p) + mstore(bary_centric_inverses_off, pre_inv) + temp := add(temp, 0x20) + mstore(temp, accumulator) + accumulator := mulmod(accumulator, pre_inv, p) + + // increase offsets + bary_lagrange_denominator_off := add(bary_lagrange_denominator_off, 0x20) + bary_centric_inverses_off := add(bary_centric_inverses_off, 0x20) + + // barycentric_index = 4 + bary_lagrange_denominator := mload(bary_lagrange_denominator_off) + pre_inv := mulmod(bary_lagrange_denominator, addmod(round_challenge, P_SUB_4, p), p) + mstore(bary_centric_inverses_off, pre_inv) + temp := add(temp, 0x20) + 
mstore(temp, accumulator) + accumulator := mulmod(accumulator, pre_inv, p) + + // increase offsets + bary_lagrange_denominator_off := add(bary_lagrange_denominator_off, 0x20) + bary_centric_inverses_off := add(bary_centric_inverses_off, 0x20) + + // barycentric_index = 5 + bary_lagrange_denominator := mload(bary_lagrange_denominator_off) + pre_inv := mulmod(bary_lagrange_denominator, addmod(round_challenge, P_SUB_5, p), p) + mstore(bary_centric_inverses_off, pre_inv) + temp := add(temp, 0x20) + mstore(temp, accumulator) + accumulator := mulmod(accumulator, pre_inv, p) + + // increase offsets + bary_lagrange_denominator_off := add(bary_lagrange_denominator_off, 0x20) + bary_centric_inverses_off := add(bary_centric_inverses_off, 0x20) + + // barycentric_index = 6 + bary_lagrange_denominator := mload(bary_lagrange_denominator_off) + pre_inv := mulmod(bary_lagrange_denominator, addmod(round_challenge, P_SUB_6, p), p) + mstore(bary_centric_inverses_off, pre_inv) + temp := add(temp, 0x20) + mstore(temp, accumulator) + accumulator := mulmod(accumulator, pre_inv, p) + + // increase offsets + bary_lagrange_denominator_off := add(bary_lagrange_denominator_off, 0x20) + bary_centric_inverses_off := add(bary_centric_inverses_off, 0x20) + + // barycentric_index = 7 + bary_lagrange_denominator := mload(bary_lagrange_denominator_off) + pre_inv := mulmod(bary_lagrange_denominator, addmod(round_challenge, P_SUB_7, p), p) + mstore(bary_centric_inverses_off, pre_inv) + temp := add(temp, 0x20) + mstore(temp, accumulator) + accumulator := mulmod(accumulator, pre_inv, p) + + // increase offsets + bary_lagrange_denominator_off := add(bary_lagrange_denominator_off, 0x20) + bary_centric_inverses_off := add(bary_centric_inverses_off, 0x20) + } + round_challenge_off := add(round_challenge_off, 0x20) + } + } + + // Invert all of the barycentric denominators as a single batch + { + { + mstore(0, 0x20) + mstore(0x20, 0x20) + mstore(0x40, 0x20) + mstore(0x60, accumulator) + mstore(0x80, P_SUB_2) 
+ mstore(0xa0, p) + if iszero(staticcall(gas(), 0x05, 0x00, 0xc0, 0x00, 0x20)) { + mstore(0x00, MODEXP_FAILED_SELECTOR) + revert(0x00, 0x04) + } + + accumulator := mload(0x00) + } + + // Normalise as last loop will have incremented the offset + bary_centric_inverses_off := sub(bary_centric_inverses_off, 0x20) + for {} gt(bary_centric_inverses_off, BARYCENTRIC_LAGRANGE_DENOMINATOR_7_LOC) { + bary_centric_inverses_off := sub(bary_centric_inverses_off, 0x20) + } { + let tmp := mulmod(accumulator, mload(temp), p) + accumulator := mulmod(accumulator, mload(bary_centric_inverses_off), p) + mstore(bary_centric_inverses_off, tmp) + + temp := sub(temp, 0x20) + } + } + } + + let valid := true + let round_target := 0 + let pow_partial_evaluation := 1 + let gate_challenge_off := GATE_CHALLENGE_0 + let round_univariates_off := SUMCHECK_UNIVARIATE_0_0_LOC + + let challenge_off := SUM_U_CHALLENGE_0 + let bary_inverses_off := BARYCENTRIC_DENOMINATOR_INVERSES_0_0_LOC + + for { let round := 0 } lt(round, LOG_N) { round := add(round, 1) } { + let round_challenge := mload(challenge_off) + + // Total sum = u[0] + u[1] + let total_sum := addmod(mload(round_univariates_off), mload(add(round_univariates_off, 0x20)), p) + valid := and(valid, eq(total_sum, round_target)) + + // Compute next target sum + let numerator_value := round_challenge + numerator_value := mulmod(numerator_value, addmod(round_challenge, P_SUB_1, p), p) + numerator_value := mulmod(numerator_value, addmod(round_challenge, P_SUB_2, p), p) + numerator_value := mulmod(numerator_value, addmod(round_challenge, P_SUB_3, p), p) + numerator_value := mulmod(numerator_value, addmod(round_challenge, P_SUB_4, p), p) + numerator_value := mulmod(numerator_value, addmod(round_challenge, P_SUB_5, p), p) + numerator_value := mulmod(numerator_value, addmod(round_challenge, P_SUB_6, p), p) + numerator_value := mulmod(numerator_value, addmod(round_challenge, P_SUB_7, p), p) + + // // Compute the next round target + round_target := 0 + for 
{ let i := 0 } lt(i, BATCHED_RELATION_PARTIAL_LENGTH) { i := add(i, 1) } { + let term := mload(round_univariates_off) + let inverse := mload(bary_inverses_off) + + term := mulmod(term, inverse, p) + round_target := addmod(round_target, term, p) + round_univariates_off := add(round_univariates_off, 0x20) + bary_inverses_off := add(bary_inverses_off, 0x20) + } + + round_target := mulmod(round_target, numerator_value, p) + + // Partially evaluate POW + let gate_challenge := mload(gate_challenge_off) + let gate_challenge_minus_one := sub(gate_challenge, 1) + + let univariate_evaluation := addmod(1, mulmod(round_challenge, gate_challenge_minus_one, p), p) + + pow_partial_evaluation := mulmod(pow_partial_evaluation, univariate_evaluation, p) + + gate_challenge_off := add(gate_challenge_off, 0x20) + challenge_off := add(challenge_off, 0x20) + } + + if iszero(valid) { + mstore(0x00, SUMCHECK_FAILED_SELECTOR) + revert(0x00, 0x04) + } + + // The final sumcheck round; accumulating evaluations + // Uses pow partial evaluation as the gate scaling factor + + mstore(POW_PARTIAL_EVALUATION_LOC, pow_partial_evaluation) + mstore(FINAL_ROUND_TARGET_LOC, round_target) + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* LOGUP RELATION */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + { + /** + * The basic arithmetic gate identity in standard plonk is as follows. + * (w_1 . w_2 . q_m) + (w_1 . q_1) + (w_2 . q_2) + (w_3 . q_3) + (w_4 . q_4) + q_c = 0 + * However, for Ultraplonk, we extend this to support "passing" wires between rows (shown without alpha scaling below): + * q_arith * ( ( (-1/2) * (q_arith - 3) * q_m * w_1 * w_2 + q_1 * w_1 + q_2 * w_2 + q_3 * w_3 + q_4 * w_4 + q_c ) + + * (q_arith - 1)*( α * (q_arith - 2) * (w_1 + w_4 - w_1_omega + q_m) + w_4_omega) ) = 0 + * + * This formula results in several cases depending on q_arith: + * 1. q_arith == 0: Arithmetic gate is completely disabled + * + * 2. 
q_arith == 1: Everything in the minigate on the right is disabled. The equation is just a standard plonk equation + * with extra wires: q_m * w_1 * w_2 + q_1 * w_1 + q_2 * w_2 + q_3 * w_3 + q_4 * w_4 + q_c = 0 + * + * 3. q_arith == 2: The (w_1 + w_4 - ...) term is disabled. THe equation is: + * (1/2) * q_m * w_1 * w_2 + q_1 * w_1 + q_2 * w_2 + q_3 * w_3 + q_4 * w_4 + q_c + w_4_omega = 0 + * It allows defining w_4 at next index (w_4_omega) in terms of current wire values + * + * 4. q_arith == 3: The product of w_1 and w_2 is disabled, but a mini addition gate is enabled. α allows us to split + * the equation into two: + * + * q_1 * w_1 + q_2 * w_2 + q_3 * w_3 + q_4 * w_4 + q_c + 2 * w_4_omega = 0 + * and + * w_1 + w_4 - w_1_omega + q_m = 0 (we are reusing q_m here) + * + * 5. q_arith > 3: The product of w_1 and w_2 is scaled by (q_arith - 3), while the w_4_omega term is scaled by (q_arith - 1). + * The equation can be split into two: + * + * (q_arith - 3)* q_m * w_1 * w_ 2 + q_1 * w_1 + q_2 * w_2 + q_3 * w_3 + q_4 * w_4 + q_c + (q_arith - 1) * w_4_omega = 0 + * and + * w_1 + w_4 - w_1_omega + q_m = 0 + * + * The problem that q_m is used both in both equations can be dealt with by appropriately changing selector values at + * the next gate. Then we can treat (q_arith - 1) as a simulated q_6 selector and scale q_m to handle (q_arith - 3) at + * product. + */ + let w1q1 := mulmod(mload(W1_EVAL_LOC), mload(QL_EVAL_LOC), p) + let w2q2 := mulmod(mload(W2_EVAL_LOC), mload(QR_EVAL_LOC), p) + let w3q3 := mulmod(mload(W3_EVAL_LOC), mload(QO_EVAL_LOC), p) + let w4q3 := mulmod(mload(W4_EVAL_LOC), mload(Q4_EVAL_LOC), p) + + let q_arith := mload(QARITH_EVAL_LOC) + // w1w2qm := (w_1 . w_2 . q_m . (QARITH_EVAL_LOC - 3)) / 2 + let w1w2qm := + mulmod( + mulmod( + mulmod(mulmod(mload(W1_EVAL_LOC), mload(W2_EVAL_LOC), p), mload(QM_EVAL_LOC), p), + addmod(q_arith, P_SUB_3, p), + p + ), + NEG_HALF_MODULO_P, + p + ) + + // (w_1 . w_2 . q_m . (q_arith - 3)) / -2) + (w_1 . q_1) + (w_2 . 
q_2) + (w_3 . q_3) + (w_4 . q_4) + q_c + let identity := + addmod( + mload(QC_EVAL_LOC), + addmod(w4q3, addmod(w3q3, addmod(w2q2, addmod(w1q1, w1w2qm, p), p), p), p), + p + ) + + // if q_arith == 3 we evaluate an additional mini addition gate (on top of the regular one), where: + // w_1 + w_4 - w_1_omega + q_m = 0 + // we use this gate to save an addition gate when adding or subtracting non-native field elements + // α * (q_arith - 2) * (w_1 + w_4 - w_1_omega + q_m) + let extra_small_addition_gate_identity := + mulmod( + addmod(q_arith, P_SUB_2, p), + addmod( + mload(QM_EVAL_LOC), + addmod( + sub(p, mload(W1_SHIFT_EVAL_LOC)), addmod(mload(W1_EVAL_LOC), mload(W4_EVAL_LOC), p), p + ), + p + ), + p + ) + + // Split up the two relations + let contribution_0 := + addmod(identity, mulmod(addmod(q_arith, P_SUB_1, p), mload(W4_SHIFT_EVAL_LOC), p), p) + contribution_0 := mulmod(mulmod(contribution_0, q_arith, p), mload(POW_PARTIAL_EVALUATION_LOC), p) + mstore(SUBRELATION_EVAL_0_LOC, contribution_0) + + let contribution_1 := mulmod(extra_small_addition_gate_identity, addmod(q_arith, P_SUB_1, p), p) + contribution_1 := mulmod(contribution_1, q_arith, p) + contribution_1 := mulmod(contribution_1, mload(POW_PARTIAL_EVALUATION_LOC), p) + mstore(SUBRELATION_EVAL_1_LOC, contribution_1) + } + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* PERMUTATION RELATION */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + { + let beta := mload(BETA_CHALLENGE) + let gamma := mload(GAMMA_CHALLENGE) + + /** + * t1 = (W1 + gamma + beta * ID1) * (W2 + gamma + beta * ID2) + * t2 = (W3 + gamma + beta * ID3) * (W4 + gamma + beta * ID4) + * gp_numerator = t1 * t2 + * t1 = (W1 + gamma + beta * sigma_1_eval) * (W2 + gamma + beta * sigma_2_eval) + * t2 = (W2 + gamma + beta * sigma_3_eval) * (W3 + gamma + beta * sigma_4_eval) + * gp_denominator = t1 * t2 + */ + let t1 := + mulmod( + add(add(mload(W1_EVAL_LOC), gamma), mulmod(beta, mload(ID1_EVAL_LOC), p)), + 
add(add(mload(W2_EVAL_LOC), gamma), mulmod(beta, mload(ID2_EVAL_LOC), p)), + p + ) + let t2 := + mulmod( + add(add(mload(W3_EVAL_LOC), gamma), mulmod(beta, mload(ID3_EVAL_LOC), p)), + add(add(mload(W4_EVAL_LOC), gamma), mulmod(beta, mload(ID4_EVAL_LOC), p)), + p + ) + let numerator := mulmod(t1, t2, p) + t1 := + mulmod( + add(add(mload(W1_EVAL_LOC), gamma), mulmod(beta, mload(SIGMA1_EVAL_LOC), p)), + add(add(mload(W2_EVAL_LOC), gamma), mulmod(beta, mload(SIGMA2_EVAL_LOC), p)), + p + ) + t2 := + mulmod( + add(add(mload(W3_EVAL_LOC), gamma), mulmod(beta, mload(SIGMA3_EVAL_LOC), p)), + add(add(mload(W4_EVAL_LOC), gamma), mulmod(beta, mload(SIGMA4_EVAL_LOC), p)), + p + ) + let denominator := mulmod(t1, t2, p) + + { + let acc := + mulmod(addmod(mload(Z_PERM_EVAL_LOC), mload(LAGRANGE_FIRST_EVAL_LOC), p), numerator, p) + + acc := + addmod( + acc, + sub( + p, + mulmod( + addmod( + mload(Z_PERM_SHIFT_EVAL_LOC), + mulmod( + mload(LAGRANGE_LAST_EVAL_LOC), + mload(PUBLIC_INPUTS_DELTA_NUMERATOR_CHALLENGE), + p + ), + p + ), + denominator, + p + ) + ), + p + ) + + acc := mulmod(acc, mload(POW_PARTIAL_EVALUATION_LOC), p) + mstore(SUBRELATION_EVAL_2_LOC, acc) + + acc := + mulmod( + mulmod(mload(LAGRANGE_LAST_EVAL_LOC), mload(Z_PERM_SHIFT_EVAL_LOC), p), + mload(POW_PARTIAL_EVALUATION_LOC), + p + ) + mstore(SUBRELATION_EVAL_3_LOC, acc) + } + } + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* LOGUP WIDGET EVALUATION */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + { + let eta := mload(ETA_CHALLENGE) + let eta_two := mload(ETA_TWO_CHALLENGE) + let eta_three := mload(ETA_THREE_CHALLENGE) + + let beta := mload(BETA_CHALLENGE) + let gamma := mload(GAMMA_CHALLENGE) + + let t0 := + addmod(addmod(mload(TABLE1_EVAL_LOC), gamma, p), mulmod(mload(TABLE2_EVAL_LOC), eta, p), p) + let t1 := + addmod(mulmod(mload(TABLE3_EVAL_LOC), eta_two, p), mulmod(mload(TABLE4_EVAL_LOC), eta_three, p), p) + let write_term := addmod(t0, t1, p) + + t0 := + addmod( + 
addmod(mload(W1_EVAL_LOC), gamma, p), mulmod(mload(QR_EVAL_LOC), mload(W1_SHIFT_EVAL_LOC), p), p + ) + t1 := addmod(mload(W2_EVAL_LOC), mulmod(mload(QM_EVAL_LOC), mload(W2_SHIFT_EVAL_LOC), p), p) + let t2 := addmod(mload(W3_EVAL_LOC), mulmod(mload(QC_EVAL_LOC), mload(W3_SHIFT_EVAL_LOC), p), p) + + let read_term := addmod(t0, mulmod(t1, eta, p), p) + read_term := addmod(read_term, mulmod(t2, eta_two, p), p) + read_term := addmod(read_term, mulmod(mload(QO_EVAL_LOC), eta_three, p), p) + + let read_inverse := mulmod(mload(LOOKUP_INVERSES_EVAL_LOC), write_term, p) + let write_inverse := mulmod(mload(LOOKUP_INVERSES_EVAL_LOC), read_term, p) + + let inverse_exists_xor := addmod(mload(LOOKUP_READ_TAGS_EVAL_LOC), mload(QLOOKUP_EVAL_LOC), p) + inverse_exists_xor := + addmod( + inverse_exists_xor, + sub(p, mulmod(mload(LOOKUP_READ_TAGS_EVAL_LOC), mload(QLOOKUP_EVAL_LOC), p)), + p + ) + + let accumulator_none := mulmod(mulmod(read_term, write_term, p), mload(LOOKUP_INVERSES_EVAL_LOC), p) + accumulator_none := addmod(accumulator_none, sub(p, inverse_exists_xor), p) + accumulator_none := mulmod(accumulator_none, mload(POW_PARTIAL_EVALUATION_LOC), p) + + let accumulator_one := mulmod(mload(QLOOKUP_EVAL_LOC), read_inverse, p) + accumulator_one := + addmod(accumulator_one, sub(p, mulmod(mload(LOOKUP_READ_COUNTS_EVAL_LOC), write_inverse, p)), p) + + let read_tag := mload(LOOKUP_READ_TAGS_EVAL_LOC) + let read_tag_boolean_relation := mulmod(read_tag, addmod(read_tag, P_SUB_1, p), p) + read_tag_boolean_relation := mulmod(read_tag_boolean_relation, mload(POW_PARTIAL_EVALUATION_LOC), p) + + mstore(SUBRELATION_EVAL_4_LOC, accumulator_none) + mstore(SUBRELATION_EVAL_5_LOC, accumulator_one) + mstore(SUBRELATION_EVAL_6_LOC, read_tag_boolean_relation) + } + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* DELTA RANGE RELATION */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + { + // TODO(md): optimise the calculations + let minus_one := P_SUB_1 + 
let minus_two := P_SUB_2 + let minus_three := P_SUB_3 + + let delta_1 := addmod(mload(W2_EVAL_LOC), sub(p, mload(W1_EVAL_LOC)), p) + let delta_2 := addmod(mload(W3_EVAL_LOC), sub(p, mload(W2_EVAL_LOC)), p) + let delta_3 := addmod(mload(W4_EVAL_LOC), sub(p, mload(W3_EVAL_LOC)), p) + let delta_4 := addmod(mload(W1_SHIFT_EVAL_LOC), sub(p, mload(W4_EVAL_LOC)), p) + + { + let acc := delta_1 + acc := mulmod(acc, addmod(delta_1, minus_one, p), p) + acc := mulmod(acc, addmod(delta_1, minus_two, p), p) + acc := mulmod(acc, addmod(delta_1, minus_three, p), p) + acc := mulmod(acc, mload(QRANGE_EVAL_LOC), p) + acc := mulmod(acc, mload(POW_PARTIAL_EVALUATION_LOC), p) + mstore(SUBRELATION_EVAL_7_LOC, acc) + } + + { + let acc := delta_2 + acc := mulmod(acc, addmod(delta_2, minus_one, p), p) + acc := mulmod(acc, addmod(delta_2, minus_two, p), p) + acc := mulmod(acc, addmod(delta_2, minus_three, p), p) + acc := mulmod(acc, mload(QRANGE_EVAL_LOC), p) + acc := mulmod(acc, mload(POW_PARTIAL_EVALUATION_LOC), p) + mstore(SUBRELATION_EVAL_8_LOC, acc) + } + + { + let acc := delta_3 + acc := mulmod(acc, addmod(delta_3, minus_one, p), p) + acc := mulmod(acc, addmod(delta_3, minus_two, p), p) + acc := mulmod(acc, addmod(delta_3, minus_three, p), p) + acc := mulmod(acc, mload(QRANGE_EVAL_LOC), p) + acc := mulmod(acc, mload(POW_PARTIAL_EVALUATION_LOC), p) + mstore(SUBRELATION_EVAL_9_LOC, acc) + } + + { + let acc := delta_4 + acc := mulmod(acc, addmod(delta_4, minus_one, p), p) + acc := mulmod(acc, addmod(delta_4, minus_two, p), p) + acc := mulmod(acc, addmod(delta_4, minus_three, p), p) + acc := mulmod(acc, mload(QRANGE_EVAL_LOC), p) + acc := mulmod(acc, mload(POW_PARTIAL_EVALUATION_LOC), p) + mstore(SUBRELATION_EVAL_10_LOC, acc) + } + } + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* ELLIPTIC CURVE RELATION */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + { + // Contribution 10 point addition, x-coordinate check + // q_elliptic * (x3 + x2 + 
x1)(x2 - x1)(x2 - x1) - y2^2 - y1^2 + 2(y2y1)*q_sign = 0 + let x_diff := addmod(mload(EC_X_2), sub(p, mload(EC_X_1)), p) + let y1_sqr := mulmod(mload(EC_Y_1), mload(EC_Y_1), p) + { + let y2_sqr := mulmod(mload(EC_Y_2), mload(EC_Y_2), p) + let y1y2 := mulmod(mulmod(mload(EC_Y_1), mload(EC_Y_2), p), mload(EC_Q_SIGN), p) + let x_add_identity := addmod(mload(EC_X_3), addmod(mload(EC_X_2), mload(EC_X_1), p), p) + x_add_identity := mulmod(mulmod(x_add_identity, x_diff, p), x_diff, p) + x_add_identity := addmod(x_add_identity, sub(p, y2_sqr), p) + x_add_identity := addmod(x_add_identity, sub(p, y1_sqr), p) + x_add_identity := addmod(x_add_identity, y1y2, p) + x_add_identity := addmod(x_add_identity, y1y2, p) + + let eval := mulmod(x_add_identity, mload(POW_PARTIAL_EVALUATION_LOC), p) + eval := mulmod(eval, mload(QELLIPTIC_EVAL_LOC), p) + eval := mulmod(eval, addmod(1, sub(p, mload(EC_Q_IS_DOUBLE)), p), p) + mstore(SUBRELATION_EVAL_11_LOC, eval) + } + + { + let y1_plus_y3 := addmod(mload(EC_Y_1), mload(EC_Y_3), p) + let y_diff := mulmod(mload(EC_Y_2), mload(EC_Q_SIGN), p) + y_diff := addmod(y_diff, sub(p, mload(EC_Y_1)), p) + let y_add_identity := mulmod(y1_plus_y3, x_diff, p) + y_add_identity := + addmod(y_add_identity, mulmod(addmod(mload(EC_X_3), sub(p, mload(EC_X_1)), p), y_diff, p), p) + + let eval := mulmod(y_add_identity, mload(POW_PARTIAL_EVALUATION_LOC), p) + eval := mulmod(eval, mload(QELLIPTIC_EVAL_LOC), p) + eval := mulmod(eval, addmod(1, sub(p, mload(EC_Q_IS_DOUBLE)), p), p) + mstore(SUBRELATION_EVAL_12_LOC, eval) + } + + { + let x_pow_4 := mulmod(addmod(y1_sqr, GRUMPKIN_CURVE_B_PARAMETER_NEGATED, p), mload(EC_X_1), p) + let y1_sqr_mul_4 := addmod(y1_sqr, y1_sqr, p) + y1_sqr_mul_4 := addmod(y1_sqr_mul_4, y1_sqr_mul_4, p) + + let x1_pow_4_mul_9 := mulmod(x_pow_4, 9, p) + + let ep_x_double_identity := addmod(mload(EC_X_3), addmod(mload(EC_X_1), mload(EC_X_1), p), p) + ep_x_double_identity := mulmod(ep_x_double_identity, y1_sqr_mul_4, p) + ep_x_double_identity := 
addmod(ep_x_double_identity, sub(p, x1_pow_4_mul_9), p) + + let acc := mulmod(ep_x_double_identity, mload(POW_PARTIAL_EVALUATION_LOC), p) + acc := mulmod(mulmod(acc, mload(QELLIPTIC_EVAL_LOC), p), mload(EC_Q_IS_DOUBLE), p) + acc := addmod(acc, mload(SUBRELATION_EVAL_11_LOC), p) + + // Add to existing contribution - and double check that numbers here + mstore(SUBRELATION_EVAL_11_LOC, acc) + } + + { + let x1_sqr_mul_3 := + mulmod(addmod(addmod(mload(EC_X_1), mload(EC_X_1), p), mload(EC_X_1), p), mload(EC_X_1), p) + let y_double_identity := + mulmod(x1_sqr_mul_3, addmod(mload(EC_X_1), sub(p, mload(EC_X_3)), p), p) + y_double_identity := + addmod( + y_double_identity, + sub( + p, + mulmod( + addmod(mload(EC_Y_1), mload(EC_Y_1), p), addmod(mload(EC_Y_1), mload(EC_Y_3), p), p + ) + ), + p + ) + + let acc := mulmod(y_double_identity, mload(POW_PARTIAL_EVALUATION_LOC), p) + acc := mulmod(mulmod(acc, mload(QELLIPTIC_EVAL_LOC), p), mload(EC_Q_IS_DOUBLE), p) + acc := addmod(acc, mload(SUBRELATION_EVAL_12_LOC), p) + + // Add to existing contribution - and double check that numbers here + mstore(SUBRELATION_EVAL_12_LOC, acc) + } + } + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* MEMORY RELATION */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + { + { + /** + * MEMORY + * + * A RAM memory record contains a tuple of the following fields: + * * i: `index` of memory cell being accessed + * * t: `timestamp` of memory cell being accessed (used for RAM, set to 0 for ROM) + * * v: `value` of memory cell being accessed + * * a: `access` type of record. read: 0 = read, 1 = write + * * r: `record` of memory cell. 
record = access + index * eta + timestamp * eta_two + value * eta_three + * + * A ROM memory record contains a tuple of the following fields: + * * i: `index` of memory cell being accessed + * * v: `value1` of memory cell being accessed (ROM tables can store up to 2 values per index) + * * v2:`value2` of memory cell being accessed (ROM tables can store up to 2 values per index) + * * r: `record` of memory cell. record = index * eta + value2 * eta_two + value1 * eta_three + * + * When performing a read/write access, the values of i, t, v, v2, a, r are stored in the following wires + + * selectors, depending on whether the gate is a RAM read/write or a ROM read + * + * | gate type | i | v2/t | v | a | r | + * | --------- | -- | ----- | -- | -- | -- | + * | ROM | w1 | w2 | w3 | -- | w4 | + * | RAM | w1 | w2 | w3 | qc | w4 | + * + * (for accesses where `index` is a circuit constant, it is assumed the circuit will apply a copy constraint on + * `w2` to fix its value) + * + * + */ + + /** + * Memory Record Check + * Partial degree: 1 + * Total degree: 4 + * + * A ROM/ROM access gate can be evaluated with the identity: + * + * qc + w1 \eta + w2 \eta_two + w3 \eta_three - w4 = 0 + * + * For ROM gates, qc = 0 + */ + /** + * memory_record_check = w_3 * eta_three; + * memory_record_check += w_2 * eta_two; + * memory_record_check += w_1 * eta; + * memory_record_check += q_c; + * + * partial_record_check = memory_record_check; + * + * memory_record_check -= w_4; + */ + // TODO(md): update these - formula has changed with lower degree + let memory_record_check := mulmod(mload(W3_EVAL_LOC), mload(ETA_THREE_CHALLENGE), p) + memory_record_check := + addmod(memory_record_check, mulmod(mload(W2_EVAL_LOC), mload(ETA_TWO_CHALLENGE), p), p) + memory_record_check := + addmod(memory_record_check, mulmod(mload(W1_EVAL_LOC), mload(ETA_CHALLENGE), p), p) + memory_record_check := addmod(memory_record_check, mload(QC_EVAL_LOC), p) + + let partial_record_check := memory_record_check + 
memory_record_check := addmod(memory_record_check, sub(p, mload(W4_EVAL_LOC)), p) + + mstore(AUX_MEMORY_CHECK_IDENTITY, memory_record_check) + + /** + * ROM Consistency Check + * Partial degree: 1 + * Total degree: 4 + * + * For every ROM read, a set equivalence check is applied between the record witnesses, and a second set of + * records that are sorted. + * + * We apply the following checks for the sorted records: + * + * 1. w1, w2, w3 correctly map to 'index', 'v1, 'v2' for a given record value at w4 + * 2. index values for adjacent records are monotonically increasing + * 3. if, at gate i, index_i == index_{i + 1}, then value1_i == value1_{i + 1} and value2_i == value2_{i + 1} + * + */ + // index_delta = w_1_omega - w_1 + let index_delta := addmod(mload(W1_SHIFT_EVAL_LOC), sub(p, mload(W1_EVAL_LOC)), p) + + // record_delta = w_4_omega - w_4 + let record_delta := addmod(mload(W4_SHIFT_EVAL_LOC), sub(p, mload(W4_EVAL_LOC)), p) + + // index_is_monotonically_increasing = index_delta * (index_delta - 1) + let index_is_monotonically_increasing := mulmod(index_delta, addmod(index_delta, P_SUB_1, p), p) + + // adjacent_values_match_if_adjacent_indices_match = record_delta * (1 - index_delta) + let adjacent_values_match_if_adjacent_indices_match := + mulmod(record_delta, addmod(1, sub(p, index_delta), p), p) + + mstore( + SUBRELATION_EVAL_14_LOC, + mulmod( + adjacent_values_match_if_adjacent_indices_match, + mulmod( + mload(QL_EVAL_LOC), + mulmod( + mload(QR_EVAL_LOC), + mulmod(mload(QMEMORY_EVAL_LOC), mload(POW_PARTIAL_EVALUATION_LOC), p), + p + ), + p + ), + p + ) + ) + + // ROM_CONSISTENCY_CHECK_2 + mstore( + SUBRELATION_EVAL_15_LOC, + mulmod( + index_is_monotonically_increasing, + mulmod( + mload(QL_EVAL_LOC), + mulmod( + mload(QR_EVAL_LOC), + mulmod(mload(QMEMORY_EVAL_LOC), mload(POW_PARTIAL_EVALUATION_LOC), p), + p + ), + p + ), + p + ) + ) + + mstore( + AUX_ROM_CONSISTENCY_CHECK_IDENTITY, + mulmod(memory_record_check, mulmod(mload(QL_EVAL_LOC), 
mload(QR_EVAL_LOC), p), p) + ) + + { + /** + * RAM Consistency Check + * + * The 'access' type of the record is extracted with the expression `w_4 - ap.partial_record_check` + * (i.e. for an honest Prover `w1 * eta + w2 * eta^2 + w3 * eta^3 - w4 = access`. + * This is validated by requiring `access` to be boolean + * + * For two adjacent entries in the sorted list if _both_ + * A) index values match + * B) adjacent access value is 0 (i.e. next gate is a READ) + * then + * C) both values must match. + * The gate boolean check is + * (A && B) => C === !(A && B) || C === !A || !B || C + * + * N.B. it is the responsibility of the circuit writer to ensure that every RAM cell is initialized + * with a WRITE operation. + */ + /** + * next_gate_access_type = w_3_shift * eta_three; + * next_gate_access_type += (w_2_shift * eta_two); + * next_gate_access_type += (w_1_shift * eta); + * next_gate_access_type += w_4_shift; + * next_gate_access_type *= eta; + * next_gate_access_type = w_4_omega - next_gate_access_type; + */ + let next_gate_access_type := mulmod(mload(W3_SHIFT_EVAL_LOC), mload(ETA_THREE_CHALLENGE), p) + next_gate_access_type := + addmod( + next_gate_access_type, mulmod(mload(W2_SHIFT_EVAL_LOC), mload(ETA_TWO_CHALLENGE), p), p + ) + next_gate_access_type := + addmod(next_gate_access_type, mulmod(mload(W1_SHIFT_EVAL_LOC), mload(ETA_CHALLENGE), p), p) + next_gate_access_type := addmod(mload(W4_SHIFT_EVAL_LOC), sub(p, next_gate_access_type), p) + + // value_delta = w_3_omega - w_3 + let value_delta := addmod(mload(W3_SHIFT_EVAL_LOC), sub(p, mload(W3_EVAL_LOC)), p) + // adjacent_values_match_if_adjacent_indices_match_and_next_access_is_a_read_operation = (1 - index_delta) * value_delta * (1 - next_gate_access_type); + + let adjacent_values_match_if_adjacent_indices_match_and_next_access_is_a_read_operation := + mulmod( + addmod(1, sub(p, index_delta), p), + mulmod(value_delta, addmod(1, sub(p, next_gate_access_type), p), p), + p + ) + + // We can't apply the RAM 
consistency check identity on the final entry in the sorted list (the wires in the + // next gate would make the identity fail). We need to validate that its 'access type' bool is correct. Can't + // do with an arithmetic gate because of the `eta` factors. We need to check that the *next* gate's access + // type is correct, to cover this edge case + // deg 2 or 4 + /** + * access_type = w_4 - partial_record_check + * access_check = access_type^2 - access_type + * next_gate_access_type_is_boolean = next_gate_access_type^2 - next_gate_access_type + */ + let access_type := addmod(mload(W4_EVAL_LOC), sub(p, partial_record_check), p) + let access_check := mulmod(access_type, addmod(access_type, P_SUB_1, p), p) + let next_gate_access_type_is_boolean := + mulmod(next_gate_access_type, addmod(next_gate_access_type, P_SUB_1, p), p) + + // scaled_activation_selector = q_arith * q_aux * alpha + let scaled_activation_selector := + mulmod( + mload(QO_EVAL_LOC), + mulmod(mload(QMEMORY_EVAL_LOC), mload(POW_PARTIAL_EVALUATION_LOC), p), + p + ) + + mstore( + SUBRELATION_EVAL_16_LOC, + mulmod( + adjacent_values_match_if_adjacent_indices_match_and_next_access_is_a_read_operation, + scaled_activation_selector, + p + ) + ) + + mstore( + SUBRELATION_EVAL_17_LOC, + mulmod(index_is_monotonically_increasing, scaled_activation_selector, p) + ) + + mstore( + SUBRELATION_EVAL_18_LOC, + mulmod(next_gate_access_type_is_boolean, scaled_activation_selector, p) + ) + + mstore(AUX_RAM_CONSISTENCY_CHECK_IDENTITY, mulmod(access_check, mload(QO_EVAL_LOC), p)) + } + + { + // timestamp_delta = w_2_omega - w_2 + let timestamp_delta := addmod(mload(W2_SHIFT_EVAL_LOC), sub(p, mload(W2_EVAL_LOC)), p) + + // RAM_timestamp_check_identity = (1 - index_delta) * timestamp_delta - w_3 + let RAM_TIMESTAMP_CHECK_IDENTITY := + addmod( + mulmod(timestamp_delta, addmod(1, sub(p, index_delta), p), p), + sub(p, mload(W3_EVAL_LOC)), + p + ) + + /** + * memory_identity = ROM_consistency_check_identity; + * memory_identity 
+= RAM_timestamp_check_identity * q_4; + * memory_identity += memory_record_check * q_m; + * memory_identity *= q_1; + * memory_identity += (RAM_consistency_check_identity * q_arith); + * + * auxiliary_identity = memory_identity + non_native_field_identity + limb_accumulator_identity; + * auxiliary_identity *= q_aux; + * auxiliary_identity *= alpha_base; + */ + let memory_identity := mload(AUX_ROM_CONSISTENCY_CHECK_IDENTITY) + memory_identity := + addmod( + memory_identity, + mulmod( + RAM_TIMESTAMP_CHECK_IDENTITY, mulmod(mload(Q4_EVAL_LOC), mload(QL_EVAL_LOC), p), p + ), + p + ) + + memory_identity := + addmod( + memory_identity, + mulmod( + mload(AUX_MEMORY_CHECK_IDENTITY), + mulmod(mload(QM_EVAL_LOC), mload(QL_EVAL_LOC), p), + p + ), + p + ) + memory_identity := addmod(memory_identity, mload(AUX_RAM_CONSISTENCY_CHECK_IDENTITY), p) + + memory_identity := + mulmod( + memory_identity, + mulmod(mload(QMEMORY_EVAL_LOC), mload(POW_PARTIAL_EVALUATION_LOC), p), + p + ) + mstore(SUBRELATION_EVAL_13_LOC, memory_identity) + } + } + } + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* NON NATIVE FIELD RELATION */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + { + /** + * Non native field arithmetic gate 2 + * _ _ + * / _ _ _ 14 \ + * q_2 . q_4 | (w_1 . w_2) + (w_1 . w_2) + (w_1 . w_4 + w_2 . w_3 - w_3) . 2 - w_3 - w_4 | + * \_ _/ + * + * limb_subproduct = w_1 . w_2_shift + w_1_shift . 
w_2 + * non_native_field_gate_2 = w_1 * w_4 + w_4 * w_3 - w_3_shift + * non_native_field_gate_2 = non_native_field_gate_2 * limb_size + * non_native_field_gate_2 -= w_4_shift + * non_native_field_gate_2 += limb_subproduct + * non_native_field_gate_2 *= q_4 + * limb_subproduct *= limb_size + * limb_subproduct += w_1_shift * w_2 + * non_native_field_gate_1 = (limb_subproduct + w_3 + w_4) * q_3 + * non_native_field_gate_3 = (limb_subproduct + w_4 - (w_3_shift + w_4_shift)) * q_m + * non_native_field_identity = (non_native_field_gate_1 + non_native_field_gate_2 + non_native_field_gate_3) * q_2 + */ + let limb_subproduct := + addmod( + mulmod(mload(W1_EVAL_LOC), mload(W2_SHIFT_EVAL_LOC), p), + mulmod(mload(W1_SHIFT_EVAL_LOC), mload(W2_EVAL_LOC), p), + p + ) + + let non_native_field_gate_2 := + addmod( + addmod( + mulmod(mload(W1_EVAL_LOC), mload(W4_EVAL_LOC), p), + mulmod(mload(W2_EVAL_LOC), mload(W3_EVAL_LOC), p), + p + ), + sub(p, mload(W3_SHIFT_EVAL_LOC)), + p + ) + non_native_field_gate_2 := mulmod(non_native_field_gate_2, LIMB_SIZE, p) + non_native_field_gate_2 := addmod(non_native_field_gate_2, sub(p, mload(W4_SHIFT_EVAL_LOC)), p) + non_native_field_gate_2 := addmod(non_native_field_gate_2, limb_subproduct, p) + non_native_field_gate_2 := mulmod(non_native_field_gate_2, mload(Q4_EVAL_LOC), p) + + limb_subproduct := mulmod(limb_subproduct, LIMB_SIZE, p) + limb_subproduct := + addmod(limb_subproduct, mulmod(mload(W1_SHIFT_EVAL_LOC), mload(W2_SHIFT_EVAL_LOC), p), p) + + let non_native_field_gate_1 := + mulmod( + addmod(limb_subproduct, sub(p, addmod(mload(W3_EVAL_LOC), mload(W4_EVAL_LOC), p)), p), + mload(QO_EVAL_LOC), + p + ) + + let non_native_field_gate_3 := + mulmod( + addmod( + addmod(limb_subproduct, mload(W4_EVAL_LOC), p), + sub(p, addmod(mload(W3_SHIFT_EVAL_LOC), mload(W4_SHIFT_EVAL_LOC), p)), + p + ), + mload(QM_EVAL_LOC), + p + ) + let non_native_field_identity := + mulmod( + addmod(addmod(non_native_field_gate_1, non_native_field_gate_2, p), 
non_native_field_gate_3, p), + mload(QR_EVAL_LOC), + p + ) + + mstore(AUX_NON_NATIVE_FIELD_IDENTITY, non_native_field_identity) + } + + { + /** + * limb_accumulator_1 = w_2_omega; + * limb_accumulator_1 *= SUBLIMB_SHIFT; + * limb_accumulator_1 += w_1_omega; + * limb_accumulator_1 *= SUBLIMB_SHIFT; + * limb_accumulator_1 += w_3; + * limb_accumulator_1 *= SUBLIMB_SHIFT; + * limb_accumulator_1 += w_2; + * limb_accumulator_1 *= SUBLIMB_SHIFT; + * limb_accumulator_1 += w_1; + * limb_accumulator_1 -= w_4; + * limb_accumulator_1 *= q_4; + */ + let limb_accumulator_1 := mulmod(mload(W2_SHIFT_EVAL_LOC), SUBLIMB_SHIFT, p) + limb_accumulator_1 := addmod(limb_accumulator_1, mload(W1_SHIFT_EVAL_LOC), p) + limb_accumulator_1 := mulmod(limb_accumulator_1, SUBLIMB_SHIFT, p) + limb_accumulator_1 := addmod(limb_accumulator_1, mload(W3_EVAL_LOC), p) + limb_accumulator_1 := mulmod(limb_accumulator_1, SUBLIMB_SHIFT, p) + limb_accumulator_1 := addmod(limb_accumulator_1, mload(W2_EVAL_LOC), p) + limb_accumulator_1 := mulmod(limb_accumulator_1, SUBLIMB_SHIFT, p) + limb_accumulator_1 := addmod(limb_accumulator_1, mload(W1_EVAL_LOC), p) + limb_accumulator_1 := addmod(limb_accumulator_1, sub(p, mload(W4_EVAL_LOC)), p) + limb_accumulator_1 := mulmod(limb_accumulator_1, mload(Q4_EVAL_LOC), p) + + /** + * limb_accumulator_2 = w_3_omega; + * limb_accumulator_2 *= SUBLIMB_SHIFT; + * limb_accumulator_2 += w_2_omega; + * limb_accumulator_2 *= SUBLIMB_SHIFT; + * limb_accumulator_2 += w_1_omega; + * limb_accumulator_2 *= SUBLIMB_SHIFT; + * limb_accumulator_2 += w_4; + * limb_accumulator_2 *= SUBLIMB_SHIFT; + * limb_accumulator_2 += w_3; + * limb_accumulator_2 -= w_4_omega; + * limb_accumulator_2 *= q_m; + */ + let limb_accumulator_2 := mulmod(mload(W3_SHIFT_EVAL_LOC), SUBLIMB_SHIFT, p) + limb_accumulator_2 := addmod(limb_accumulator_2, mload(W2_SHIFT_EVAL_LOC), p) + limb_accumulator_2 := mulmod(limb_accumulator_2, SUBLIMB_SHIFT, p) + limb_accumulator_2 := addmod(limb_accumulator_2, 
mload(W1_SHIFT_EVAL_LOC), p) + limb_accumulator_2 := mulmod(limb_accumulator_2, SUBLIMB_SHIFT, p) + limb_accumulator_2 := addmod(limb_accumulator_2, mload(W4_EVAL_LOC), p) + limb_accumulator_2 := mulmod(limb_accumulator_2, SUBLIMB_SHIFT, p) + limb_accumulator_2 := addmod(limb_accumulator_2, mload(W3_EVAL_LOC), p) + limb_accumulator_2 := addmod(limb_accumulator_2, sub(p, mload(W4_SHIFT_EVAL_LOC)), p) + limb_accumulator_2 := mulmod(limb_accumulator_2, mload(QM_EVAL_LOC), p) + + let limb_accumulator_identity := addmod(limb_accumulator_1, limb_accumulator_2, p) + limb_accumulator_identity := mulmod(limb_accumulator_identity, mload(QO_EVAL_LOC), p) + + let nnf_identity := addmod(mload(AUX_NON_NATIVE_FIELD_IDENTITY), limb_accumulator_identity, p) + nnf_identity := + mulmod(nnf_identity, mulmod(mload(QNNF_EVAL_LOC), mload(POW_PARTIAL_EVALUATION_LOC), p), p) + + mstore(SUBRELATION_EVAL_19_LOC, nnf_identity) + } + + /* + * Poseidon External Relation + */ + { + let s1 := addmod(mload(W1_EVAL_LOC), mload(QL_EVAL_LOC), p) + let s2 := addmod(mload(W2_EVAL_LOC), mload(QR_EVAL_LOC), p) + let s3 := addmod(mload(W3_EVAL_LOC), mload(QO_EVAL_LOC), p) + let s4 := addmod(mload(W4_EVAL_LOC), mload(Q4_EVAL_LOC), p) + + // u1 := s1 * s1 * s1 * s1 * s1; + let t0 := mulmod(s1, s1, p) + let u1 := mulmod(t0, mulmod(t0, s1, p), p) + + // u2 := s2 * s2 * s2 * s2 * s2; + t0 := mulmod(s2, s2, p) + let u2 := mulmod(t0, mulmod(t0, s2, p), p) + + // u3 := s3 * s3 * s3 * s3 * s3; + t0 := mulmod(s3, s3, p) + let u3 := mulmod(t0, mulmod(t0, s3, p), p) + + // u4 := s4 * s4 * s4 * s4 * s4; + t0 := mulmod(s4, s4, p) + let u4 := mulmod(t0, mulmod(t0, s4, p), p) + + // matrix mul v = M_E * u with 14 additions + t0 := addmod(u1, u2, p) + let t1 := addmod(u3, u4, p) + + let t2 := addmod(u2, u2, p) + t2 := addmod(t2, t1, p) + + let t3 := addmod(u4, u4, p) + t3 := addmod(t3, t0, p) + + let v4 := addmod(t1, t1, p) + v4 := addmod(v4, v4, p) + v4 := addmod(v4, t3, p) + + let v2 := addmod(t0, t0, p) + v2 := 
addmod(v2, v2, p) + v2 := addmod(v2, t2, p) + + let v1 := addmod(t3, v2, p) + let v3 := addmod(t2, v4, p) + + let q_pos_by_scaling := + mulmod(mload(QPOSEIDON2_EXTERNAL_EVAL_LOC), mload(POW_PARTIAL_EVALUATION_LOC), p) + + mstore( + SUBRELATION_EVAL_20_LOC, + mulmod(q_pos_by_scaling, addmod(v1, sub(p, mload(W1_SHIFT_EVAL_LOC)), p), p) + ) + + mstore( + SUBRELATION_EVAL_21_LOC, + mulmod(q_pos_by_scaling, addmod(v2, sub(p, mload(W2_SHIFT_EVAL_LOC)), p), p) + ) + + mstore( + SUBRELATION_EVAL_22_LOC, + mulmod(q_pos_by_scaling, addmod(v3, sub(p, mload(W3_SHIFT_EVAL_LOC)), p), p) + ) + + mstore( + SUBRELATION_EVAL_23_LOC, + mulmod(q_pos_by_scaling, addmod(v4, sub(p, mload(W4_SHIFT_EVAL_LOC)), p), p) + ) + } + + /* + * Poseidon Internal Relation + */ + { + let s1 := addmod(mload(W1_EVAL_LOC), mload(QL_EVAL_LOC), p) + + // apply s-box round + let t0 := mulmod(s1, s1, p) + let u1 := mulmod(t0, mulmod(t0, s1, p), p) + let u2 := mload(W2_EVAL_LOC) + let u3 := mload(W3_EVAL_LOC) + let u4 := mload(W4_EVAL_LOC) + + // matrix mul v = M_I * u 4 muls and 7 additions + let u_sum := addmod(u1, u2, p) + u_sum := addmod(u_sum, addmod(u3, u4, p), p) + + let q_pos_by_scaling := + mulmod(mload(QPOSEIDON2_INTERNAL_EVAL_LOC), mload(POW_PARTIAL_EVALUATION_LOC), p) + + let v1 := addmod(mulmod(u1, POS_INTERNAL_MATRIX_D_0, p), u_sum, p) + + mstore( + SUBRELATION_EVAL_24_LOC, + mulmod(q_pos_by_scaling, addmod(v1, sub(p, mload(W1_SHIFT_EVAL_LOC)), p), p) + ) + let v2 := addmod(mulmod(u2, POS_INTERNAL_MATRIX_D_1, p), u_sum, p) + + mstore( + SUBRELATION_EVAL_25_LOC, + mulmod(q_pos_by_scaling, addmod(v2, sub(p, mload(W2_SHIFT_EVAL_LOC)), p), p) + ) + let v3 := addmod(mulmod(u3, POS_INTERNAL_MATRIX_D_2, p), u_sum, p) + + mstore( + SUBRELATION_EVAL_26_LOC, + mulmod(q_pos_by_scaling, addmod(v3, sub(p, mload(W3_SHIFT_EVAL_LOC)), p), p) + ) + + let v4 := addmod(mulmod(u4, POS_INTERNAL_MATRIX_D_3, p), u_sum, p) + mstore( + SUBRELATION_EVAL_27_LOC, + mulmod(q_pos_by_scaling, addmod(v4, sub(p, 
mload(W4_SHIFT_EVAL_LOC)), p), p) + ) + } + + // Scale and batch subrelations by subrelation challenges + // linear combination of subrelations + let accumulator := mload(SUBRELATION_EVAL_0_LOC) + + // Below is an unrolled variant of the following loop + // for (uint256 i = 1; i < NUMBER_OF_SUBRELATIONS; ++i) { + // accumulator = accumulator + evaluations[i] * subrelationChallenges[i - 1]; + // } + + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_1_LOC), mload(ALPHA_CHALLENGE_0), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_2_LOC), mload(ALPHA_CHALLENGE_1), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_3_LOC), mload(ALPHA_CHALLENGE_2), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_4_LOC), mload(ALPHA_CHALLENGE_3), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_5_LOC), mload(ALPHA_CHALLENGE_4), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_6_LOC), mload(ALPHA_CHALLENGE_5), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_7_LOC), mload(ALPHA_CHALLENGE_6), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_8_LOC), mload(ALPHA_CHALLENGE_7), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_9_LOC), mload(ALPHA_CHALLENGE_8), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_10_LOC), mload(ALPHA_CHALLENGE_9), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_11_LOC), mload(ALPHA_CHALLENGE_10), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_12_LOC), mload(ALPHA_CHALLENGE_11), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_13_LOC), mload(ALPHA_CHALLENGE_12), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_14_LOC), mload(ALPHA_CHALLENGE_13), p), p) + accumulator := + addmod(accumulator, 
mulmod(mload(SUBRELATION_EVAL_15_LOC), mload(ALPHA_CHALLENGE_14), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_16_LOC), mload(ALPHA_CHALLENGE_15), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_17_LOC), mload(ALPHA_CHALLENGE_16), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_18_LOC), mload(ALPHA_CHALLENGE_17), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_19_LOC), mload(ALPHA_CHALLENGE_18), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_20_LOC), mload(ALPHA_CHALLENGE_19), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_21_LOC), mload(ALPHA_CHALLENGE_20), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_22_LOC), mload(ALPHA_CHALLENGE_21), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_23_LOC), mload(ALPHA_CHALLENGE_22), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_24_LOC), mload(ALPHA_CHALLENGE_23), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_25_LOC), mload(ALPHA_CHALLENGE_24), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_26_LOC), mload(ALPHA_CHALLENGE_25), p), p) + accumulator := + addmod(accumulator, mulmod(mload(SUBRELATION_EVAL_27_LOC), mload(ALPHA_CHALLENGE_26), p), p) + + let sumcheck_valid := eq(accumulator, mload(FINAL_ROUND_TARGET_LOC)) + + if iszero(sumcheck_valid) { + mstore(0x00, SUMCHECK_FAILED_SELECTOR) + return(0x00, 0x20) + } + } + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* SUMCHECK -- Complete */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* SHPLEMINI */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + + // Compute powers of evaluation challenge + let cache := mload(GEMINI_R_CHALLENGE) + 
let off := POWERS_OF_EVALUATION_CHALLENGE_0_LOC + mstore(off, cache) + + for { let i := 1 } lt(i, LOG_N) { i := add(i, 1) } { + off := add(off, 0x20) + cache := mulmod(cache, cache, p) + mstore(off, cache) + } + + // Compute Inverted Gemini Denominators + let eval_challenge := mload(SHPLONK_Z_CHALLENGE) + + // TO be inverted in the batch invert below + // TODO: maybe not needed to go in memory + mstore( + INVERTED_GEMINI_DENOMINATOR_0_LOC, + addmod(eval_challenge, sub(p, mload(POWERS_OF_EVALUATION_CHALLENGE_0_LOC)), p) + ) + + mstore( + POS_INVERTED_DENOM_0_LOC, addmod(eval_challenge, sub(p, mload(POWERS_OF_EVALUATION_CHALLENGE_0_LOC)), p) + ) + mstore(NEG_INVERTED_DENOM_0_LOC, addmod(eval_challenge, mload(POWERS_OF_EVALUATION_CHALLENGE_0_LOC), p)) + + // Compute Fold Pos Evaluatios + + // In order to compute fold pos evaluations we need + let store_off := INVERTED_CHALLENEGE_POW_MINUS_U_{{ LOG_N_MINUS_ONE }}_LOC + let pow_off := POWERS_OF_EVALUATION_CHALLENGE_{{ LOG_N_MINUS_ONE }}_LOC + let sumcheck_u_off := SUM_U_CHALLENGE_{{ LOG_N_MINUS_ONE }} + + // TODO: challengePower * (ONE - u) can be cached - measure performance + for { let i := LOG_N } gt(i, 0) { i := sub(i, 1) } { + let u := mload(sumcheck_u_off) + + let challPowerMulMinusU := mulmod(mload(pow_off), addmod(1, sub(p, u), p), p) + + mstore(store_off, addmod(challPowerMulMinusU, u, p)) + + store_off := sub(store_off, 0x20) + pow_off := sub(pow_off, 0x20) + sumcheck_u_off := sub(sumcheck_u_off, 0x20) + } + + // Compute + { + let pos_inverted_off := POS_INVERTED_DENOM_1_LOC + let neg_inverted_off := NEG_INVERTED_DENOM_1_LOC + pow_off := POWERS_OF_EVALUATION_CHALLENGE_1_LOC + + let shplonk_z := mload(SHPLONK_Z_CHALLENGE) + for { let i := 0 } lt(i, sub(LOG_N, 1)) { i := add(i, 1) } { + let pow := mload(pow_off) + + let pos_inv := addmod(shplonk_z, sub(p, pow), p) + mstore(pos_inverted_off, pos_inv) + + let neg_inv := addmod(shplonk_z, pow, p) + mstore(neg_inverted_off, neg_inv) + + pow_off := add(pow_off, 0x20) 
+ pos_inverted_off := add(pos_inverted_off, 0x20) + neg_inverted_off := add(neg_inverted_off, 0x20) + } + } + + // To be inverted + // From: computeFoldPosEvaluations + // Series of challengePower * (ONE - u) + // gemini r challenge + // Inverted denominators + // (shplonkZ - powers of evaluaion challenge[i + 1]) + // (shplonkZ + powers of evaluation challenge[i + 1]) + + // Use scratch space for temps + + let accumulator := mload(GEMINI_R_CHALLENGE) + + /// {{ UNROLL_SECTION_START ACCUMULATE_INVERSES }} + /// {{UNROLL_SECTION_END ACCUMULATE_INVERSES }} + + { + mstore(0, 0x20) + mstore(0x20, 0x20) + mstore(0x40, 0x20) + mstore(0x60, accumulator) + mstore(0x80, P_SUB_2) + mstore(0xa0, p) + if iszero(staticcall(gas(), 0x05, 0x00, 0xc0, 0x00, 0x20)) { + mstore(0x00, MODEXP_FAILED_SELECTOR) + revert(0x00, 0x04) + } + accumulator := mload(0x00) + } + + /// {{ UNROLL_SECTION_START COLLECT_INVERSES }} + /// {{ UNROLL_SECTION_END COLLECT_INVERSES }} + + let inverted_gemini_r := accumulator + + let unshifted_scalar := 0 + let shifted_scalar := 0 + { + let pos_inverted_denominator := mload(POS_INVERTED_DENOM_0_LOC) + let neg_inverted_denominator := mload(NEG_INVERTED_DENOM_0_LOC) + let shplonk_nu := mload(SHPLONK_NU_CHALLENGE) + + unshifted_scalar := addmod(pos_inverted_denominator, mulmod(shplonk_nu, neg_inverted_denominator, p), p) + + // accumulator takes the value of `INVERTED_GEMINI_DENOMINATOR_0` here + shifted_scalar := + mulmod( + accumulator, // (1 / gemini_r_challenge) + // (inverse_vanishing_evals[0]) - (shplonk_nu * inverse_vanishing_evals[1]) + addmod( + pos_inverted_denominator, + // - (shplonk_nu * inverse_vanishing_evals[1]) + sub(p, mulmod(shplonk_nu, neg_inverted_denominator, p)), + p + ), + p + ) + } + + // TODO: Write a comment that describes the process of accumulating commitments and scalars + // into one large value that will be used on the rhs of the pairing check + + // Accumulators + let batching_challenge := 1 + let batched_evaluation := 0 + + let 
neg_unshifted_scalar := sub(p, unshifted_scalar) + let neg_shifted_scalar := sub(p, shifted_scalar) + + mstore(BATCH_SCALAR_0_LOC, 1) + let rho := mload(RHO_CHALLENGE) + + // Unrolled for the loop below - where NUMBER_UNSHIFTED = 36 + // for (uint256 i = 1; i <= NUMBER_UNSHIFTED; ++i) { + // scalars[i] = mem.unshiftedScalar.neg() * mem.batchingChallenge; + // mem.batchedEvaluation = mem.batchedEvaluation + (proof.sumcheckEvaluations[i - 1] * mem.batchingChallenge); + // mem.batchingChallenge = mem.batchingChallenge * tp.rho; + // } + + // Calculate the scalars and batching challenge for the unshifted entities + // 0: QM_EVAL_LOC + mstore(BATCH_SCALAR_1_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(QM_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 1: QC_EVAL_LOC + mstore(BATCH_SCALAR_2_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(QC_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 2: QL_EVAL_LOC + mstore(BATCH_SCALAR_3_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(QL_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 3: QR_EVAL_LOC + mstore(BATCH_SCALAR_4_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(QR_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 4: QO_EVAL_LOC + mstore(BATCH_SCALAR_5_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(QO_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 5: Q4_EVAL_LOC + 
mstore(BATCH_SCALAR_6_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(Q4_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 6: QLOOKUP_EVAL_LOC + mstore(BATCH_SCALAR_7_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(QLOOKUP_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 7: QARITH_EVAL_LOC + mstore(BATCH_SCALAR_8_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(QARITH_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 8: QRANGE_EVAL_LOC + mstore(BATCH_SCALAR_9_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(QRANGE_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 9: QELLIPTIC_EVAL_LOC + mstore(BATCH_SCALAR_10_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := + addmod(batched_evaluation, mulmod(mload(QELLIPTIC_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 10: QMEMORY_EVAL_LOC + mstore(BATCH_SCALAR_11_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(QMEMORY_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 11: QNNF_EVAL_LOC + mstore(BATCH_SCALAR_12_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(QNNF_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 12: QPOSEIDON2_EXTERNAL_EVAL_LOC + mstore(BATCH_SCALAR_13_LOC, 
mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := + addmod(batched_evaluation, mulmod(mload(QPOSEIDON2_EXTERNAL_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 13: QPOSEIDON2_INTERNAL_EVAL_LOC + mstore(BATCH_SCALAR_14_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := + addmod(batched_evaluation, mulmod(mload(QPOSEIDON2_INTERNAL_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 14: SIGMA1_EVAL_LOC + mstore(BATCH_SCALAR_15_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(SIGMA1_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 15: SIGMA2_EVAL_LOC + mstore(BATCH_SCALAR_16_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(SIGMA2_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 16: SIGMA3_EVAL_LOC + mstore(BATCH_SCALAR_17_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(SIGMA3_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 17: SIGMA4_EVAL_LOC + mstore(BATCH_SCALAR_18_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(SIGMA4_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 18: ID1_EVAL_LOC + mstore(BATCH_SCALAR_19_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(ID1_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 19: ID2_EVAL_LOC + mstore(BATCH_SCALAR_20_LOC, 
mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(ID2_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 20: ID3_EVAL_LOC + mstore(BATCH_SCALAR_21_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(ID3_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 21: ID4_EVAL_LOC + mstore(BATCH_SCALAR_22_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(ID4_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 22: TABLE1_EVAL_LOC + mstore(BATCH_SCALAR_23_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(TABLE1_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 23: TABLE2_EVAL_LOC + mstore(BATCH_SCALAR_24_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(TABLE2_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 24: TABLE3_EVAL_LOC + mstore(BATCH_SCALAR_25_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(TABLE3_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 25: TABLE4_EVAL_LOC + mstore(BATCH_SCALAR_26_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(TABLE4_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 26: LAGRANGE_FIRST_EVAL_LOC + mstore(BATCH_SCALAR_27_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + 
batched_evaluation := + addmod(batched_evaluation, mulmod(mload(LAGRANGE_FIRST_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 27: LAGRANGE_LAST_EVAL_LOC + mstore(BATCH_SCALAR_28_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := + addmod(batched_evaluation, mulmod(mload(LAGRANGE_LAST_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 28: W1_EVAL_LOC + mstore(BATCH_SCALAR_29_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(W1_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 29: W2_EVAL_LOC + mstore(BATCH_SCALAR_30_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(W2_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 30: W3_EVAL_LOC + mstore(BATCH_SCALAR_31_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(W3_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 31: W4_EVAL_LOC + mstore(BATCH_SCALAR_32_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(W4_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 32: Z_PERM_EVAL_LOC + mstore(BATCH_SCALAR_33_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(Z_PERM_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 33: LOOKUP_INVERSES_EVAL_LOC + mstore(BATCH_SCALAR_34_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := + addmod(batched_evaluation, 
mulmod(mload(LOOKUP_INVERSES_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 34: LOOKUP_READ_COUNTS_EVAL_LOC + mstore(BATCH_SCALAR_35_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := + addmod(batched_evaluation, mulmod(mload(LOOKUP_READ_COUNTS_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 35: LOOKUP_READ_TAGS_EVAL_LOC + mstore(BATCH_SCALAR_36_LOC, mulmod(neg_unshifted_scalar, batching_challenge, p)) + batched_evaluation := + addmod(batched_evaluation, mulmod(mload(LOOKUP_READ_TAGS_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // Unrolled for NUMBER_OF_SHIFTED_ENTITIES = 5 + // for (uint256 i = NUMBER_UNSHIFTED + 1; i <= NUMBER_OF_ENTITIES; ++i) { + // scalars[i] = mem.shiftedScalar.neg() * mem.batchingChallenge; + // mem.batchedEvaluation = mem.batchedEvaluation + (proof.sumcheckEvaluations[i - 1] * mem.batchingChallenge); + // mem.batchingChallenge = mem.batchingChallenge * tp.rho; + // } + + // 28: W1_EVAL_LOC + mstore( + BATCH_SCALAR_29_LOC, + addmod(mload(BATCH_SCALAR_29_LOC), mulmod(neg_shifted_scalar, batching_challenge, p), p) + ) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(W1_SHIFT_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 29: W2_EVAL_LOC + mstore( + BATCH_SCALAR_30_LOC, + addmod(mload(BATCH_SCALAR_30_LOC), mulmod(neg_shifted_scalar, batching_challenge, p), p) + ) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(W2_SHIFT_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 30: W3_EVAL_LOC + mstore( + BATCH_SCALAR_31_LOC, + addmod(mload(BATCH_SCALAR_31_LOC), mulmod(neg_shifted_scalar, batching_challenge, p), p) + ) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(W3_SHIFT_EVAL_LOC), 
batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 31: W4_EVAL_LOC + mstore( + BATCH_SCALAR_32_LOC, + addmod(mload(BATCH_SCALAR_32_LOC), mulmod(neg_shifted_scalar, batching_challenge, p), p) + ) + batched_evaluation := addmod(batched_evaluation, mulmod(mload(W4_SHIFT_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + // 32: Z_PERM_EVAL_LOC + mstore( + BATCH_SCALAR_33_LOC, + addmod(mload(BATCH_SCALAR_33_LOC), mulmod(neg_shifted_scalar, batching_challenge, p), p) + ) + batched_evaluation := + addmod(batched_evaluation, mulmod(mload(Z_PERM_SHIFT_EVAL_LOC), batching_challenge, p), p) + batching_challenge := mulmod(batching_challenge, rho, p) + + mstore(BATCHED_EVALUATION_LOC, batched_evaluation) + + // Compute fold pos evaluations + { + // TODO: work out the stack here + mstore(CHALL_POW_LOC, POWERS_OF_EVALUATION_CHALLENGE_{{ LOG_N_MINUS_ONE }}_LOC) + mstore(SUMCHECK_U_LOC, SUM_U_CHALLENGE_{{ LOG_N_MINUS_ONE }}) + mstore(GEMINI_A_LOC, GEMINI_A_EVAL_{{ LOG_N_MINUS_ONE }}) + // Inversion of this value was included in batch inversion above + let inverted_chall_pow_minus_u_loc := INVERTED_CHALLENEGE_POW_MINUS_U_{{ LOG_N_MINUS_ONE }}_LOC + let fold_pos_off := FOLD_POS_EVALUATIONS_{{ LOG_N_MINUS_ONE }}_LOC + + let batchedEvalAcc := batched_evaluation + for { let i := LOG_N } gt(i, 0) { i := sub(i, 1) } { + let chall_pow := mload(mload(CHALL_POW_LOC)) + let sum_check_u := mload(mload(SUMCHECK_U_LOC)) + + // challengePower * batchedEvalAccumulator * 2 + let batchedEvalRoundAcc := mulmod(chall_pow, mulmod(batchedEvalAcc, 2, p), p) + // (challengePower * (ONE - u) - u) + let chall_pow_times_1_minus_u := mulmod(chall_pow, addmod(1, sub(p, sum_check_u), p), p) + + batchedEvalRoundAcc := + addmod( + batchedEvalRoundAcc, + sub( + p, + mulmod( + mload(mload(GEMINI_A_LOC)), addmod(chall_pow_times_1_minus_u, sub(p, sum_check_u), p), p + ) + ), + p + ) + + batchedEvalRoundAcc := 
mulmod(batchedEvalRoundAcc, mload(inverted_chall_pow_minus_u_loc), p) + + batchedEvalAcc := batchedEvalRoundAcc + mstore(fold_pos_off, batchedEvalRoundAcc) + + mstore(CHALL_POW_LOC, sub(mload(CHALL_POW_LOC), 0x20)) + mstore(SUMCHECK_U_LOC, sub(mload(SUMCHECK_U_LOC), 0x20)) + mstore(GEMINI_A_LOC, sub(mload(GEMINI_A_LOC), 0x20)) + inverted_chall_pow_minus_u_loc := sub(inverted_chall_pow_minus_u_loc, 0x20) + fold_pos_off := sub(fold_pos_off, 0x20) + } + } + + let constant_term_acc := mulmod(mload(FOLD_POS_EVALUATIONS_0_LOC), mload(POS_INVERTED_DENOM_0_LOC), p) + { + let shplonk_nu := mload(SHPLONK_NU_CHALLENGE) + + constant_term_acc := + addmod( + constant_term_acc, + mulmod(mload(GEMINI_A_EVAL_0), mulmod(shplonk_nu, mload(NEG_INVERTED_DENOM_0_LOC), p), p), + p + ) + + let shplonk_nu_sqr := mulmod(shplonk_nu, shplonk_nu, p) + batching_challenge := shplonk_nu_sqr + + // TODO: improve scheduling + mstore(SS_POS_INV_DENOM_LOC, POS_INVERTED_DENOM_1_LOC) + mstore(SS_NEG_INV_DENOM_LOC, NEG_INVERTED_DENOM_1_LOC) + + mstore(SS_GEMINI_EVALS_LOC, GEMINI_A_EVAL_1) + let fold_pos_evals_loc := FOLD_POS_EVALUATIONS_1_LOC + + let shplonk_z := mload(SHPLONK_Z_CHALLENGE) + let scalars_loc := BATCH_SCALAR_37_LOC + + for { let i := 0 } lt(i, sub(LOG_N, 1)) { i := add(i, 1) } { + let scaling_factor_pos := mulmod(batching_challenge, mload(mload(SS_POS_INV_DENOM_LOC)), p) + let scaling_factor_neg := + mulmod(batching_challenge, mulmod(shplonk_nu, mload(mload(SS_NEG_INV_DENOM_LOC)), p), p) + + mstore(scalars_loc, addmod(sub(p, scaling_factor_neg), sub(p, scaling_factor_pos), p)) + + let accum_contribution := mulmod(scaling_factor_neg, mload(mload(SS_GEMINI_EVALS_LOC)), p) + accum_contribution := + addmod(accum_contribution, mulmod(scaling_factor_pos, mload(fold_pos_evals_loc), p), p) + + constant_term_acc := addmod(constant_term_acc, accum_contribution, p) + + batching_challenge := mulmod(batching_challenge, shplonk_nu_sqr, p) + + mstore(SS_POS_INV_DENOM_LOC, 
add(mload(SS_POS_INV_DENOM_LOC), 0x20)) + mstore(SS_NEG_INV_DENOM_LOC, add(mload(SS_NEG_INV_DENOM_LOC), 0x20)) + mstore(SS_GEMINI_EVALS_LOC, add(mload(SS_GEMINI_EVALS_LOC), 0x20)) + fold_pos_evals_loc := add(fold_pos_evals_loc, 0x20) + scalars_loc := add(scalars_loc, 0x20) + } + } + + let precomp_success_flag := 1 + let q := Q // EC group order + { + // The initial accumulator = 1 * shplonk_q + // WORKTODO(md): we can ignore this accumulation as we are multiplying by 1, + // Just set the accumulator instead. + mstore(SCALAR_LOCATION, 0x1) + { + let x := mload(SHPLONK_Q_X_LOC) + let y := mload(SHPLONK_Q_Y_LOC) + let xx := mulmod(x, x, q) + // validate on curve + precomp_success_flag := + and(eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q)), precomp_success_flag) + } + mcopy(G1_LOCATION, SHPLONK_Q_X_LOC, 0x40) + precomp_success_flag := staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR, 0x40) + } + + // Accumulate vk points + loadVk() + { + // Acumulator = acumulator + scalar[1] * vk[0] + mcopy(G1_LOCATION, Q_M_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_1_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[2] * vk[1] + mcopy(G1_LOCATION, Q_C_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_2_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[3] * vk[2] + mcopy(G1_LOCATION, Q_L_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_3_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + 
and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[4] * vk[3] + mcopy(G1_LOCATION, Q_R_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_4_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[5] * vk[4] + mcopy(G1_LOCATION, Q_O_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_5_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[6] * vk[5] + mcopy(G1_LOCATION, Q_4_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_6_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[7] * vk[6] + mcopy(G1_LOCATION, Q_LOOKUP_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_7_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[8] * vk[7] + mcopy(G1_LOCATION, Q_ARITH_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_8_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + 
scalar[9] * vk[8] + mcopy(G1_LOCATION, Q_DELTA_RANGE_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_9_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[10] * vk[9] + mcopy(G1_LOCATION, Q_ELLIPTIC_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_10_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[11] * vk[10] + mcopy(G1_LOCATION, Q_MEMORY_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_11_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[12] * vk[11] + mcopy(G1_LOCATION, Q_NNF_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_12_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[13] * vk[12] + mcopy(G1_LOCATION, Q_POSEIDON_2_EXTERNAL_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_13_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[14] * vk[13] + mcopy(G1_LOCATION, Q_POSEIDON_2_INTERNAL_X_LOC, 0x40) 
+ mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_14_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[15] * vk[14] + mcopy(G1_LOCATION, SIGMA_1_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_15_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[16] * vk[15] + mcopy(G1_LOCATION, SIGMA_2_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_16_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[17] * vk[16] + mcopy(G1_LOCATION, SIGMA_3_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_17_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[18] * vk[17] + mcopy(G1_LOCATION, SIGMA_4_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_18_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[19] * vk[18] + mcopy(G1_LOCATION, ID_1_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_19_LOC)) + precomp_success_flag := + 
and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[20] * vk[19] + mcopy(G1_LOCATION, ID_2_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_20_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[21] * vk[20] + mcopy(G1_LOCATION, ID_3_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_21_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[22] * vk[21] + mcopy(G1_LOCATION, ID_4_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_22_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[23] * vk[22] + mcopy(G1_LOCATION, TABLE_1_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_23_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[24] * vk[23] + mcopy(G1_LOCATION, TABLE_2_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_24_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + 
precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[25] * vk[24] + mcopy(G1_LOCATION, TABLE_3_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_25_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[26] * vk[25] + mcopy(G1_LOCATION, TABLE_4_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_26_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[27] * vk[26] + mcopy(G1_LOCATION, LAGRANGE_FIRST_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_27_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[28] * vk[27] + mcopy(G1_LOCATION, LAGRANGE_LAST_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_28_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + { + let x := mload(W_L_X_LOC) + let y := mload(W_L_Y_LOC) + let xx := mulmod(x, x, q) + // validate on curve + precomp_success_flag := + and(eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q)), precomp_success_flag) + } + + // Accumulate proof points + // Accumulator = accumulator + scalar[29] * w_l + mcopy(G1_LOCATION, W_L_X_LOC, 0x40) + 
mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_29_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + { + let x := mload(W_R_X_LOC) + let y := mload(W_R_Y_LOC) + let xx := mulmod(x, x, q) + // validate on curve + precomp_success_flag := + and(eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q)), precomp_success_flag) + } + + // Accumulator = accumulator + scalar[30] * w_r + mcopy(G1_LOCATION, W_R_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_30_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + { + let x := mload(W_O_X_LOC) + let y := mload(W_O_Y_LOC) + let xx := mulmod(x, x, q) + // validate on curve + precomp_success_flag := + and(eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q)), precomp_success_flag) + } + + // Accumulator = accumulator + scalar[31] * w_o + mcopy(G1_LOCATION, W_O_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_31_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulator = accumulator + scalar[32] * w_4 + { + let x := mload(W_4_X_LOC) + let y := mload(W_4_Y_LOC) + let xx := mulmod(x, x, q) + // validate on curve + precomp_success_flag := + and(eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q)), precomp_success_flag) + } + mcopy(G1_LOCATION, W_4_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_32_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + 
and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + { + let x := mload(Z_PERM_X_LOC) + let y := mload(Z_PERM_Y_LOC) + let xx := mulmod(x, x, q) + // validate on curve + precomp_success_flag := + and(eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q)), precomp_success_flag) + } + // Accumulator = accumulator + scalar[33] * z_perm + mcopy(G1_LOCATION, Z_PERM_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_33_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + { + let x := mload(LOOKUP_INVERSES_X_LOC) + let y := mload(LOOKUP_INVERSES_Y_LOC) + let xx := mulmod(x, x, q) + // validate on curve + precomp_success_flag := + and(eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q)), precomp_success_flag) + } + // Accumulator = accumulator + scalar[34] * lookup_inverses + mcopy(G1_LOCATION, LOOKUP_INVERSES_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_34_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + { + let x := mload(LOOKUP_READ_COUNTS_X_LOC) + let y := mload(LOOKUP_READ_COUNTS_Y_LOC) + let xx := mulmod(x, x, q) + // validate on curve + precomp_success_flag := + and(eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q)), precomp_success_flag) + } + // Accumulator = accumulator + scalar[35] * lookup_read_counts + mcopy(G1_LOCATION, LOOKUP_READ_COUNTS_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_35_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + { 
+ let x := mload(LOOKUP_READ_TAGS_X_LOC) + let y := mload(LOOKUP_READ_TAGS_Y_LOC) + let xx := mulmod(x, x, q) + // validate on curve + precomp_success_flag := + and(eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q)), precomp_success_flag) + } + // Accumulator = accumulator + scalar[36] * lookup_read_tags + mcopy(G1_LOCATION, LOOKUP_READ_TAGS_X_LOC, 0x40) + mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_36_LOC)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumulate these LOG_N scalars with the gemini fold univariates + { + { + /// {{ UNROLL_SECTION_START ACCUMULATE_GEMINI_FOLD_UNIVARIATE }} + /// {{ UNROLL_SECTION_END ACCUMULATE_GEMINI_FOLD_UNIVARIATE }} + } + } + + { + // Accumulate the constant term accumulator + // Accumulator = accumulator + 1 * costant term accumulator + mstore(G1_LOCATION, 0x01) + mstore(G1_Y_LOCATION, 0x02) + mstore(SCALAR_LOCATION, constant_term_acc) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 0x40)) + + // Accumlate final quotient commitment into shplonk check + // Accumulator = accumulator + shplonkZ * quotient commitment + { + let x := mload(KZG_QUOTIENT_X_LOC) + let y := mload(KZG_QUOTIENT_Y_LOC) + let xx := mulmod(x, x, q) + // validate on curve + precomp_success_flag := + and(eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q)), precomp_success_flag) + } + mcopy(G1_LOCATION, KZG_QUOTIENT_X_LOC, 0x40) + + mstore(SCALAR_LOCATION, mload(SHPLONK_Z_CHALLENGE)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, ACCUMULATOR_2, 0x40)) + precomp_success_flag := + and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, ACCUMULATOR, 
0x40)) + } + + if iszero(precomp_success_flag) { + mstore(0x00, BATCH_ACCUMULATION_FAILED_SELECTOR) + revert(0x00, 0x04) + } + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* SHPLEMINI - complete */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* PAIRING CHECK */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + { + // P_1 + mstore(0xc0, mload(KZG_QUOTIENT_X_LOC)) + mstore(0xe0, sub(q, mload(KZG_QUOTIENT_Y_LOC))) + + // p_0_agg + // 0x80 - p_0_agg x + // 0xa0 - p_0_agg y + mcopy(0x80, ACCUMULATOR, 0x40) + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* PAIRING AGGREGATION */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + // Read the pairing encoded in the first 16 field elements of the proof + let p0_other_x := mload(PAIRING_POINT_0) + p0_other_x := or(shl(68, mload(PAIRING_POINT_1)), p0_other_x) + p0_other_x := or(shl(136, mload(PAIRING_POINT_2)), p0_other_x) + p0_other_x := or(shl(204, mload(PAIRING_POINT_3)), p0_other_x) + + let p0_other_y := mload(PAIRING_POINT_4) + p0_other_y := or(shl(68, mload(PAIRING_POINT_5)), p0_other_y) + p0_other_y := or(shl(136, mload(PAIRING_POINT_6)), p0_other_y) + p0_other_y := or(shl(204, mload(PAIRING_POINT_7)), p0_other_y) + + let p1_other_x := mload(PAIRING_POINT_8) + p1_other_x := or(shl(68, mload(PAIRING_POINT_9)), p1_other_x) + p1_other_x := or(shl(136, mload(PAIRING_POINT_10)), p1_other_x) + p1_other_x := or(shl(204, mload(PAIRING_POINT_11)), p1_other_x) + + let p1_other_y := mload(PAIRING_POINT_12) + p1_other_y := or(shl(68, mload(PAIRING_POINT_13)), p1_other_y) + p1_other_y := or(shl(136, mload(PAIRING_POINT_14)), p1_other_y) + p1_other_y := or(shl(204, mload(PAIRING_POINT_15)), p1_other_y) + + // Validate p_0_other on curve + let xx := mulmod(p0_other_x, p0_other_x, q) + let xxx := mulmod(xx, p0_other_x, q) + let yy := mulmod(p0_other_y, 
p0_other_y, q) + + let success := eq(yy, addmod(xxx, 3, q)) + + // Validate p_1_other on curve + xx := mulmod(p1_other_x, p1_other_x, q) + xxx := mulmod(xx, p1_other_x, q) + yy := mulmod(p1_other_y, p1_other_y, q) + + success := and(success, eq(yy, addmod(xxx, 3, q))) + + // p_0 + mstore(0x00, p0_other_x) + mstore(0x20, p0_other_y) + + // p_1 + mstore(0x40, p1_other_x) + mstore(0x60, p1_other_y) + + // p_1_agg is already in the correct location + + let recursion_separator := keccak256(0x00, 0x100) + + // Write separator back to scratch space + mstore(0x00, p0_other_x) + + mstore(0x40, recursion_separator) + // recursion_separator * p_0_other + success := and(success, staticcall(gas(), 0x07, 0x00, 0x60, 0x00, 0x40)) + + // (recursion_separator * p_0_other) + p_0_agg + mcopy(0x40, 0x80, 0x40) + // p_0 = (recursion_separator * p_0_other) + p_0_agg + success := and(success, staticcall(gas(), 6, 0x00, 0x80, 0x00, 0x40)) + + mstore(0x40, p1_other_x) + mstore(0x60, p1_other_y) + mstore(0x80, recursion_separator) + + success := and(success, staticcall(gas(), 7, 0x40, 0x60, 0x40, 0x40)) + + // Write p_1_agg back to scratch space + mcopy(0x80, 0xc0, 0x40) + + // 0xc0 - (recursion_separator * p_1_other) + p_1_agg + success := and(success, staticcall(gas(), 6, 0x40, 0x80, 0xc0, 0x40)) + + // G2 [1] + mstore(0x40, 0x198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c2) + mstore(0x60, 0x1800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed) + mstore(0x80, 0x090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b) + mstore(0xa0, 0x12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa) + + // G2 [x] + mstore(0x100, 0x260e01b251f6f1c7e7ff4e580791dee8ea51d87a358e038b4efe30fac09383c1) + mstore(0x120, 0x0118c4d5b837bcc2bc89b5b398b5974e9f5944073b32078b7e231fec938883b0) + mstore(0x140, 0x04fc6369f7110fe3d25156c1bb9a72859cf2a04641f99ba4ee413c80da6a5fe4) + mstore(0x160, 0x22febda3c0c0632a56475b4214e5615e11e6dd3f96e6cea2854a87d4dacc5e55) + + let 
pairing_success := and(success, staticcall(gas(), 8, 0x00, 0x180, 0x00, 0x20)) + if iszero(and(pairing_success, mload(0x00))) { + mstore(0x00, PAIRING_FAILED_SELECTOR) + revert(0x00, 0x04) + } + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* PAIRING CHECK - Complete */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + } + { + mstore(0x00, 0x01) + return(0x00, 0x20) // Proof succeeded! + } + } + } + } +} +)"; + +template std::string field_to_hex(const Field& f) +{ + std::ostringstream os; + os << f; + return os.str(); +} + +inline std::string int_to_hex(size_t i) +{ + std::ostringstream os; + os << "0x" << std::hex << i; + return os.str(); +} + +inline std::string get_optimized_honk_solidity_verifier(auto const& verification_key) +{ + std::string template_str = HONK_CONTRACT_OPT_SOURCE; + + // Helper function to replace template variables + auto set_template_param = [&template_str](const std::string& key, const std::string& value) { + std::string::size_type pos = 0; + std::string pattern = "{{ " + key + " }}"; + while ((pos = template_str.find(pattern, pos)) != std::string::npos) { + template_str.replace(pos, pattern.length(), value); + pos += value.length(); + } + }; + + set_template_param("VK_HASH", field_to_hex(verification_key->hash())); + set_template_param("CIRCUIT_SIZE", std::to_string(1 << verification_key->log_circuit_size)); + set_template_param("LOG_CIRCUIT_SIZE", std::to_string(verification_key->log_circuit_size)); + set_template_param("NUM_PUBLIC_INPUTS", std::to_string(verification_key->num_public_inputs)); + set_template_param("LOG_N_MINUS_ONE", std::to_string(verification_key->log_circuit_size - 1)); + set_template_param("NUMBER_OF_BARYCENTRIC_INVERSES", std::to_string(verification_key->log_circuit_size * 8)); + + uint32_t gemini_fold_univariate_length = static_cast((verification_key->log_circuit_size - 1) * 0x40); + uint32_t gemini_fold_univariate_hash_length = static_cast(gemini_fold_univariate_length + 
0x20); + uint32_t gemini_evals_length = static_cast(verification_key->log_circuit_size * 0x20); + uint32_t gemini_evals_hash_length = static_cast(gemini_evals_length + 0x20); + + set_template_param("GEMINI_FOLD_UNIVARIATE_LENGTH", int_to_hex(gemini_fold_univariate_length)); + set_template_param("GEMINI_FOLD_UNIVARIATE_HASH_LENGTH", int_to_hex(gemini_fold_univariate_hash_length)); + set_template_param("GEMINI_EVALS_LENGTH", int_to_hex(gemini_evals_length)); + set_template_param("GEMINI_EVALS_HASH_LENGTH", int_to_hex(gemini_evals_hash_length)); + + // Verification Key + set_template_param("Q_L_X_LOC", field_to_hex(verification_key->q_l.x)); + set_template_param("Q_L_Y_LOC", field_to_hex(verification_key->q_l.y)); + set_template_param("Q_R_X_LOC", field_to_hex(verification_key->q_r.x)); + set_template_param("Q_R_Y_LOC", field_to_hex(verification_key->q_r.y)); + set_template_param("Q_O_X_LOC", field_to_hex(verification_key->q_o.x)); + set_template_param("Q_O_Y_LOC", field_to_hex(verification_key->q_o.y)); + set_template_param("Q_4_X_LOC", field_to_hex(verification_key->q_4.x)); + set_template_param("Q_4_Y_LOC", field_to_hex(verification_key->q_4.y)); + set_template_param("Q_M_X_LOC", field_to_hex(verification_key->q_m.x)); + set_template_param("Q_M_Y_LOC", field_to_hex(verification_key->q_m.y)); + set_template_param("Q_C_X_LOC", field_to_hex(verification_key->q_c.x)); + set_template_param("Q_C_Y_LOC", field_to_hex(verification_key->q_c.y)); + set_template_param("Q_LOOKUP_X_LOC", field_to_hex(verification_key->q_lookup.x)); + set_template_param("Q_LOOKUP_Y_LOC", field_to_hex(verification_key->q_lookup.y)); + set_template_param("Q_ARITH_X_LOC", field_to_hex(verification_key->q_arith.x)); + set_template_param("Q_ARITH_Y_LOC", field_to_hex(verification_key->q_arith.y)); + set_template_param("Q_DELTA_RANGE_X_LOC", field_to_hex(verification_key->q_delta_range.x)); + set_template_param("Q_DELTA_RANGE_Y_LOC", field_to_hex(verification_key->q_delta_range.y)); + 
set_template_param("Q_ELLIPTIC_X_LOC", field_to_hex(verification_key->q_elliptic.x)); + set_template_param("Q_ELLIPTIC_Y_LOC", field_to_hex(verification_key->q_elliptic.y)); + set_template_param("Q_MEMORY_X_LOC", field_to_hex(verification_key->q_memory.x)); + set_template_param("Q_MEMORY_Y_LOC", field_to_hex(verification_key->q_memory.y)); + set_template_param("Q_NNF_X_LOC", field_to_hex(verification_key->q_nnf.x)); + set_template_param("Q_NNF_Y_LOC", field_to_hex(verification_key->q_nnf.y)); + set_template_param("Q_POSEIDON_2_EXTERNAL_X_LOC", field_to_hex(verification_key->q_poseidon2_external.x)); + set_template_param("Q_POSEIDON_2_EXTERNAL_Y_LOC", field_to_hex(verification_key->q_poseidon2_external.y)); + set_template_param("Q_POSEIDON_2_INTERNAL_X_LOC", field_to_hex(verification_key->q_poseidon2_internal.x)); + set_template_param("Q_POSEIDON_2_INTERNAL_Y_LOC", field_to_hex(verification_key->q_poseidon2_internal.y)); + set_template_param("SIGMA_1_X_LOC", field_to_hex(verification_key->sigma_1.x)); + set_template_param("SIGMA_1_Y_LOC", field_to_hex(verification_key->sigma_1.y)); + set_template_param("SIGMA_2_X_LOC", field_to_hex(verification_key->sigma_2.x)); + set_template_param("SIGMA_2_Y_LOC", field_to_hex(verification_key->sigma_2.y)); + set_template_param("SIGMA_3_X_LOC", field_to_hex(verification_key->sigma_3.x)); + set_template_param("SIGMA_3_Y_LOC", field_to_hex(verification_key->sigma_3.y)); + set_template_param("SIGMA_4_X_LOC", field_to_hex(verification_key->sigma_4.x)); + set_template_param("SIGMA_4_Y_LOC", field_to_hex(verification_key->sigma_4.y)); + set_template_param("TABLE_1_X_LOC", field_to_hex(verification_key->table_1.x)); + set_template_param("TABLE_1_Y_LOC", field_to_hex(verification_key->table_1.y)); + set_template_param("TABLE_2_X_LOC", field_to_hex(verification_key->table_2.x)); + set_template_param("TABLE_2_Y_LOC", field_to_hex(verification_key->table_2.y)); + set_template_param("TABLE_3_X_LOC", field_to_hex(verification_key->table_3.x)); 
+ set_template_param("TABLE_3_Y_LOC", field_to_hex(verification_key->table_3.y)); + set_template_param("TABLE_4_X_LOC", field_to_hex(verification_key->table_4.x)); + set_template_param("TABLE_4_Y_LOC", field_to_hex(verification_key->table_4.y)); + set_template_param("ID_1_X_LOC", field_to_hex(verification_key->id_1.x)); + set_template_param("ID_1_Y_LOC", field_to_hex(verification_key->id_1.y)); + set_template_param("ID_2_X_LOC", field_to_hex(verification_key->id_2.x)); + set_template_param("ID_2_Y_LOC", field_to_hex(verification_key->id_2.y)); + set_template_param("ID_3_X_LOC", field_to_hex(verification_key->id_3.x)); + set_template_param("ID_3_Y_LOC", field_to_hex(verification_key->id_3.y)); + set_template_param("ID_4_X_LOC", field_to_hex(verification_key->id_4.x)); + set_template_param("ID_4_Y_LOC", field_to_hex(verification_key->id_4.y)); + set_template_param("LAGRANGE_FIRST_X_LOC", field_to_hex(verification_key->lagrange_first.x)); + set_template_param("LAGRANGE_FIRST_Y_LOC", field_to_hex(verification_key->lagrange_first.y)); + set_template_param("LAGRANGE_LAST_X_LOC", field_to_hex(verification_key->lagrange_last.x)); + set_template_param("LAGRANGE_LAST_Y_LOC", field_to_hex(verification_key->lagrange_last.y)); + + // Generate unrolled sections based on LOG_N + auto generate_unroll_section = [](const std::string& section_name, auto log_n) { + std::ostringstream code; + + if (section_name == "ACCUMULATE_INVERSES") { + // Generate INVERTED_CHALLENEGE_POW_MINUS_U accumulations + for (int i = 0; i < log_n; ++i) { + code << " // i = " << i << "\n"; + code << " mstore(TEMP_" << i << "_LOC, accumulator)\n"; + code << " accumulator := mulmod(accumulator, mload(INVERTED_CHALLENEGE_POW_MINUS_U_" << i + << "_LOC), p)\n"; + } + + code << "\n // Accumulate pos inverted denom\n"; + int temp_idx = log_n; + for (int i = 0; i < log_n; ++i) { + code << " // i = " << i << "\n"; + code << " mstore(TEMP_" << temp_idx << "_LOC, accumulator)\n"; + code << " accumulator := 
mulmod(accumulator, mload(POS_INVERTED_DENOM_" << i + << "_LOC), p)\n"; + temp_idx++; + } + + code << "\n // Accumulate neg inverted denom\n"; + for (int i = 0; i < log_n; ++i) { + code << " // i = " << i << "\n"; + code << " mstore(TEMP_" << temp_idx << "_LOC, accumulator)\n"; + code << " accumulator := mulmod(accumulator, mload(NEG_INVERTED_DENOM_" << i + << "_LOC), p)\n"; + temp_idx++; + } + } else if (section_name == "COLLECT_INVERSES") { + int temp_idx = 3 * log_n - 1; + + // Process NEG_INVERTED_DENOM in reverse order + code << " // i = " << log_n << "\n"; + for (int i = log_n - 1; i >= 0; --i) { + code << " {\n"; + code << " let tmp := mulmod(accumulator, mload(TEMP_" << temp_idx << "_LOC), p)\n"; + code << " accumulator := mulmod(accumulator, mload(NEG_INVERTED_DENOM_" << i + << "_LOC), p)\n"; + code << " mstore(NEG_INVERTED_DENOM_" << i << "_LOC, tmp)\n"; + code << " }\n"; + if (i > 0) { + code << " // i = " << i << "\n"; + } + temp_idx--; + } + + code << "\n // Unrolled for LOG_N = " << log_n << "\n"; + code << " // i = " << log_n << "\n"; + + // Process POS_INVERTED_DENOM in reverse order + for (int i = log_n - 1; i >= 0; --i) { + code << " {\n"; + code << " let tmp := mulmod(accumulator, mload(TEMP_" << temp_idx << "_LOC), p)\n"; + code << " accumulator := mulmod(accumulator, mload(POS_INVERTED_DENOM_" << i + << "_LOC), p)\n"; + code << " mstore(POS_INVERTED_DENOM_" << i << "_LOC, tmp)\n"; + code << " }\n"; + if (i > 0) { + code << " // i = " << i << "\n"; + } + temp_idx--; + } + + code << "\n // i = " << log_n << "\n"; + + // Process INVERTED_CHALLENEGE_POW_MINUS_U in reverse order + for (int i = log_n - 1; i >= 0; --i) { + code << " {\n"; + code << " let tmp := mulmod(accumulator, mload(TEMP_" << temp_idx << "_LOC), p)\n"; + code << " accumulator := mulmod(accumulator, mload(INVERTED_CHALLENEGE_POW_MINUS_U_" << i + << "_LOC), p)\n"; + code << " mstore(INVERTED_CHALLENEGE_POW_MINUS_U_" << i << "_LOC, tmp)\n"; + code << " }\n"; + if (i > 0) { + code << 
" // i = " << i << "\n"; + } + temp_idx--; + } + } else if (section_name == "ACCUMULATE_GEMINI_FOLD_UNIVARIATE") { + // Generate GEMINI_FOLD_UNIVARIATE accumulations + // We need log_n - 1 folding commitments + for (int i = 0; i < log_n - 1; ++i) { + // Validate on curve then accumulate + code << " {\n"; + code << " let x := mload(GEMINI_FOLD_UNIVARIATE_" << i << "_X_LOC)\n"; + code << " let y := mload(GEMINI_FOLD_UNIVARIATE_" << i << "_Y_LOC)\n"; + code << " let xx := mulmod(x, x, q)\n"; + code << " // validate on curve\n"; + code << " precomp_success_flag := and(eq(mulmod(y, y, q), addmod(mulmod(x, " + "xx, q), 3, q)), precomp_success_flag)\n"; + code << " }\n"; + code << " mcopy(G1_LOCATION, GEMINI_FOLD_UNIVARIATE_" << i << "_X_LOC, 0x40)\n"; + code << " mstore(SCALAR_LOCATION, mload(BATCH_SCALAR_" << (37 + i) << "_LOC))\n"; + code << " precomp_success_flag :=\n"; + code << " and(precomp_success_flag, staticcall(gas(), 7, G1_LOCATION, 0x60, " + "ACCUMULATOR_2, 0x40))\n"; + code << " precomp_success_flag :=\n"; + code << " and(precomp_success_flag, staticcall(gas(), 6, ACCUMULATOR, 0x80, " + "ACCUMULATOR, 0x40))\n"; + if (i < log_n - 2) { + code << "\n"; + } + } + } else if (section_name == "GEMINI_FOLD_UNIVARIATE_ON_CURVE") { + // Generate GEMINI_FOLD_UNIVARIATE_ON_CURVE validations + // We need log_n - 1 folding commitments to validate + for (int i = 0; i < log_n - 1; ++i) { + code << " success_flag := and(success_flag, " + "validateProofPointOnCurve(GEMINI_FOLD_UNIVARIATE_" + << i << "_X_LOC, q))\n"; + } + } + + return code.str(); + }; + + // Replace UNROLL_SECTION blocks + int log_n = static_cast(verification_key->log_circuit_size); + + // Replace ACCUMULATE_INVERSES section + { + std::string::size_type start_pos = template_str.find("/// {{ UNROLL_SECTION_START ACCUMULATE_INVERSES }}"); + std::string::size_type end_pos = template_str.find("/// {{UNROLL_SECTION_END ACCUMULATE_INVERSES }}"); + if (start_pos != std::string::npos && end_pos != std::string::npos) 
{ + std::string::size_type start_line_end = template_str.find("\n", start_pos); + std::string generated_code = generate_unroll_section("ACCUMULATE_INVERSES", log_n); + template_str = template_str.substr(0, start_line_end + 1) + generated_code + template_str.substr(end_pos); + } + } + + // Replace COLLECT_INVERSES section + { + std::string::size_type start_pos = template_str.find("// {{ UNROLL_SECTION_START COLLECT_INVERSES }}"); + std::string::size_type end_pos = template_str.find("// {{ UNROLL_SECTION_END COLLECT_INVERSES }}"); + if (start_pos != std::string::npos && end_pos != std::string::npos) { + std::string::size_type start_line_end = template_str.find("\n", start_pos); + std::string generated_code = generate_unroll_section("COLLECT_INVERSES", log_n); + template_str = template_str.substr(0, start_line_end + 1) + generated_code + template_str.substr(end_pos); + } + } + + // Replace ACCUMULATE_GEMINI_FOLD_UNIVARIATE section + { + std::string::size_type start_pos = + template_str.find("/// {{ UNROLL_SECTION_START ACCUMULATE_GEMINI_FOLD_UNIVARIATE }}"); + std::string::size_type end_pos = + template_str.find("/// {{ UNROLL_SECTION_END ACCUMULATE_GEMINI_FOLD_UNIVARIATE }}"); + if (start_pos != std::string::npos && end_pos != std::string::npos) { + std::string::size_type start_line_end = template_str.find("\n", start_pos); + std::string generated_code = generate_unroll_section("ACCUMULATE_GEMINI_FOLD_UNIVARIATE", log_n); + template_str = template_str.substr(0, start_line_end + 1) + generated_code + template_str.substr(end_pos); + } + } + + // Replace GEMINI_FOLD_UNIVARIATE_ON_CURVE section + { + std::string::size_type start_pos = + template_str.find("/// {{ UNROLL_SECTION_START GEMINI_FOLD_UNIVARIATE_ON_CURVE }}"); + std::string::size_type end_pos = + template_str.find("/// {{ UNROLL_SECTION_END GEMINI_FOLD_UNIVARIATE_ON_CURVE }}"); + if (start_pos != std::string::npos && end_pos != std::string::npos) { + std::string::size_type start_line_end = 
template_str.find("\n", start_pos); + std::string generated_code = generate_unroll_section("GEMINI_FOLD_UNIVARIATE_ON_CURVE", log_n); + template_str = template_str.substr(0, start_line_end + 1) + generated_code + template_str.substr(end_pos); + } + } + + // Replace Memory Layout + { + std::string::size_type start_pos = template_str.find("// {{ SECTION_START MEMORY_LAYOUT }}"); + std::string::size_type end_pos = template_str.find("// {{ SECTION_END MEMORY_LAYOUT }}"); + if (start_pos != std::string::npos && end_pos != std::string::npos) { + std::string::size_type start_line_end = template_str.find("\n", start_pos); + std::string generated_code = generate_memory_offsets(log_n); + template_str = template_str.substr(0, start_line_end + 1) + generated_code + template_str.substr(end_pos); + } + } + + return template_str; +} diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_zk_contract.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_zk_contract.hpp index 4f320556e438..50af696e84e5 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_zk_contract.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_zk_contract.hpp @@ -15,7 +15,7 @@ static const char HONK_ZK_CONTRACT_SOURCE[] = R"( pragma solidity ^0.8.27; interface IVerifier { - function verify(bytes calldata _proof, bytes32[] calldata _publicInputs) external view returns (bool); + function verify(bytes calldata _proof, bytes32[] calldata _publicInputs) external returns (bool); } type Fr is uint256; @@ -494,10 +494,10 @@ library ZKTranscriptLib { pure returns (Fr[CONST_PROOF_SIZE_LOG_N] memory gateChallenges, Fr nextPreviousChallenge) { - for (uint256 i = 0; i < logN; i++) { - previousChallenge = FrLib.fromBytes32(keccak256(abi.encodePacked(Fr.unwrap(previousChallenge)))); - - (gateChallenges[i],) = splitChallenge(previousChallenge); + previousChallenge = FrLib.fromBytes32(keccak256(abi.encodePacked(Fr.unwrap(previousChallenge)))); + (gateChallenges[0],) = 
splitChallenge(previousChallenge); + for (uint256 i = 1; i < logN; i++) { + gateChallenges[i] = gateChallenges[i - 1] * gateChallenges[i - 1]; } nextPreviousChallenge = previousChallenge; } @@ -729,6 +729,7 @@ library RelationsLib { accumulateNnfRelation(purportedEvaluations, evaluations, powPartialEval); accumulatePoseidonExternalRelation(purportedEvaluations, evaluations, powPartialEval); accumulatePoseidonInternalRelation(purportedEvaluations, evaluations, powPartialEval); + // batch the subrelations with the alpha challenges to obtain the full honk relation accumulator = scaleAndBatchSubrelations(evaluations, alphas); } @@ -1106,7 +1107,7 @@ library RelationsLib { ap.index_delta = wire(p, WIRE.W_L_SHIFT) - wire(p, WIRE.W_L); ap.record_delta = wire(p, WIRE.W_4_SHIFT) - wire(p, WIRE.W_4); - ap.index_is_monotonically_increasing = ap.index_delta * ap.index_delta - ap.index_delta; // deg 2 + ap.index_is_monotonically_increasing = ap.index_delta * (ap.index_delta - Fr.wrap(1)); // deg 2 ap.adjacent_values_match_if_adjacent_indices_match = (ap.index_delta * MINUS_ONE + ONE) * ap.record_delta; // deg 2 @@ -1137,7 +1138,7 @@ library RelationsLib { * with a WRITE operation. */ Fr access_type = (wire(p, WIRE.W_4) - ap.partial_record_check); // will be 0 or 1 for honest Prover; deg 1 or 4 - ap.access_check = access_type * access_type - access_type; // check value is 0 or 1; deg 2 or 8 + ap.access_check = access_type * (access_type - Fr.wrap(1)); // check value is 0 or 1; deg 2 or 8 // reverse order we could re-use `ap.partial_record_check` 1 - ((w3' * eta + w2') * eta + w1') * eta // deg 1 or 4 @@ -1311,7 +1312,7 @@ library RelationsLib { function accumulatePoseidonExternalRelation( Fr[NUMBER_OF_ENTITIES] memory p, Fr[NUMBER_OF_SUBRELATIONS] memory evals, - Fr domainSep // i guess this is the scaling factor? 
+ Fr domainSep ) internal pure { PoseidonExternalParams memory ep; @@ -1409,7 +1410,7 @@ library RelationsLib { Fr[NUMBER_OF_SUBRELATIONS] memory evaluations, Fr[NUMBER_OF_ALPHAS] memory subrelationChallenges ) internal pure returns (Fr accumulator) { - accumulator = accumulator + evaluations[0]; + accumulator = evaluations[0]; for (uint256 i = 1; i < NUMBER_OF_SUBRELATIONS; ++i) { accumulator = accumulator + evaluations[i] * subrelationChallenges[i - 1]; @@ -1476,10 +1477,9 @@ library CommitmentSchemeLib { ); // Divide by the denominator batchedEvalRoundAcc = batchedEvalRoundAcc * (challengePower * (ONE - u) + u).invert(); - if (i <= logSize) { - batchedEvalAccumulator = batchedEvalRoundAcc; - foldPosEvaluations[i - 1] = batchedEvalRoundAcc; - } + + batchedEvalAccumulator = batchedEvalRoundAcc; + foldPosEvaluations[i - 1] = batchedEvalRoundAcc; } return foldPosEvaluations; } @@ -1853,6 +1853,8 @@ abstract contract BaseZKHonkVerifier is IVerifier { verified = true; } + uint256 constant PERMUTATION_ARGUMENT_VALUE_SEPARATOR = 1 << 28; + function computePublicInputDelta( bytes32[] memory publicInputs, Fr[PAIRING_POINTS_SIZE] memory pairingPointObject, @@ -1863,7 +1865,7 @@ abstract contract BaseZKHonkVerifier is IVerifier { Fr numerator = Fr.wrap(1); Fr denominator = Fr.wrap(1); - Fr numeratorAcc = gamma + (beta * FrLib.from($N + offset)); + Fr numeratorAcc = gamma + (beta * FrLib.from(PERMUTATION_ARGUMENT_VALUE_SEPARATOR + offset)); Fr denominatorAcc = gamma - (beta * FrLib.from(offset + 1)); { @@ -2161,7 +2163,7 @@ abstract contract BaseZKHonkVerifier is IVerifier { boundary += $LOG_N - 1; - // Finalise the batch opening claim + // Finalize the batch opening claim mem.denominators[0] = Fr.wrap(1).div(tp.shplonkZ - tp.geminiR); mem.denominators[1] = Fr.wrap(1).div(tp.shplonkZ - SUBGROUP_GENERATOR * tp.geminiR); mem.denominators[2] = mem.denominators[0]; diff --git a/barretenberg/cpp/src/barretenberg/ecc/batched_affine_addition/batched_affine_addition.cpp 
b/barretenberg/cpp/src/barretenberg/ecc/batched_affine_addition/batched_affine_addition.cpp index 12049bc7d154..4e46fe0d4319 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/batched_affine_addition/batched_affine_addition.cpp +++ b/barretenberg/cpp/src/barretenberg/ecc/batched_affine_addition/batched_affine_addition.cpp @@ -18,7 +18,7 @@ template std::vector::G1> BatchedAffineAddition::add_in_place( const std::span& points, const std::vector& sequence_counts) { - PROFILE_THIS_NAME("BatchedAffineAddition::add_in_place"); + BB_BENCH_NAME("BatchedAffineAddition::add_in_place"); // Instantiate scratch space for point addition denominators and their calculation std::vector scratch_space_vector(points.size()); std::span scratch_space(scratch_space_vector); diff --git a/barretenberg/cpp/src/barretenberg/ecc/curves/bn254/bn254.hpp b/barretenberg/cpp/src/barretenberg/ecc/curves/bn254/bn254.hpp index a3831f6a63ec..748f6968f793 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/curves/bn254/bn254.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/curves/bn254/bn254.hpp @@ -45,4 +45,4 @@ class BN254 { // max(BATCHED_PARTIAL_RELATION_LENGTH) for BN254 Flavors with ZK static constexpr uint32_t LIBRA_UNIVARIATES_LENGTH = 9; }; -} // namespace bb::curve \ No newline at end of file +} // namespace bb::curve diff --git a/barretenberg/cpp/src/barretenberg/ecc/curves/bn254/fq.test.cpp b/barretenberg/cpp/src/barretenberg/ecc/curves/bn254/fq.test.cpp index 3e571de9334f..c999e543acc9 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/curves/bn254/fq.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ecc/curves/bn254/fq.test.cpp @@ -385,6 +385,9 @@ TEST(fq, SplitIntoEndomorphismScalars) k1.self_to_montgomery_form(); k2.self_to_montgomery_form(); + EXPECT_LT(uint256_t(k1).get_msb(), 128); + EXPECT_LT(uint256_t(k2).get_msb(), 128); + result = k2 * fq::cube_root_of_unity(); result = k1 - result; @@ -407,6 +410,37 @@ TEST(fq, SplitIntoEndomorphismScalarsSimple) k1.self_to_montgomery_form(); 
k2.self_to_montgomery_form(); + EXPECT_LT(uint256_t(k1).get_msb(), 128); + EXPECT_LT(uint256_t(k2).get_msb(), 128); + + fq beta = fq::cube_root_of_unity(); + result = k2 * beta; + result = k1 - result; + + result.self_from_montgomery_form(); + for (size_t i = 0; i < 4; ++i) { + EXPECT_EQ(result.data[i], k.data[i]); + } +} + +TEST(fq, SplitIntoEndomorphismEdgeCase) +{ + + fq input = { 0, 0, 1, 0 }; // 2^128 + fq k = { 0, 0, 0, 0 }; + fq k1 = { 0, 0, 0, 0 }; + fq k2 = { 0, 0, 0, 0 }; + fq::__copy(input, k); + + fq::split_into_endomorphism_scalars(k, k1, k2); + + fq result{ 0, 0, 0, 0 }; + k1.self_to_montgomery_form(); + k2.self_to_montgomery_form(); + + EXPECT_LT(uint256_t(k1).get_msb(), 128); + EXPECT_LT(uint256_t(k2).get_msb(), 128); + fq beta = fq::cube_root_of_unity(); result = k2 * beta; result = k1 - result; diff --git a/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp b/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp index e60fd53f40b7..c2679366b26d 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp @@ -343,8 +343,15 @@ template struct alignas(32) field { static constexpr uint256_t modulus_minus_two = uint256_t(Params::modulus_0 - 2ULL, Params::modulus_1, Params::modulus_2, Params::modulus_3); constexpr field invert() const noexcept; - static void batch_invert(std::span coeffs) noexcept; + template + // has size() and operator[]. + requires requires(C& c) { + { c.size() } -> std::convertible_to; + { c[0] }; + } + static void batch_invert(C& coeffs) noexcept; static void batch_invert(field* coeffs, size_t n) noexcept; + static void batch_invert(std::span coeffs) noexcept; /** * @brief Compute square root of the field element. 
* diff --git a/barretenberg/cpp/src/barretenberg/ecc/fields/field_docs.md b/barretenberg/cpp/src/barretenberg/ecc/fields/field_docs.md index 1c52ca648136..f91bcaa1cfc1 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/fields/field_docs.md +++ b/barretenberg/cpp/src/barretenberg/ecc/fields/field_docs.md @@ -45,7 +45,7 @@ The generic implementation has 2 purposes: 1. Building barretenberg on platforms we haven't targeted in the past (new ARM-based Macs, for example) 2. Compile-time computation of constant expressions, since we can't use the assembly implementation for those. -The assembly implementation for x86_64 is optimised. There are 2 versions: +The assembly implementation for x86_64 is optimized. There are 2 versions: 1. General x86_64 implementation that uses 64-bit registers. The squaring operation is equivalent to multiplication for simplicity and because the original squaring implementation was quite buggy. 2. Implementation using Intel ADX. It allows simultaneous use of two addition-with carry operations (adox and adcx) on two separate CPU gates (units of execution that can work simultaneously on the same core), which almost halves the time spent adding up the results of uint64_t multiplication. @@ -59,18 +59,18 @@ In the past we implemented a version with 32-bit limbs, but as a result, when we 1. This spawned in a lot of masking operations 2. We didn't use more efficient algorithms for squaring, because multiplication by 2 of intermediate products would once again overflow. -Switching to 9 29-bit limbs increased the number of multiplications from 136 to 171. However, since the product of 2 limbs is 58 bits, we can safely accumulate 64 of those before we have to reduce. This allowed us to get rid of a lot of intermediate masking operations, shifts and additions, so the resulting computation turned out to be more efficient. +Switching to 9 29-bit limbs increased the number of multiplications from 136 to 171. 
However, since the product of 2 limbs is 58 bits, we can safely accumulate 64 of those before we have to reduce. This allowed us to get rid of a lot of intermediate masking operations, shifts and additions, so the resulting computation turned out to be more efficient. ## Interaction of field object with other objects Most of the time field is used with uint64_t or uint256_t in our codebase, but there is general logic of how we generate field elements from integers: 1. Converting from signed int takes the sign into account. It takes the absolute value, converts it to montgomery and then negates the result if the original value was negative 2. Unsigned integers ( <= 64 bits) are just converted to montgomery -3. uint256_t and uint512_t: +3. uint256_t and uint512_t: 1. Truncate to 256 bits 2. Subtract the modulus until the value is within field 3. Convert to montgomery -Conversion from field elements exists only to unsigned integers and bools. The value is converted from montgomery and appropriate number of lowest bits is used to initialize the value. +Conversion from field elements exists only to unsigned integers and bools. The value is converted from montgomery and appropriate number of lowest bits is used to initialize the value. **N.B.** Functions for converting from uint256_t and back are not bijective, since values \f$ \ge p\f$ will be reduced. 
@@ -100,7 +100,7 @@ def parse_field_params(s): raise ValueError("Couldn't find value with name "+name) eq_position=s[index:].find('=') line_end=s[index:].find(';') - return parse_number(s[index+eq_position+1:index+line_end]) + return parse_number(s[index+eq_position+1:index+line_end]) def recover_single_value_if_present(name): nonlocal s @@ -109,7 +109,7 @@ def parse_field_params(s): return None eq_position=s[index:].find('=') line_end=s[index:].find(';') - return parse_number(s[index+eq_position+1:index+line_end]) + return parse_number(s[index+eq_position+1:index+line_end]) def recover_array(name): nonlocal s diff --git a/barretenberg/cpp/src/barretenberg/ecc/fields/field_impl.hpp b/barretenberg/cpp/src/barretenberg/ecc/fields/field_impl.hpp index 6759ffa76216..711e6ef1edb2 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/fields/field_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/fields/field_impl.hpp @@ -6,7 +6,7 @@ #pragma once #include "barretenberg/common/assert.hpp" -#include "barretenberg/common/op_count.hpp" +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/common/slab_allocator.hpp" #include "barretenberg/common/throw_or_abort.hpp" #include "barretenberg/numeric/bitop/get_msb.hpp" @@ -34,7 +34,6 @@ namespace bb { **/ template constexpr field field::operator*(const field& other) const noexcept { - BB_OP_COUNT_TRACK_NAME("fr::mul"); if constexpr (BBERG_NO_ASM || (T::modulus_3 >= 0x4000000000000000ULL) || (T::modulus_1 == 0 && T::modulus_2 == 0 && T::modulus_3 == 0)) { // >= 255-bits or <= 64-bits. @@ -49,7 +48,6 @@ template constexpr field field::operator*(const field& other) co template constexpr field& field::operator*=(const field& other) & noexcept { - BB_OP_COUNT_TRACK_NAME("fr::self_mul"); if constexpr (BBERG_NO_ASM || (T::modulus_3 >= 0x4000000000000000ULL) || (T::modulus_1 == 0 && T::modulus_2 == 0 && T::modulus_3 == 0)) { // >= 255-bits or <= 64-bits. 
@@ -71,7 +69,6 @@ template constexpr field& field::operator*=(const field& other) **/ template constexpr field field::sqr() const noexcept { - BB_OP_COUNT_TRACK_NAME("fr::sqr"); if constexpr (BBERG_NO_ASM || (T::modulus_3 >= 0x4000000000000000ULL) || (T::modulus_1 == 0 && T::modulus_2 == 0 && T::modulus_3 == 0)) { return montgomery_square(); @@ -85,7 +82,6 @@ template constexpr field field::sqr() const noexcept template constexpr void field::self_sqr() & noexcept { - BB_OP_COUNT_TRACK_NAME("f::self_sqr"); if constexpr (BBERG_NO_ASM || (T::modulus_3 >= 0x4000000000000000ULL) || (T::modulus_1 == 0 && T::modulus_2 == 0 && T::modulus_3 == 0)) { *this = montgomery_square(); @@ -105,7 +101,6 @@ template constexpr void field::self_sqr() & noexcept **/ template constexpr field field::operator+(const field& other) const noexcept { - BB_OP_COUNT_TRACK_NAME("fr::add"); if constexpr (BBERG_NO_ASM || (T::modulus_3 >= 0x4000000000000000ULL) || (T::modulus_1 == 0 && T::modulus_2 == 0 && T::modulus_3 == 0)) { return add(other); @@ -119,7 +114,6 @@ template constexpr field field::operator+(const field& other) co template constexpr field& field::operator+=(const field& other) & noexcept { - BB_OP_COUNT_TRACK_NAME("fr::self_add"); if constexpr (BBERG_NO_ASM || (T::modulus_3 >= 0x4000000000000000ULL) || (T::modulus_1 == 0 && T::modulus_2 == 0 && T::modulus_3 == 0)) { (*this) = operator+(other); @@ -135,14 +129,12 @@ template constexpr field& field::operator+=(const field& other) template constexpr field field::operator++() noexcept { - BB_OP_COUNT_TRACK_NAME("++f"); return *this += 1; } // NOLINTNEXTLINE(cert-dcl21-cpp) circular linting errors. 
If const is added, linter suggests removing template constexpr field field::operator++(int) noexcept { - BB_OP_COUNT_TRACK_NAME("fr::increment"); field value_before_incrementing = *this; *this += 1; return value_before_incrementing; @@ -155,7 +147,6 @@ template constexpr field field::operator++(int) noexcept **/ template constexpr field field::operator-(const field& other) const noexcept { - BB_OP_COUNT_TRACK_NAME("fr::sub"); if constexpr (BBERG_NO_ASM || (T::modulus_3 >= 0x4000000000000000ULL) || (T::modulus_1 == 0 && T::modulus_2 == 0 && T::modulus_3 == 0)) { return subtract_coarse(other); // modulus - *this; @@ -169,7 +160,6 @@ template constexpr field field::operator-(const field& other) co template constexpr field field::operator-() const noexcept { - BB_OP_COUNT_TRACK_NAME("-f"); if constexpr ((T::modulus_3 >= 0x4000000000000000ULL) || (T::modulus_1 == 0 && T::modulus_2 == 0 && T::modulus_3 == 0)) { constexpr field p{ modulus.data[0], modulus.data[1], modulus.data[2], modulus.data[3] }; @@ -198,7 +188,6 @@ template constexpr field field::operator-() const noexcept template constexpr field& field::operator-=(const field& other) & noexcept { - BB_OP_COUNT_TRACK_NAME("fr::self_sub"); if constexpr (BBERG_NO_ASM || (T::modulus_3 >= 0x4000000000000000ULL) || (T::modulus_1 == 0 && T::modulus_2 == 0 && T::modulus_3 == 0)) { *this = subtract_coarse(other); // subtract(other); @@ -214,7 +203,6 @@ template constexpr field& field::operator-=(const field& other) template constexpr void field::self_neg() & noexcept { - BB_OP_COUNT_TRACK_NAME("fr::self_neg"); if constexpr ((T::modulus_3 >= 0x4000000000000000ULL) || (T::modulus_1 == 0 && T::modulus_2 == 0 && T::modulus_3 == 0)) { constexpr field p{ modulus.data[0], modulus.data[1], modulus.data[2], modulus.data[3] }; @@ -227,7 +215,6 @@ template constexpr void field::self_neg() & noexcept template constexpr void field::self_conditional_negate(const uint64_t predicate) & noexcept { - 
BB_OP_COUNT_TRACK_NAME("fr::self_conditional_negate"); if constexpr (BBERG_NO_ASM || (T::modulus_3 >= 0x4000000000000000ULL) || (T::modulus_1 == 0 && T::modulus_2 == 0 && T::modulus_3 == 0)) { *this = predicate ? -(*this) : *this; // NOLINT @@ -253,7 +240,6 @@ template constexpr void field::self_conditional_negate(const uint64 */ template constexpr bool field::operator>(const field& other) const noexcept { - BB_OP_COUNT_TRACK_NAME("fr::gt"); const field left = reduce_once(); const field right = other.reduce_once(); const bool t0 = left.data[3] > right.data[3]; @@ -283,7 +269,6 @@ template constexpr bool field::operator<(const field& other) const template constexpr bool field::operator==(const field& other) const noexcept { - BB_OP_COUNT_TRACK_NAME("fr::eqeq"); const field left = reduce_once(); const field right = other.reduce_once(); return (left.data[0] == right.data[0]) && (left.data[1] == right.data[1]) && (left.data[2] == right.data[2]) && @@ -297,7 +282,6 @@ template constexpr bool field::operator!=(const field& other) const template constexpr field field::to_montgomery_form() const noexcept { - BB_OP_COUNT_TRACK_NAME("fr::to_montgomery_form"); constexpr field r_squared = field{ r_squared_uint.data[0], r_squared_uint.data[1], r_squared_uint.data[2], r_squared_uint.data[3] }; @@ -316,14 +300,12 @@ template constexpr field field::to_montgomery_form() const noexc template constexpr field field::from_montgomery_form() const noexcept { - BB_OP_COUNT_TRACK_NAME("fr::from_montgomery_form"); constexpr field one_raw{ 1, 0, 0, 0 }; return operator*(one_raw).reduce_once(); } template constexpr void field::self_to_montgomery_form() & noexcept { - BB_OP_COUNT_TRACK_NAME("fr::self_to_montgomery_form"); constexpr field r_squared = field{ r_squared_uint.data[0], r_squared_uint.data[1], r_squared_uint.data[2], r_squared_uint.data[3] }; @@ -336,7 +318,6 @@ template constexpr void field::self_to_montgomery_form() & noexcept template constexpr void 
field::self_from_montgomery_form() & noexcept { - BB_OP_COUNT_TRACK_NAME("fr::self_from_montgomery_form"); constexpr field one_raw{ 1, 0, 0, 0 }; *this *= one_raw; self_reduce_once(); @@ -344,7 +325,6 @@ template constexpr void field::self_from_montgomery_form() & noexce template constexpr field field::reduce_once() const noexcept { - BB_OP_COUNT_TRACK_NAME("fr::reduce_once"); if constexpr (BBERG_NO_ASM || (T::modulus_3 >= 0x4000000000000000ULL) || (T::modulus_1 == 0 && T::modulus_2 == 0 && T::modulus_3 == 0)) { return reduce(); @@ -358,7 +338,6 @@ template constexpr field field::reduce_once() const noexcept template constexpr void field::self_reduce_once() & noexcept { - BB_OP_COUNT_TRACK_NAME("fr::self_reduce_once"); if constexpr (BBERG_NO_ASM || (T::modulus_3 >= 0x4000000000000000ULL) || (T::modulus_1 == 0 && T::modulus_2 == 0 && T::modulus_3 == 0)) { *this = reduce(); @@ -373,7 +352,6 @@ template constexpr void field::self_reduce_once() & noexcept template constexpr field field::pow(const uint256_t& exponent) const noexcept { - BB_OP_COUNT_TRACK_NAME("fr::pow"); field accumulator{ data[0], data[1], data[2], data[3] }; field to_mul{ data[0], data[1], data[2], data[3] }; const uint64_t maximum_set_bit = exponent.get_msb(); @@ -399,22 +377,31 @@ template constexpr field field::pow(const uint64_t exponent) con template constexpr field field::invert() const noexcept { - BB_OP_COUNT_TRACK_NAME("fr::invert"); if (*this == zero()) { - throw_or_abort("Trying to invert zero in the field"); + bb::assert_failure("Trying to invert zero in the field"); } return pow(modulus_minus_two); } +// TODO(https://github.com/AztecProtocol/barretenberg/issues/1166) template void field::batch_invert(field* coeffs, const size_t n) noexcept { batch_invert(std::span{ coeffs, n }); } -// TODO(https://github.com/AztecProtocol/barretenberg/issues/1166) template void field::batch_invert(std::span coeffs) noexcept { - PROFILE_THIS_NAME("fr::batch_invert"); + batch_invert(coeffs); +} + +template 
+template + requires requires(C& c) { + { c.size() } -> std::convertible_to; + { c[0] }; + } +void field::batch_invert(C& coeffs) noexcept +{ const size_t n = coeffs.size(); auto temporaries_ptr = std::static_pointer_cast(get_mem_slab(n * sizeof(field))); @@ -462,7 +449,7 @@ template void field::batch_invert(std::span coeffs) noexcept } /** - * @brief Implements an optimised variant of Tonelli-Shanks via lookup tables. + * @brief Implements an optimized variant of Tonelli-Shanks via lookup tables. * Algorithm taken from https://cr.yp.to/papers/sqroot-20011123-retypeset20220327.pdf * "FASTER SQUARE ROOTS IN ANNOYING FINITE FIELDS" by D. Bernstein * Page 5 "Accelerated Discrete Logarithm" @@ -471,7 +458,6 @@ template void field::batch_invert(std::span coeffs) noexcept */ template constexpr field field::tonelli_shanks_sqrt() const noexcept { - BB_OP_COUNT_TRACK_NAME("fr::tonelli_shanks_sqrt"); // Tonelli-shanks algorithm begins by finding a field element Q and integer S, // such that (p - 1) = Q.2^{s} // We can determine s by counting the least significant set bit of `p - 1` @@ -623,7 +609,6 @@ template constexpr std::pair> field::sqrt() const noexcept requires((T::modulus_0 & 0x3UL) == 0x3UL) { - BB_OP_COUNT_TRACK_NAME("fr::sqrt"); constexpr uint256_t sqrt_exponent = (modulus + uint256_t(1)) >> 2; field root = pow(sqrt_exponent); if ((root * root) == (*this)) { @@ -645,13 +630,11 @@ constexpr std::pair> field::sqrt() const noexcept template constexpr field field::operator/(const field& other) const noexcept { - BB_OP_COUNT_TRACK_NAME("fr::div"); return operator*(other.invert()); } template constexpr field& field::operator/=(const field& other) & noexcept { - BB_OP_COUNT_TRACK_NAME("fr::self_div"); *this = operator/(other); return *this; } @@ -692,7 +675,6 @@ template constexpr field field::get_root_of_unity(size_t subgrou template field field::random_element(numeric::RNG* engine) noexcept { - BB_OP_COUNT_TRACK_NAME("fr::random_element"); if (engine == nullptr) { 
engine = &numeric::get_randomness(); } @@ -706,7 +688,6 @@ template field field::random_element(numeric::RNG* engine) noexc template constexpr size_t field::primitive_root_log_size() noexcept { - BB_OP_COUNT_TRACK_NAME("fr::primitive_root_log_size"); uint256_t target = modulus - 1; size_t result = 0; while (!target.get_bit(result)) { diff --git a/barretenberg/cpp/src/barretenberg/ecc/fields/field_impl_generic.hpp b/barretenberg/cpp/src/barretenberg/ecc/fields/field_impl_generic.hpp index 15c749fe89af..232a3bc64f99 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/fields/field_impl_generic.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/fields/field_impl_generic.hpp @@ -10,7 +10,7 @@ #include #include "./field_impl.hpp" -#include "barretenberg/common/op_count.hpp" +#include "barretenberg/common/bb_bench.hpp" namespace bb { @@ -111,7 +111,6 @@ constexpr uint64_t field::addc(const uint64_t a, const uint64_t carry_in, uint64_t& carry_out) noexcept { - BB_OP_COUNT_TRACK(); #if defined(__SIZEOF_INT128__) && !defined(__wasm__) uint128_t res = static_cast(a) + static_cast(b) + static_cast(carry_in); carry_out = static_cast(res >> 64); @@ -909,7 +908,8 @@ template constexpr field field::montgomery_square() const noexce #endif } -template constexpr struct field::wide_array field::mul_512(const field& other) const noexcept { +template constexpr struct field::wide_array field::mul_512(const field& other) const noexcept +{ #if defined(__SIZEOF_INT128__) && !defined(__wasm__) uint64_t carry_2 = 0; auto [r0, carry] = mul_wide(data[0], other.data[0]); diff --git a/barretenberg/cpp/src/barretenberg/ecc/fields/field_impl_x64.hpp b/barretenberg/cpp/src/barretenberg/ecc/fields/field_impl_x64.hpp index 89a56c557692..7af0c44eb894 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/fields/field_impl_x64.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/fields/field_impl_x64.hpp @@ -13,8 +13,6 @@ namespace bb { template field field::asm_mul_with_coarse_reduction(const field& a, const field& 
b) noexcept { - BB_OP_COUNT_TRACK_NAME("fr::asm_mul_with_coarse_reduction"); - field r; constexpr uint64_t r_inv = T::r_inv; constexpr uint64_t modulus_0 = modulus.data[0]; @@ -50,8 +48,6 @@ template field field::asm_mul_with_coarse_reduction(const field& template void field::asm_self_mul_with_coarse_reduction(const field& a, const field& b) noexcept { - BB_OP_COUNT_TRACK_NAME("fr::asm_self_mul_with_coarse_reduction"); - constexpr uint64_t r_inv = T::r_inv; constexpr uint64_t modulus_0 = modulus.data[0]; constexpr uint64_t modulus_1 = modulus.data[1]; @@ -83,8 +79,6 @@ template void field::asm_self_mul_with_coarse_reduction(const field template field field::asm_sqr_with_coarse_reduction(const field& a) noexcept { - BB_OP_COUNT_TRACK_NAME("fr::asm_sqr_with_coarse_reduction"); - field r; constexpr uint64_t r_inv = T::r_inv; constexpr uint64_t modulus_0 = modulus.data[0]; @@ -149,8 +143,6 @@ template field field::asm_sqr_with_coarse_reduction(const field& template void field::asm_self_sqr_with_coarse_reduction(const field& a) noexcept { - BB_OP_COUNT_TRACK_NAME("fr::asm_self_sqr_with_coarse_reduction"); - constexpr uint64_t r_inv = T::r_inv; constexpr uint64_t modulus_0 = modulus.data[0]; constexpr uint64_t modulus_1 = modulus.data[1]; @@ -210,8 +202,6 @@ template void field::asm_self_sqr_with_coarse_reduction(const field template field field::asm_add_with_coarse_reduction(const field& a, const field& b) noexcept { - BB_OP_COUNT_TRACK_NAME("fr::asm_add_with_coarse_reduction"); - field r; constexpr uint64_t twice_not_modulus_0 = twice_not_modulus.data[0]; @@ -239,8 +229,6 @@ template field field::asm_add_with_coarse_reduction(const field& template void field::asm_self_add_with_coarse_reduction(const field& a, const field& b) noexcept { - BB_OP_COUNT_TRACK_NAME("fr::asm_self_add_with_coarse_reduction"); - constexpr uint64_t twice_not_modulus_0 = twice_not_modulus.data[0]; constexpr uint64_t twice_not_modulus_1 = twice_not_modulus.data[1]; constexpr uint64_t 
twice_not_modulus_2 = twice_not_modulus.data[2]; @@ -264,8 +252,6 @@ template void field::asm_self_add_with_coarse_reduction(const field template field field::asm_sub_with_coarse_reduction(const field& a, const field& b) noexcept { - BB_OP_COUNT_TRACK_NAME("fr::asm_sub_with_coarse_reduction"); - field r; constexpr uint64_t twice_modulus_0 = twice_modulus.data[0]; @@ -291,8 +277,6 @@ template field field::asm_sub_with_coarse_reduction(const field& template void field::asm_self_sub_with_coarse_reduction(const field& a, const field& b) noexcept { - BB_OP_COUNT_TRACK_NAME("fr::asm_self_sub_with_coarse_reduction"); - constexpr uint64_t twice_modulus_0 = twice_modulus.data[0]; constexpr uint64_t twice_modulus_1 = twice_modulus.data[1]; constexpr uint64_t twice_modulus_2 = twice_modulus.data[2]; @@ -314,8 +298,6 @@ template void field::asm_self_sub_with_coarse_reduction(const field template void field::asm_conditional_negate(field& r, const uint64_t predicate) noexcept { - BB_OP_COUNT_TRACK_NAME("fr::asm_conditional_negate"); - constexpr uint64_t twice_modulus_0 = twice_modulus.data[0]; constexpr uint64_t twice_modulus_1 = twice_modulus.data[1]; constexpr uint64_t twice_modulus_2 = twice_modulus.data[2]; @@ -348,8 +330,6 @@ template void field::asm_conditional_negate(field& r, const uint64_ template field field::asm_reduce_once(const field& a) noexcept { - BB_OP_COUNT_TRACK_NAME("fr::asm_reduce_once"); - field r; constexpr uint64_t not_modulus_0 = not_modulus.data[0]; @@ -373,8 +353,6 @@ template field field::asm_reduce_once(const field& a) noexcept template void field::asm_self_reduce_once(const field& a) noexcept { - BB_OP_COUNT_TRACK_NAME("fr::asm_self_reduce_once"); - constexpr uint64_t not_modulus_0 = not_modulus.data[0]; constexpr uint64_t not_modulus_1 = not_modulus.data[1]; constexpr uint64_t not_modulus_2 = not_modulus.data[2]; @@ -392,4 +370,4 @@ template void field::asm_self_reduce_once(const field& a) noexcept : "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", 
"%r14", "%r15", "cc", "memory"); } } // namespace bb -#endif \ No newline at end of file +#endif diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp index d9d834367380..36a1e15ea90c 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp @@ -6,7 +6,7 @@ #pragma once #include "barretenberg/common/assert.hpp" -#include "barretenberg/common/op_count.hpp" +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/common/thread.hpp" #include "barretenberg/ecc/groups/element.hpp" #include "element.hpp" @@ -450,7 +450,6 @@ constexpr element element::operator+=(const element& other template constexpr element element::operator+(const element& other) const noexcept { - BB_OP_COUNT_TRACK_NAME("element::operator+"); element result(*this); return (result += other); } @@ -465,7 +464,6 @@ constexpr element element::operator-=(const element& other template constexpr element element::operator-(const element& other) const noexcept { - BB_OP_COUNT_TRACK(); element result(*this); return (result -= other); } @@ -796,8 +794,8 @@ template std::vector> element::batch_mul_with_endomorphism( const std::span>& points, const Fr& scalar) noexcept { - PROFILE_THIS(); - typedef affine_element affine_element; + BB_BENCH(); + using affine_element = affine_element; const size_t num_points = points.size(); // Space for temporary values @@ -835,20 +833,6 @@ std::vector> element::batch_mul_with_endomo } }; - /** - * @brief Perform batch affine addition in parallel - * - */ - const auto batch_affine_add_internal = - [num_points, &scratch_space, &batch_affine_add_chunked](const affine_element* lhs, affine_element* rhs) { - parallel_for_heuristic( - num_points, - [&](size_t start, size_t end, BB_UNUSED size_t chunk_index) { - batch_affine_add_chunked(lhs + start, rhs + start, end - start, &scratch_space[0] + start); - }, - 
thread_heuristics::FF_ADDITION_COST * 6 + thread_heuristics::FF_MULTIPLICATION_COST * 6); - }; - /** * @brief Perform point doubling lhs[i]=lhs[i]+lhs[i] with batch inversion * @@ -880,18 +864,6 @@ std::vector> element::batch_mul_with_endomo lhs[i].y = personal_scratch_space[i] * (temp - lhs[i].x) - lhs[i].y; } }; - /** - * @brief Perform point doubling in parallel - * - */ - const auto batch_affine_double = [num_points, &scratch_space, &batch_affine_double_chunked](affine_element* lhs) { - parallel_for_heuristic( - num_points, - [&](size_t start, size_t end, BB_UNUSED size_t chunk_index) { - batch_affine_double_chunked(lhs + start, end - start, &scratch_space[0] + start); - }, - thread_heuristics::FF_ADDITION_COST * 7 + thread_heuristics::FF_MULTIPLICATION_COST * 6); - }; // We compute the resulting point through WNAF by evaluating (the (\sum_i (16ⁱ⋅ // (a_i ∈ {-15,-13,-11,-9,-7,-5,-3,-1,1,3,5,7,9,11,13,15}))) - skew), where skew is 0 or 1. The result of the sum is @@ -901,8 +873,7 @@ std::vector> element::batch_mul_with_endomo // hot loop since the slow the computation down. So it's better to just handle it here. 
if (scalar == -Fr::one()) { std::vector results(num_points); - parallel_for_heuristic( - num_points, [&](size_t i) { results[i] = -points[i]; }, thread_heuristics::FF_COPY_COST); + parallel_for_heuristic(num_points, [&](size_t i) { results[i] = -points[i]; }, thread_heuristics::FF_COPY_COST); return results; } // Compute wnaf for scalar @@ -913,57 +884,64 @@ std::vector> element::batch_mul_with_endomo affine_element result{ Fq::zero(), Fq::zero() }; result.self_set_infinity(); std::vector results(num_points); - parallel_for_heuristic( - num_points, [&](size_t i) { results[i] = result; }, thread_heuristics::FF_COPY_COST); + parallel_for_heuristic(num_points, [&](size_t i) { results[i] = result; }, thread_heuristics::FF_COPY_COST); return results; } constexpr size_t LOOKUP_SIZE = 8; constexpr size_t NUM_ROUNDS = 32; + + detail::EndoScalars endo_scalars = Fr::split_into_endomorphism_scalars(converted_scalar); + detail::EndomorphismWnaf wnaf{ endo_scalars }; + + std::vector work_elements(num_points); std::array, LOOKUP_SIZE> lookup_table; for (auto& table : lookup_table) { table.resize(num_points); } - // Initialize first etnries in lookup table std::vector temp_point_vector(num_points); - parallel_for_heuristic( - num_points, - [&](size_t i) { - // If the point is at infinity we fix-up the result later - // To avoid 'trying to invert zero in the field' we set the point to 'one' here - temp_point_vector[i] = points[i].is_point_at_infinity() ? affine_element::one() : points[i]; - lookup_table[0][i] = points[i].is_point_at_infinity() ? 
affine_element::one() : points[i]; - }, - thread_heuristics::FF_COPY_COST * 2); - - // Construct lookup table - batch_affine_double(&temp_point_vector[0]); - for (size_t j = 1; j < LOOKUP_SIZE; ++j) { - parallel_for_heuristic( - num_points, - [&](size_t i) { lookup_table[j][i] = lookup_table[j - 1][i]; }, - thread_heuristics::FF_COPY_COST); - batch_affine_add_internal(&temp_point_vector[0], &lookup_table[j][0]); - } - detail::EndoScalars endo_scalars = Fr::split_into_endomorphism_scalars(converted_scalar); - detail::EndomorphismWnaf wnaf{ endo_scalars }; + auto execute_range = [&](size_t start, size_t end) { + // Perform batch affine addition in parallel + const auto add_chunked = [&](const affine_element* lhs, affine_element* rhs) { + batch_affine_add_chunked(&lhs[start], &rhs[start], end - start, &scratch_space[start]); + }; - std::vector work_elements(num_points); + // Perform point doubling in parallel + const auto double_chunked = [&](affine_element* lhs) { + batch_affine_double_chunked(&lhs[start], end - start, &scratch_space[start]); + }; - constexpr Fq beta = Fq::cube_root_of_unity(); - uint64_t wnaf_entry = 0; - uint64_t index = 0; - bool sign = 0; - // Prepare elements for the first batch addition - for (size_t j = 0; j < 2; ++j) { - wnaf_entry = wnaf.table[j]; - index = wnaf_entry & 0x0fffffffU; - sign = static_cast((wnaf_entry >> 31) & 1); - const bool is_odd = ((j & 1) == 1); - parallel_for_heuristic( - num_points, - [&](size_t i) { + // Initialize first entries in lookup table + for (size_t i = start; i < end; ++i) { + if (points[i].is_point_at_infinity()) { + temp_point_vector[i] = affine_element::one(); + lookup_table[0][i] = affine_element::one(); + } else { + temp_point_vector[i] = points[i]; + lookup_table[0][i] = points[i]; + } + } + // Construct lookup table + double_chunked(&temp_point_vector[0]); + for (size_t j = 1; j < LOOKUP_SIZE; ++j) { + for (size_t i = start; i < end; ++i) { + lookup_table[j][i] = lookup_table[j - 1][i]; + } + 
add_chunked(&temp_point_vector[0], &lookup_table[j][0]); + } + + constexpr Fq beta = Fq::cube_root_of_unity(); + uint64_t wnaf_entry = 0; + uint64_t index = 0; + bool sign = 0; + // Prepare elements for the first batch addition + for (size_t j = 0; j < 2; ++j) { + wnaf_entry = wnaf.table[j]; + index = wnaf_entry & 0x0fffffffU; + sign = static_cast((wnaf_entry >> 31) & 1); + const bool is_odd = ((j & 1) == 1); + for (size_t i = start; i < end; ++i) { auto to_add = lookup_table[static_cast(index)][i]; to_add.y.self_conditional_negate(sign ^ is_odd); if (is_odd) { @@ -974,64 +952,51 @@ std::vector> element::batch_mul_with_endomo } else { temp_point_vector[i] = to_add; } - }, - (is_odd ? thread_heuristics::FF_MULTIPLICATION_COST : 0) + thread_heuristics::FF_COPY_COST + - thread_heuristics::FF_ADDITION_COST); - } - // First cycle of addition - batch_affine_add_internal(&temp_point_vector[0], &work_elements[0]); - // Run through SM logic in wnaf form (excluding the skew) - for (size_t j = 2; j < NUM_ROUNDS * 2; ++j) { - wnaf_entry = wnaf.table[j]; - index = wnaf_entry & 0x0fffffffU; - sign = static_cast((wnaf_entry >> 31) & 1); - const bool is_odd = ((j & 1) == 1); - if (!is_odd) { - for (size_t k = 0; k < 4; ++k) { - batch_affine_double(&work_elements[0]); } } - parallel_for_heuristic( - num_points, - [&](size_t i) { + add_chunked(&temp_point_vector[0], &work_elements[0]); + // Run through SM logic in wnaf form (excluding the skew) + for (size_t j = 2; j < NUM_ROUNDS * 2; ++j) { + wnaf_entry = wnaf.table[j]; + index = wnaf_entry & 0x0fffffffU; + sign = static_cast((wnaf_entry >> 31) & 1); + const bool is_odd = ((j & 1) == 1); + if (!is_odd) { + for (size_t k = 0; k < 4; ++k) { + double_chunked(&work_elements[0]); + } + } + for (size_t i = start; i < end; ++i) { auto to_add = lookup_table[static_cast(index)][i]; to_add.y.self_conditional_negate(sign ^ is_odd); if (is_odd) { to_add.x *= beta; } temp_point_vector[i] = to_add; - }, - (is_odd ? 
thread_heuristics::FF_MULTIPLICATION_COST : 0) + thread_heuristics::FF_COPY_COST + - thread_heuristics::FF_ADDITION_COST); - batch_affine_add_internal(&temp_point_vector[0], &work_elements[0]); - } - - // Apply skew for the first endo scalar - if (wnaf.skew) { - parallel_for_heuristic( - num_points, - [&](size_t i) { temp_point_vector[i] = -lookup_table[0][i]; }, - thread_heuristics::FF_ADDITION_COST + thread_heuristics::FF_COPY_COST); - batch_affine_add_internal(&temp_point_vector[0], &work_elements[0]); - } - // Apply skew for the second endo scalar - if (wnaf.endo_skew) { - parallel_for_heuristic( - num_points, - [&](size_t i) { + } + add_chunked(&temp_point_vector[0], &work_elements[0]); + } + // Apply skew for the first endo scalar + if (wnaf.skew) { + for (size_t i = start; i < end; ++i) { + temp_point_vector[i] = -lookup_table[0][i]; + } + add_chunked(&temp_point_vector[0], &work_elements[0]); + } + // Apply skew for the second endo scalar + if (wnaf.endo_skew) { + for (size_t i = start; i < end; ++i) { temp_point_vector[i] = lookup_table[0][i]; temp_point_vector[i].x *= beta; - }, - thread_heuristics::FF_MULTIPLICATION_COST + thread_heuristics::FF_COPY_COST); - batch_affine_add_internal(&temp_point_vector[0], &work_elements[0]); - } - // handle points at infinity explicitly - parallel_for_heuristic( - num_points, - [&](size_t i) { + } + add_chunked(&temp_point_vector[0], &work_elements[0]); + } + // handle points at infinity explicitly + for (size_t i = start; i < end; ++i) { work_elements[i] = points[i].is_point_at_infinity() ? 
work_elements[i].set_infinity() : work_elements[i]; - }, - thread_heuristics::FF_COPY_COST); + } + }; + parallel_for_range(num_points, execute_range); return work_elements; } diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.hpp index 53d366c4e81d..e7f2e6d925ff 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.hpp @@ -13,7 +13,7 @@ namespace bb::wnaf { constexpr size_t SCALAR_BITS = 127; -#define WNAF_SIZE(x) ((bb::wnaf::SCALAR_BITS + (x)-1) / (x)) // NOLINT(cppcoreguidelines-macro-usage) +#define WNAF_SIZE(x) ((bb::wnaf::SCALAR_BITS + (x) - 1) / (x)) // NOLINT(cppcoreguidelines-macro-usage) constexpr size_t get_optimal_bucket_width(const size_t num_points) { diff --git a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/bitvector.hpp b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/bitvector.hpp index e9c6b624c1b3..e3432d49f2b8 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/bitvector.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/bitvector.hpp @@ -21,7 +21,7 @@ class BitVector { BB_INLINE void set(size_t index, bool value) noexcept { - BB_ASSERT_LT(index, num_bits_); + ASSERT_DEBUG(index < num_bits_); const size_t word = index >> 6; const size_t bit = index & 63; @@ -35,7 +35,7 @@ class BitVector { BB_INLINE bool get(size_t index) const noexcept { - BB_ASSERT_LT(index, num_bits_); + ASSERT_DEBUG(index < num_bits_); const uint64_t word = index >> 6; const uint64_t bit = index & 63; return ((data_[static_cast(word)] >> bit) & 1) == 1; diff --git a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp index 804d32c34f37..cb56dfdcf5ef 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp +++ 
b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp @@ -65,11 +65,11 @@ void MSM::transform_scalar_and_get_nonzero_scalar_indices(std::span start); std::vector& thread_scalar_indices = thread_indices[thread_idx]; thread_scalar_indices.reserve(end - start); for (size_t i = start; i < end; ++i) { - BB_ASSERT_LT(i, scalars.size()); + ASSERT_DEBUG(i < scalars.size()); auto& scalar = scalars[i]; scalar.self_from_montgomery_form(); @@ -115,7 +115,7 @@ void MSM::transform_scalar_and_get_nonzero_scalar_indices(std::span std::vector::ThreadWorkUnits> MSM::get_work_units( - std::vector>& scalars, std::vector>& msm_scalar_indices) noexcept + std::span> scalars, std::vector>& msm_scalar_indices) noexcept { const size_t num_msms = scalars.size(); @@ -154,7 +154,7 @@ std::vector::ThreadWorkUnits> MSM::get_work_units( size_t thread_accumulated_work = 0; size_t current_thread_idx = 0; for (size_t i = 0; i < num_msms; ++i) { - BB_ASSERT_LT(i, msm_scalar_indices.size()); + ASSERT_DEBUG(i < msm_scalar_indices.size()); size_t msm_work = msm_scalar_indices[i].size(); size_t msm_size = msm_work; while (msm_work > 0) { @@ -450,9 +450,9 @@ typename Curve::Element MSM::evaluate_small_pippenger_round(MSMData& msm_ const size_t size = nonzero_scalar_indices.size(); for (size_t i = 0; i < size; ++i) { - BB_ASSERT_LT(nonzero_scalar_indices[i], scalars.size()); + ASSERT_DEBUG(nonzero_scalar_indices[i] < scalars.size()); uint32_t bucket_index = get_scalar_slice(scalars[nonzero_scalar_indices[i]], round_index, bits_per_slice); - BB_ASSERT_LT(bucket_index, static_cast(1 << bits_per_slice)); + ASSERT_DEBUG(bucket_index < static_cast(1 << bits_per_slice)); if (bucket_index > 0) { // do this check because we do not reset bucket_data.buckets after each round // (i.e. not neccessarily at infinity) @@ -511,14 +511,14 @@ typename Curve::Element MSM::evaluate_pippenger_round(MSMData& msm_data, // 1. low 32 bits: which bucket index do we add the point into? 
(bucket index = slice value) // 2. high 32 bits: which point index do we source the point from? for (size_t i = 0; i < size; ++i) { - BB_ASSERT_LT(scalar_indices[i], scalars.size()); + ASSERT_DEBUG(scalar_indices[i] < scalars.size()); round_schedule[i] = get_scalar_slice(scalars[scalar_indices[i]], round_index, bits_per_slice); round_schedule[i] += (static_cast(scalar_indices[i]) << 32ULL); } // Sort our point schedules based on their bucket values. Reduces memory throughput in next step of algo const size_t num_zero_entries = scalar_multiplication::process_buckets_count_zero_entries( &round_schedule[0], size, static_cast(bits_per_slice)); - BB_ASSERT_LTE(num_zero_entries, size); + ASSERT_DEBUG(num_zero_entries <= size); const size_t round_size = size - num_zero_entries; Element round_output; @@ -599,9 +599,10 @@ void MSM::consume_point_schedule(std::span point_schedule } // We do some branchless programming here to minimize instruction pipeline flushes - // TODO(@zac-williamson, cc @ludamad) check these ternary operators are not branching! - // We are iterating through our points and can come across the following scenarios: - // 1: The next 2 points in `point_schedule` belong to the *same* bucket + // TODO(@zac-williamson, cc @ludamad) check these ternary operators are not branching! 
-> (ludamad: they don't, + // but its not clear that the conditional move is fundamentally less expensive) + // We are iterating through our points and + // can come across the following scenarios: 1: The next 2 points in `point_schedule` belong to the *same* bucket // (happy path - can put both points into affine_addition_scratch_space) // 2: The next 2 points have different bucket destinations AND point_schedule[point_it].bucket contains a point // (happyish path - we can put points[lhs_schedule] and buckets[lhs_bucket] into @@ -664,7 +665,7 @@ void MSM::consume_point_schedule(std::span point_schedule affine_input_it += 2; point_it += 1; } else { // otherwise, cache the point into the bucket - BB_ASSERT_LT(lhs_point, points.size()); + ASSERT_DEBUG(lhs_point < points.size()); bucket_accumulators[lhs_bucket] = points[lhs_point]; bucket_accumulator_exists.set(lhs_bucket, true); point_it += 1; @@ -691,7 +692,7 @@ void MSM::consume_point_schedule(std::span point_schedule while ((affine_output_it < (num_affine_output_points - 1)) && (num_affine_output_points > 0)) { size_t lhs_bucket = static_cast(affine_addition_output_bucket_destinations[affine_output_it]); size_t rhs_bucket = static_cast(affine_addition_output_bucket_destinations[affine_output_it + 1]); - BB_ASSERT_LT(lhs_bucket, bucket_accumulator_exists.size()); + ASSERT_DEBUG(lhs_bucket < bucket_accumulator_exists.size()); bool has_bucket_accumulator = bucket_accumulator_exists.get(lhs_bucket); bool buckets_match = (lhs_bucket == rhs_bucket); @@ -723,9 +724,9 @@ void MSM::consume_point_schedule(std::span point_schedule bool has_bucket_accumulator = bucket_accumulator_exists.get(lhs_bucket); if (has_bucket_accumulator) { - BB_ASSERT_LT(new_scratch_space_it + 1, affine_addition_scratch_space.size()); - BB_ASSERT_LT(lhs_bucket, bucket_accumulators.size()); - BB_ASSERT_LT(new_scratch_space_it >> 1, output_point_schedule.size()); + ASSERT_DEBUG(new_scratch_space_it + 1 < affine_addition_scratch_space.size()); + 
ASSERT_DEBUG(lhs_bucket < bucket_accumulators.size()); + ASSERT_DEBUG((new_scratch_space_it >> 1) < output_point_schedule.size()); affine_addition_scratch_space[new_scratch_space_it] = affine_output[affine_output_it]; affine_addition_scratch_space[new_scratch_space_it + 1] = bucket_accumulators[lhs_bucket]; bucket_accumulator_exists.set(lhs_bucket, false); @@ -761,8 +762,8 @@ void MSM::consume_point_schedule(std::span point_schedule */ template std::vector MSM::batch_multi_scalar_mul( - std::vector>& points, - std::vector>& scalars, + std::span> points, + std::span> scalars, bool handle_edge_cases) noexcept { BB_ASSERT_EQ(points.size(), scalars.size()); diff --git a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.hpp b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.hpp index 206a3cfdffa4..31a0742d7af0 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.hpp @@ -94,7 +94,7 @@ template class MSM { static void transform_scalar_and_get_nonzero_scalar_indices(std::span scalars, std::vector& consolidated_indices) noexcept; - static std::vector get_work_units(std::vector>& scalars, + static std::vector get_work_units(std::span> scalars, std::vector>& msm_scalar_indices) noexcept; static uint32_t get_scalar_slice(const ScalarField& scalar, size_t round, size_t normal_slice_size) noexcept; static size_t get_optimal_log_num_buckets(const size_t num_points) noexcept; @@ -122,8 +122,8 @@ template class MSM { size_t num_input_points_processed, size_t num_queued_affine_points) noexcept; - static std::vector batch_multi_scalar_mul(std::vector>& points, - std::vector>& scalars, + static std::vector batch_multi_scalar_mul(std::span> points, + std::span> scalars, bool handle_edge_cases = true) noexcept; static AffineElement msm(std::span points, PolynomialSpan _scalars, @@ -132,7 
+132,7 @@ template class MSM { template static Element accumulate_buckets(BucketType& bucket_accumulators) noexcept { auto& buckets = bucket_accumulators.buckets; - BB_ASSERT_GT(buckets.size(), static_cast(0)); + ASSERT_DEBUG(buckets.size() > static_cast(0)); int starting_index = static_cast(buckets.size() - 1); Element prefix_sum; bool found_start = false; @@ -149,7 +149,7 @@ template class MSM { if (!found_start) { return Curve::Group::point_at_infinity; } - BB_ASSERT_GT(starting_index, 0); + ASSERT_DEBUG(starting_index > 0); AffineElement offset_generator = Curve::Group::affine_point_at_infinity; if constexpr (std::same_as) { constexpr auto gen = get_precomputed_generators()[0]; @@ -161,7 +161,7 @@ template class MSM { Element sum = prefix_sum + offset_generator; for (int i = static_cast(starting_index - 1); i > 0; --i) { size_t idx = static_cast(i); - BB_ASSERT_LT(idx, bucket_accumulators.bucket_exists.size()); + ASSERT_DEBUG(idx < bucket_accumulators.bucket_exists.size()); if (bucket_accumulators.bucket_exists.get(idx)) { prefix_sum += buckets[idx]; diff --git a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.test.cpp b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.test.cpp index c7bb07aebcf7..ef9a20a591b0 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.test.cpp @@ -1,5 +1,6 @@ #include "scalar_multiplication.hpp" #include "barretenberg/api/file_io.hpp" +#include "barretenberg/common/thread.hpp" #include "barretenberg/ecc/curves/bn254/bn254.hpp" #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" #include "barretenberg/ecc/curves/types.hpp" @@ -97,10 +98,10 @@ TYPED_TEST(ScalarMultiplicationTest, GetScalarSlice) uint256_t acc = input_u256; for (size_t i = 0; i < num_slices; ++i) { - size_t mask = ((1 << slice_bits) - 1); + size_t mask = ((1 << 
slice_bits) - 1UL); size_t shift = slice_bits; if (i == 0) { - mask = ((1 << last_slice_bits) - 1); + mask = ((1UL << last_slice_bits) - 1UL); shift = last_slice_bits; } slices[num_slices - 1 - i] = static_cast((acc & mask).data[0]); @@ -356,6 +357,7 @@ TYPED_TEST(ScalarMultiplicationTest, PippengerLowMemory) TYPED_TEST(ScalarMultiplicationTest, BatchMultiScalarMul) { + BB_BENCH_NAME("BatchMultiScalarMul"); SCALAR_MULTIPLICATION_TYPE_ALIASES using AffineElement = typename Curve::AffineElement; @@ -479,6 +481,90 @@ TYPED_TEST(ScalarMultiplicationTest, MSMEmptyPolynomial) EXPECT_EQ(result, Curve::Group::affine_point_at_infinity); } +// Helper function to generate scalars with specified sparsity +template +std::vector generate_sparse_scalars(size_t num_scalars, double sparsity_rate, auto& rng) +{ + std::vector scalars(num_scalars); + for (size_t i = 0; i < num_scalars; ++i) { + // Generate random value to determine if this scalar should be zero + double rand_val = static_cast(rng.get_random_uint32()) / static_cast(UINT32_MAX); + if (rand_val < sparsity_rate) { + scalars[i] = 0; + } else { + scalars[i] = ScalarField::random_element(&rng); + } + } + return scalars; +} + +// Test different MSM strategies with detailed benchmarking +// NOTE this requires BB_BENCH=1 to be set before the test command +TYPED_TEST(ScalarMultiplicationTest, BenchBatchMsm) +{ +#ifndef __wasm__ + if (!bb::detail::use_bb_bench) { +#else + { +#endif + std::cout + << "Skipping BatchMultiScalarMulStrategyComparison as BB_BENCH=1 is not passed (OR we are in wasm).\n"; + return; + } + SCALAR_MULTIPLICATION_TYPE_ALIASES + + using AffineElement = typename Curve::AffineElement; + + const size_t num_msms = 3; + const size_t msm_max_size = 1 << 17; + const double max_sparsity = 0.1; + + // Generate test data with varying sparsity + std::vector> all_points; + std::vector> all_scalars; + std::vector all_commitments; + std::vector> scalar_storage; + + for (size_t i = 0; i < num_msms; ++i) { + // Generate 
random sizes and density of 0s + const size_t size = engine.get_random_uint64() % msm_max_size; + const double sparsity = engine.get_random_uint8() / 255.0 * max_sparsity; + auto scalars = generate_sparse_scalars(size, sparsity, engine); + scalar_storage.push_back(std::move(scalars)); + + std::span points(&TestFixture::generators[i], size); + all_points.push_back(points); + all_scalars.push_back(scalar_storage.back()); + all_commitments.push_back(TestFixture::naive_msm(all_scalars.back(), all_points.back())); + } + auto func = [&](size_t num_threads) { + set_parallel_for_concurrency(num_threads); + // Strategy 1: Individual MSMs + { + BB_BENCH_NAME((bb::detail::concat())); + for (size_t i = 0; i < num_msms; ++i) { + std::vector> single_points = { all_points[i] }; + std::vector> single_scalars = { all_scalars[i] }; + auto result = scalar_multiplication::MSM::batch_multi_scalar_mul(single_points, single_scalars); + EXPECT_EQ(result[0], all_commitments[i]); + } + } + // Strategy 2: Batch + { + BB_BENCH_NAME((bb::detail::concat())); + auto result = scalar_multiplication::MSM::batch_multi_scalar_mul(all_points, all_scalars); + EXPECT_EQ(result, all_commitments); + } + }; + // call lambda with template param + func.template operator()<"1 thread ">(1); + func.template operator()<"2 threads ">(2); + func.template operator()<"4 threads ">(4); + func.template operator()<"8 threads ">(8); + func.template operator()<"16 threads ">(16); + func.template operator()<"32 threads ">(32); +} + TEST(ScalarMultiplication, SmallInputsExplicit) { uint256_t x0(0x68df84429941826a, 0xeb08934ed806781c, 0xc14b6a2e4f796a73, 0x08dc1a9a11a3c8db); diff --git a/barretenberg/cpp/src/barretenberg/eccvm/README.md b/barretenberg/cpp/src/barretenberg/eccvm/README.md new file mode 100644 index 000000000000..c9a356e54202 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/eccvm/README.md @@ -0,0 +1,659 @@ +# ECCVM (ElliptiC Curve Virtual Machine) in Barretenberg + +> **Warning:** This document is 
intended to provide an overview of the ECCVM in barretenberg. It is not a complete specification and does not cover all edge cases or optimizations. The source code should be consulted for a complete understanding of the implementation. + +## Punchline + +The ECCVM efficiently proves the correct execution of accumulated elliptic curve operations on the BN-254 curve. It does this by witnessing the correct execution into a table of numbers (in the same field as the base field of the elliptic curve) and applying polynomial constraints, multiset equality-checks, and lookup arguments. + +## Notation + +\f$\newcommand{\fq}{\mathbb{F}_q}\f$ +\f$\newcommand{\fr}{\mathbb{F}_r}\f$ +\f$\newcommand{\zr}{\mathbb{Z}/r\mathbb{Z}}\f$ +\f$\newcommand{\zq}{\mathbb{Z}/q\mathbb{Z}}\f$ +\f$\newcommand{\NeutralElt}{\mathcal{O}}\f$ + +- \f$\fq\f$ is the prime field of size \f$q = 21888242871839275222246405745257275088696311157297823662689037894645226208583\f$. +- \f$\fr\f$ is the prime field of size \f$r = 21888242871839275222246405745257275088548364400416034343698204186575808495617\f$. +- \f$E/\fq\f$ is the elliptic curve whose Weierstrass equation is \f$y^2 = x^3 + 3\f$. This is known as the _BN-254_ curve. +- The element \f$\NeutralElt\f$ refers to the neutral element of \f$E\f$, i.e., the point at infinity. We internally represent it in affine coordinates as \f$(0, 0)\f$ for efficiency, although \f$(0, 0)\f$ is not a point on the curve. +- \f$C/\fr\f$ is the elliptic curve whose Weierstrass equation is \f$y^2 = x^3 - 17\f$. This is known as the _Grumpkin_ curve. + +We have the following facts: + +- \f$2r>q>r\f$ +- \f$C(\fr)\f$ is a cyclic group of order \f$q\f$, i.e., is isomorphic to \f$\zq\f$ +- \f$E(\fq)\f$ is a cyclic group of order \f$r\f$, i.e., is isomorphic to \f$\zr\f$. + +In general, \f$\zq\f$ and \f$\zr\f$ refer to the additive abelian groups; we use \f$\fq\f$ and \f$\fr\f$ when we require the multiplicative structure. 
We do not strictly abide by this convention (common in cryptography), but it helps disambiguate usage. + +We also use the following constants: + +- \f$w=\texttt{NUM-WNAF-DIGIT-BITS} = 4\f$ +- \f$\texttt{NUM-SCALAR-BITS} = 128\f$ +- \f$\texttt{NUM-WNAF-DIGITS-PER-SCALAR}=\texttt{NUM-SCALAR-BITS} / \texttt{NUM-WNAF-DIGIT-BITS} = 32\f$ +- \f$\texttt{ADDITIONS-PER-ROW} = 4\f$ + +Finally, the terminology `pc` stands for _point-counter_. (In particular, it does _not_ stand for "program counter".) + +## Bird's eye overview/motivation + +In a nutshell, the ECCVM is a simple virtual machine to facilitate the verification of native elliptic curve computations. Given an `op_queue` of BN-254 operations, the ECCVM compiles the execution of these operations into an _execution trace representation_ over \f$\fq\f$ (the field of definition / base field of BN-254). This field is also the scalar field of Grumpkin. + +In a bit more detail, the ECCVM is a compiler that takes a sequence of operations (in BN-254) and produces a table of numbers (in \f$\fq\f$), such that the correct evaluation of the sequence of operations precisely corresponds to polynomial constraints vanishing on the rows of this table. Moreover, these polynomial constraints are independent of the specific sequence of operations. As our tables of numbers have elements in \f$\fq\f$, the _native field_ of the circuit is \f$\fq\f$. When we prove these constraints, we use the Grumpkin curve \f$C\f$. + +The core complication comes from the _efficient_ handling of scalar multiplications. Due to MSM optimizations, we effectively produce _three_ tables, where each table has its own set of multivariate polynomials, such that correct evaluation corresponds to those polynomials vanishing row-wise. These tables "communicate" via strict lookup arguments and multiset-equality checks. + +Earlier [documentation](https://hackmd.io/@aztec-network/rJ5xhuCsn?type=view) exists. 
While it does not exactly match the current codebase, it is a helpful guide; this document is an updated explication. + +## Op Queue + +We first specify the allowable operations; the OpQueue is roughly a list of operations on a fixed elliptic curve, including a running accumulator which propagates from instruction to instruction. It may be seen as a finite state machine processing simple elliptic curve operations with a single memory register. + +### Operations + +At any moment we have an accumulated value \f$A\f$, and the potential operations are: `add`, `mul`, `eq`, `reset`, `eq_and_reset`. There are four selectors \f$q_{\text{add}}, q_{\text{mul}}, q_{\text{eq}}, q_{\text{reset}}\f$, so all operations except `eq_and_reset` correspond to a unique selector being on. Given an operation, we have an associated opcode value: + +| `EccOpCode` | Op Code Value | +| -------------- | ---------------------------- | +| `add` | \f$\texttt{1000} \equiv 8\f$ | +| `mul` | \f$\texttt{0100} \equiv 4\f$ | +| `eq_and_reset` | \f$\texttt{0011} \equiv 3\f$ | +| `eq` | \f$\texttt{0010} \equiv 2\f$ | +| `reset` | \f$\texttt{0001} \equiv 1\f$ | + +On the level of selectors, +\f[ +\texttt{opcode_value}=8\,q_{\text{add}} + 4\,q_{\text{mul}} + 2\,q_{\text{eq}} + q_{\text{reset}}. +\f] + +#### Description of operations + +- `add` takes a point \f$P\f$ and updates \f$A \leftarrow A + P\f$. +- `mul` takes \f$P\f$ and \f$s \in \fr\f$ and updates \f$A \leftarrow A + sP\f$. +- `eq` takes \f$P\f$ and "checks" \f$A == P\f$. +- `reset` sets \f$A \leftarrow \NeutralElt\f$. +- `eq_and_reset` takes \f$P\f$, checks \f$A == P\f$, and then sets \f$A \leftarrow \NeutralElt\f$. + +### Decomposing scalars + +_Decomposing scalars_ is an important optimization for (multi)scalar multiplications, especially when many scalars are 128-bit. + +Both \f$\fr\f$ and \f$\fq\f$ have primitive cube roots of unity (their orders are \f$\equiv 1 \pmod{3}\f$). Fix \f$\beta \in \fq\f$ a primitive cube root of unity. 
It induces an order-6 automorphism \f$\varphi\f$ of BN-254: +\f[ +\varphi: (x,y) \mapsto (\beta x, -y). +\f] + +As \f$E(\fq) \cong \zr\f$, and the natural map \f$\fr \rightarrow \mathrm{End}_{\mathrm{ab.gp}}(\zr)\f$ is an isomorphism, \f$\varphi\f$ corresponds to \f$\zeta \in \fr\f$ satisfying +\f[ +\zeta^2 - \zeta + 1 = 0. +\f] +In particular, \f$\lambda := -\zeta\f$ is a cube root of unity in \f$\fr\f$ and satisfies \f$\lambda^2 + \lambda + 1 = 0\f$. + +Given \f$s \in \zr\f$, we can write \f$s = z_1 - \lambda z_2 = z_1 + \zeta z_2\f$ with "small" \f$z_i\f$. Consider the lattice +\f$L := \ker\big( \mathbb{Z}^2 \to \zr\big)\f$, \f$(a,b)\mapsto a + \zeta b\f$. A fundamental domain around the origin lies inside a box with side length \f$B := \frac{\sqrt{3r}}{2} < 2^{128}\f$, hence \f$z_i\f$ fit in 128 bits. See `split_into_endomorphism_scalars` method in the field module for details. + +### Column representation (a.k.a. the Input Trace) + +An operation in the OpQueue may be entered into a table as follows: + +| `op` | `X` | `Y` | `z_1` | `z_2` | `mul_scalar_full` | + +Here, `op` is the value of the operation, \f$(X, Y)\f$ are the _affine_ coordinates of \f$P\f$, `mul_scalar_full` stands for "full scalar if the operation is `mul`" (so is an element of \f$\fr\f$), and `z_1` and `z_2` are a decomposition of `mul_scalar_full` as explained [above](#decomposing-scalars). In particular, `z_1` and `z_2` may each be represented by 128 bits. + +### VM operations + +The column representation is naturally equivalent to the representation as a VM operation. 
+ +``` +struct ECCVMOperation { + using Curve = curve::BN254; + using AffineElement = Curve::Group::affine_element; + using Fr = Curve::ScalarField; + EccOpCode op_code = {}; + AffineElement base_point = { 0, 0 }; + uint256_t z1 = 0; + uint256_t z2 = 0; + Fr mul_scalar_full = 0; + bool operator==(const ECCVMOperation& other) const = default; +}; +``` + +### Op Queue + +From the perspective of the ECCVM, the `ECCOpQueue` just contains a list of `ECCVMOperation`s, i.e., it is just an Input Trace. It is worth noting that the `ECCOpQueue` class indeed contains more moving parts, to link together the ECCVM with the rest of the Goblin protocol. + +### State Machine and the execution trace + +As explained, the `ECCOpQueue` corresponds to a one-register finite state machine whose primitives are a set of operations on our elliptic curve. + +From this perspective, the goal of the ECCVM is to compile the execution of this state machine. The ECCVM takes in an `ECCOpQueue`, which corresponds to the execution of a list of operations in BN-254, and constructs three tables, together with a collection of multivariate polynomials for each table, along with some lookups and multiset constraints. (The number of variables of a polynomial associated with a table is precisely the number of columns of that table.) Then the key claim is that if (1) the polynomials associated to each table vanish on every row, (2) the lookups are satisfied, and some multi-set equivalences hold (which mediate _between_ tables), then the tables corresponds to the correct execution of the `ECCOpQueue`, i.e., to the correct execution of the one-register elliptic curve state machine. + +Breaking abstraction, the _reason_ we choose this model of witnessing the computation is that it is straightforward to SNARK. + +## Architecture + +In trying to build the execution trace of `ECCOpQueue`, the `mul` opcode is the only one that is non-trivial to evaluate, especially efficiently. 
One straightforward way to encode the `mul` operation is to break up the scalar into its bit representation and use a double-and-add procedure. We opt for the Straus MSM algorithm with \f$w=4\f$, which requires more precomputing but is significantly more efficient. + +### High level summary of the operation of the VM + +Before we dive into the Straus algorithm, here is the high-level organization. We go "row by row" in the `ECCOpQueue`; if the instruction is _not_ a `mul`, the `Transcript` table handles it. If it is a `mul` operation, it is _automatically_ part of an MSM (potentially one of length 1), and we defer evaluation to the Straus mechanism (which involves two separate tables: an `MSM` table and a `Precomputed` table). Eventually, at the _end_ of an MSM (i.e., if an op is a `mul` and the next op is not), the Transcript Columns will pick up the claimed evaluation from the MSM tables and continue along their merry way. + +To do this in a moderately efficient manner is involved; we include logic for skipping computations when we can. For instance, if we have a `mul` operation with the base point \f$P=\NeutralElt\f$, then we will have a column that bears witness to this fact and skip the explicit scalar multiplication. Analogously, if the scalar is 0 in a `mul` operation, we also encode skipping the explicit scalar multiplication. This does not merely allow us to save work; it dramatically simplifies the actual MSM computations (especially recursively), by throwing out circumstances when there can be case logic. However, this, together with the delegation of work to multiple tables, itself required by the Straus algorithm, nonetheless results in somewhat complicated column structure. + +However, at least some of this complexity is forced on us; in Barretenberg, we represent the \f$\NeutralElt\f$ of an elliptic curve in Weierstrass form as \f$(0, 0)\f$ for efficiency. 
(Note that \f$\NeutralElt\f$ is always chosen to be the point-at-infinity and in particular it has no "affine representation". Note further that \f$(0, 0)\f$ is indeed not a point on our elliptic curve!) These issues are worth keeping in mind when examining the ECCVM. + +## Straus Algorithm for MSM + +Recall, our high-level goal is to compute \f[\sum_{i=0}^{m-1} s_i P_i,\f] where \f$s_i\in \fr\f$ and \f$P_i\f$ are points on BN-254, i.e., we want to evaluate a multi-scalar multiplication of length \f$m\f$. We set \f$w=4\f$, as this is our main use-case. (In the code, this is represented as `static constexpr size_t NUM_WNAF_DIGIT_BITS = 4;`.) We have seen above that, setting \f$P'_i:=\lambda P_i = -\varphi(P_i)\f$, we may write \f$s_iP_i = z_{i, 1}P_i - z_{i, 2}P'_i\f$, where \f$z_{i,j}\f$ has no more than 128 bits. We therefore assume that our scalars have no greater than 128 bits. + +### wNAF + +The first thing to specify is our windowed non-adjacent form (wNAF). This is an optimization for computing scalar multiplication. Moreover, the fact that we are working with an elliptic curve in Weierstrass form effectively halves the number of precomputes we need to perform. + +**Warning**: our implementation is _not_ what is usually called wNAF. To avoid confusion, we simply avoid discussion on traditional (w)NAF. + +Here is the key mathematical claim: for a 128-bit positive number \f$s\f$, we can uniquely write: +\f[s = \sum_{j=0}^{31} a_j 2^{4j} + \text{skew},\f] +where + +- each \f$a_j\in \{-2^{4}+1, -2^{4}+3,\ldots, 2^{4}-1\}\f$ +- \f$\text{skew}\in\{0, 1\}\f$. + +In our implementation, we force \f$a_{31}>0\f$ to guarantee that \f$s\f$ is positive. Note that the exponent in the range of the digits \f$a_j\f$ is determined by \f$w=\texttt{NUM_WNAF_DIGIT_BITS} = 4\f$. The existence of the `skew` bit is to ensure that we can represent _even_ numbers. + +The above decomposition is referred to in the code as the wNAF representation.
Each \f$a_i\f$ is referred to either as a wNAF slice or digit. + +We will come shortly to the algorithm, but as for the motivation: in our implementation, the neutral point of the group (i.e., point-at-infinity) poses some technical challenges for us. We work with the _affine_ representation of elliptic curve points, and \f$\NeutralElt\f$ certainly has no natural affine-coordinate representation. We choose to internally represent it as \f$(0, 0)\f$ (not a point on our curve!) and handle it with separate logic. It is therefore advantageous to avoid having to extraneously perform operations involving \f$\NeutralElt\f$, especially when we are implementing the recursive ECCVM verifier. + +### Straus + +Here is the problem: efficiently compute \f[\sum_i s_i P_i,\f] where the \f$s_i\f$ are 128-bit numbers and \f$P_i\f$ are points in BN-254. (Recall that we reduce to the case of 128-bit scalars by decomposing, as explained [above](#decomposing-scalars).) + +To do this, we break up our computation into steps. + +#### Precomputation + +For each \f$s_i\f$, we expand it in wNAF form:\f$s_i = \sum_{j=0}^{31} a_{i, j} 2^{4j} + \text{skew}_i\f$. + +For every \f$P_i\f$, precompute and store the multiples: \f[\{-15P_i, -13P_i, \ldots, 13P_i, 15P_i\}\f] +as well as \f$2P_i\f$. Note that, as \f$E\f$ is represented in Weierstrass form, \f$nP\f$ and \f$-nP\f$ have the same affine \f$x\f$-coordinate and the \f$y\f$-coordinates differ by a sign. + +#### Algorithm + +Here are the static variables we need. + +- `NUM_WNAF_DIGITS_PER_SCALAR=32`. +- `NUM_WNAF_DIGIT_BITS = 4`. +- `ADDITIONS_PER_ROW = 4`. This says that we can do 4 primitive EC additions per "row" of the virtual machine. + +1. Set \f$A = \NeutralElt\f$ to be the neutral element of the group. +2. For \f$j\in [0, \ldots, 31]\f$, do: + 1. For \f$k\in [0,\ldots, \lceil \frac{m-1}{4}\rceil]\f$ (here, \f$k\f$ is the "row" in the VM), do: + 1.
Set \f$A\leftarrow A + a_{4k, 31-j}P_{4k} + a_{4k+1, 31-j}P_{4k+1} + a_{4k+2, 31-j}P_{4k+2} + a_{4k+3, 31-j}P_{4k+3}\f$, where the individual scalar multiples are _looked up_ from the precomputed tables indicated in [precomputation](#precomputation). (No accumulations if the points \f$P_{4k+j}\f$ don't exist, which can potentially hold for \f$k=\lceil \frac{m-1}{4}\rceil\f$ and some \f$j\f$.) + 2. If \f$j\neq 31\f$, set \f$A\leftarrow 2^w A= 16 A\f$. +3. For \f$j = 32\f$, do: + 1. For \f$k\in [0,\ldots, \lceil \frac{m-1}{4}\rceil]\f$, do: + 1. Set \f$A\leftarrow A + \text{skew}_{4k}P_{4k} + \text{skew}_{4k+1}P_{4k+1} + \text{skew}_{4k+2}P_{4k+2} + \text{skew}_{4k+3}P_{4k+3}\f$. +4. Return \f$A\f$. + +We picture this algorithm as follows. We build a table, the \f$i^{\text{th}}\f$ row of which is the wNAF expansion of \f$s_i\f$ in most-significant to least-significant order. This means that the first column corresponds to the most significant digit (\f$a_{-, 31}\f$). + +We work column by column (this is the \f$j\f$-loop); for every vertical chunk of 4 elements, we accumulate (i.e., add to an accumulator \f$A\f$) looked up values corresponding to the digit/base-point pair. In the pseudo-code, we have an index \f$31-j\f$ because we want to proceed in decreasing order of significant digits. (Looking forward, a "row" of the MSM table in the ECCVM can handle 4 such additions.) We do this until we exhaust the column. We then multiply the accumulator by \f$16\f$ (as long as we are not at the last digit) and go to the next column. Finally, at the end we handle the `skew` digit. + +## Tables + +We have three tables that mediate the computation. As explained above, all of the computations are easy except for scalar multiplications. We process the computation and chunk what looks like scalar multiplications into MSMs. Here is the brief outline. + +- `transcript_builder`. The transcript columns organize and process all of the computations _except for the scalar multiplications_. 
In particular, the Transcript Columns _do not bear witness_ to the intermediate computations necessary for MSMs. However, they still "access" the results of these computations. +- `precomputed_tables_builder`. The precomputed columns are: for every \f$P\f$ that occurs in an MSM (which was syntactically pulled out by the `transcript_builder`), we compute/store \f$\{P, 3P, \ldots, 15P, 2P\}\f$. +- `msm_builder` actually computes/constrains the MSMs via the Straus algorithm. + +A final note: apart from three Lagrange columns, all columns are either 1. part of the input trace; or 2. witness columns committed to by the Prover. + +In the following tables, each column has a defined "value range". If the range is not +\f$\fq\f$, the column must be range constrained, either with an explicit range check or implicitly through range constraints placed on other columns that define relations over the target column. + +### Transcript Columns + +\f$\newcommand{\transcriptmsminfinity}{{\mathrm{transcript\_msm\_infinity}}}\f$ +\f$\newcommand{\transcriptaccumulatornotempty}{{\mathrm{transcript\_accumulator\_not\_empty}}}\f$ +\f$\newcommand{\transcriptadd}{{\mathrm{transcript\_add}}}\f$ +\f$\newcommand{\transcriptmul}{{\mathrm{transcript\_mul}}}\f$ +\f$\newcommand{\transcripteq}{{\mathrm{transcript\_eq}}}\f$ +\f$\newcommand{\transcriptresetaccumulator}{{\mathrm{transcript\_reset\_accumulator}}}\f$ +\f$\newcommand{\transcriptmsmtransition}{{\mathrm{transcript\_msm\_transition}}}\f$ +\f$\newcommand{\transcriptpc}{{\mathrm{transcript\_pc}}}\f$ +\f$\newcommand{\transcriptmsmcount}{{\mathrm{transcript\_msm\_count}}}\f$ +\f$\newcommand{\transcriptmsmcountzeroattransition}{{\mathrm{transcript\_msm\_count\_zero\_at\_transition}}}\f$ +\f$\newcommand{\transcriptpx}{{\mathrm{transcript\_Px}}}\f$ +\f$\newcommand{\transcriptpy}{{\mathrm{transcript\_Py}}}\f$ +\f$\newcommand{\transcriptbaseinfinity}{{\mathrm{transcript\_base\_infinity}}}\f$ 
+\f$\newcommand{\transcriptzone}{{\mathrm{transcript\_z1}}}\f$ +\f$\newcommand{\transcriptztwo}{{\mathrm{transcript\_z2}}}\f$ +\f$\newcommand{\transcriptzonezero}{{\mathrm{transcript\_z1zero}}}\f$ +\f$\newcommand{\transcriptztwozero}{{\mathrm{transcript\_z2zero}}}\f$ +\f$\newcommand{\transcriptop}{{\mathrm{transcript\_op}}}\f$ +\f$\newcommand{\transcriptaccumulatorx}{{\mathrm{transcript\_accumulator\_x}}}\f$ +\f$\newcommand{\transcriptaccumulatory}{{\mathrm{transcript\_accumulator\_y}}}\f$ +\f$\newcommand{\transcriptmsmx}{{\mathrm{transcript\_msm\_x}}}\f$ +\f$\newcommand{\transcriptmsmy}{{\mathrm{transcript\_msm\_y}}}\f$ +\f$\newcommand{\transcriptmsmintermediatex}{{\mathrm{transcript\_msm\_intermediate\_x}}}\f$ +\f$\newcommand{\transcriptmsmintermediatey}{{\mathrm{transcript\_msm\_intermediate\_y}}}\f$ +\f$\newcommand{\transcriptaddxequal}{{\mathrm{transcript\_add\_x\_equal}}}\f$ +\f$\newcommand{\transcriptaddyequal}{{\mathrm{transcript\_add\_y\_equal}}}\f$ +\f$\newcommand{\transcriptbasexinverse}{{\mathrm{transcript\_base\_x\_inverse}}}\f$ +\f$\newcommand{\transcriptbaseyinverse}{{\mathrm{transcript\_base\_y\_inverse}}}\f$ +\f$\newcommand{\transcriptaddlambda}{{\mathrm{transcript\_add\_lambda}}}\f$ +\f$\newcommand{\transcriptmsmxinverse}{{\mathrm{transcript\_msm\_x\_inverse}}}\f$ +\f$\newcommand{\transcriptmsmcountattransitioninverse}{{\mathrm{transcript\_msm\_count\_at\_transition\_inverse}}}\f$ + +| column name | builder name | value range | computation | description | +| -------------------------------------------- | ------------------------------- | ----------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| | | | **Populated in the first loop** | | 
+| \f$\transcriptmsminfinity\f$ | transcript_msm_infinity | \f$\{0, 1 \}\f$ | `msm_output.is_point_at_infinity();` | are we at the end of an MSM _and_ is the output the point at infinity? | +| \f$\transcriptaccumulatornotempty\f$ | accumulator_not_empty | \f$\{0, 1 \}\f$ | `row.accumulator_not_empty = !state.is_accumulator_empty;`, `final_row.accumulator_not_empty = !updated_state.is_accumulator_empty;` | not(is the accumulator either empty or point-at-infinity?) | +| \f$\transcriptadd\f$ | q_add | \f$\{0, 1 \}\f$ | | is opcode | +| \f$\transcriptmul\f$ | q_mul | \f$\{0, 1 \}\f$ | | is opcode | +| \f$\transcripteq\f$ | q_eq | \f$\{0, 1\}\f$ | | is opcode | +| \f$\transcriptresetaccumulator\f$ | q_reset_accumulator | \f$\{0, 1 \}\f$ | | does opcode reset accumulator? | +| \f$\transcriptmsmtransition\f$ | msm_transition | \f$\{0, 1\}\f$ | `msm_transition = is_mul && next_not_msm && (state.count + num_muls > 0);` | are we at the end of an msm? i.e., is current transcript row the final `mul` opcode of a MSM | +| \f$\transcriptpc\f$ | pc | \f$\fq\f$ | `updated_state.pc = state.pc - num_muls;` | _decreasing_ point counter. Only takes into count `mul` operations, not `add` operations. | +| \f$\transcriptmsmcount\f$ | msm_count | \f$\fq\f$ | `updated_state.count = current_ongoing_msm ? state.count + num_muls : 0;` | Number of muls so far in the _current_ MSM (NOT INCLUDING the current step) | +| \f$\transcriptmsmcountzeroattransition\f$ | msm_count_zero_at_transition | \f$\{0, 1\}\f$ | `((state.count + num_muls) == 0) && entry.op_code.mul && next_not_msm;` | is the number of scalar muls we have completed at the end of our "MSM block" zero? (note that from the definition, if this variable is non-zero, then `msm_transition == 0`.) 
| +| \f$\transcriptpx\f$ | base_x | \f$\fq\f$ | | (input trace) \f$x\f$-coordinate of base point \f$P\f$ | +| \f$\transcriptpy\f$ | base_y | \f$\fq\f$ | | (input trace) \f$y\f$-coordinate of base point \f$P\f$ | +| \f$\transcriptbaseinfinity\f$ | base_infinity | \f$\{0, 1\}\f$ | | is \f$P=\NeutralElt\f$? | +| \f$\transcriptzone\f$ | z1 | \f$[0,2^{128})\f$ | | (input trace) first part of decomposed scalar | +| \f$\transcriptztwo\f$ | z2 | \f$[0,2^{128})\f$ | | (input trace) second part of decomposed scalar | +| \f$\transcriptzonezero\f$ | z1_zero | \f$\{0, 1\}\f$ | | is z1 zero? | +| \f$\transcriptztwozero\f$ | z2_zero | \f$\{0, 1\}\f$ | | is z2 zero? | +| \f$\transcriptop\f$ | op_code | \f$\in \fq\f$ | `entry.op_code.value();` | 8 `q_add` + 4 `q_mul` + 2 `q_eq` + `q_reset` | +| | | | **Populated after converting from projective to affine coordinates** | | +| \f$\transcriptaccumulatorx\f$ | accumulator_x | \f$\fq\f$ | | x-coordinate of accumulator \f$A\f$ | +| \f$\transcriptaccumulatory\f$ | accumulator_y | \f$\fq\f$ | | y-coordinate of accumulator \f$A\f$ | +| \f$\transcriptmsmx\f$ | msm_output_x | \f$\fq\f$ | | if we are at the end of an MSM, (output of MSM) + `offset_generator()` = `(msm_output_x, msm_output_y)`, else 0 | +| \f$\transcriptmsmy\f$ | msm_output_y | \f$\fq\f$ | | if we are at the end of an MSM, (output of MSM) + `offset_generator()` = `(msm_output_x, msm_output_y)`, else 0 | +| \f$\transcriptmsmintermediatex\f$ | transcript_msm_intermediate_x | \f$\fq\f$ | | if we are at the end of an MSM, (output of MSM) = `(transcript_msm_intermediate_x, transcript_msm_intermediate_y)`, else 0 | +| \f$\transcriptmsmintermediatey\f$ | transcript_msm_intermediate_y | \f$\fq\f$ | | if we are at the end of an MSM, (output of MSM) = `(transcript_msm_intermediate_x, transcript_msm_intermediate_y)`, else 0 | +| \f$\transcriptaddxequal\f$ | transcript_add_x_equal | \f$\{0, 1\}\f$ | `(vm_x == accumulator_x) or (vm_infinity && accumulator_infinity);` | do the accumulator 
and the point we are adding have the same \f$x\f$-value? (here, the point we are adding is either part of an `add` instruction or the output of an MSM). 0 if we are not accumulating anything. | +| \f$\transcriptaddyequal\f$ | transcript_add_y_equal | \f$\{0, 1\}\f$ | `(vm_y == accumulator_y) or (vm_infinity && accumulator_infinity);` | do the accumulator and the point we are adding have the same \f$y\f$-value? 0 if we are not accumulating anything. | +| \f$\transcriptbasexinverse\f$ | base_x_inverse | \f$\fq\f$ | | if adding a point to the accumulator and the \f$x\f$ values are not equal, the inverse of the difference of the \f$x\f$ values. (witnesses `transcript_add_x_equal == 0`) | +| \f$\transcriptbaseyinverse\f$ | base_y_inverse | \f$\fq\f$ | | if adding a point to the accumulator and the \f$y\f$ values are not equal, the inverse of the difference of the \f$y\f$ values. (witnesses `transcript_add_y_equal == 0`) | +| \f$\transcriptaddlambda\f$ | transcript_add_lambda | \f$\fq\f$ | | if adding a point into the accumulator, contains the lambda gradient: the slope of the line between \f$A\f$ and \f$P\f$ | +| \f$\transcriptmsmxinverse\f$ | transcript_msm_x_inverse | \f$\fq\f$ | | used to validate that transcript_msm_infinity is correct; if the former is zero, this is the inverse of the \f$x\f$ coordinate of the (non-shifted) output of the MSM | +| \f$\transcriptmsmcountattransitioninverse\f$ | msm_count_at_transition_inverse | \f$\fq\f$ | | used to validate transcript_msm_count_zero_at_transition | + +### Transcript description and algorithm + +In the above table, we have a reference for what the transcript columns are. Here, we provide a natural-language summary of witness-generation, which in turn directly implies what the constraints should look like. Some of the apparent complexity comes from the fact that, for efficiency, we do operations in _projective coordinates_ and then normalize them all at the end. (This requires fewer field-inversions.)
+ +We start our top row with \f$\transcriptmsmcount = 0\f$ and \f$\transcriptaccumulatornotempty = 0\f$. This corresponds to saying "there are no active multiplications in our MSM" and "the accumulator is \f$\NeutralElt\f$". + +We process each `op`. + +If the `op` is an `add`, we process the addition as follows. We have an accumulated value \f$A\f$ and a point \f$P\f$ to add. If \f$\transcriptbaseinfinity = 1\f$, we don't need to do anything: \f$P=\NeutralElt\f$. Similarly, if \f$\transcriptaccumulatornotempty = 0\f$, then we just (potentially) need to change \f$\transcriptaccumulatornotempty\f$, \f$\transcriptaccumulatorx\f$ and \f$\transcriptaccumulatory\f$. Otherwise, we need to check \f$\transcriptaddxequal\f$: the formula for point addition requires dividing by \f$\Delta x\f$, and in particular is not well-constrained either when adding points that are negative of each other or adding the same point to itself. (These two cases may be easily distinguished by examining \f$\transcriptaddyequal\f$). If we are _not_ in this case, we need the help of \f$\transcriptaddlambda\f$, which is the slope between the points \f$P\f$ and \f$A\f$. (This slope will happily not be \f$\infty\f$, as we have ruled out the only occasions it had to be.) + +The value \f$A\leftarrow A + P\f$ will of course involve different \f$\transcriptaccumulatorx\f$ and \f$\transcriptaccumulatory\f$, but may also cause \f$\transcriptaccumulatornotempty\f$ to flip. + +We emphasize: we _do not_ modify \f$\transcriptpc\f$ in this case. Indeed, that variable is only modified based on the number of small scalar `mul`s we are doing. + +If the `op` is `eq`, we process the op as follows. We have an accumulated value \f$A\f$ and a point \f$P\f$. Due to our non-uniform representation of \f$\NeutralElt\f$, we must break up into cases. + +- Both are \f$\NeutralElt\f$ (i.e., \f$\transcriptaccumulatornotempty = 0\f$ and \f$\transcriptbaseinfinity=1\f$). Then accept! +- Neither is equal to \f$\NeutralElt\f$.
Then we linearly compare \f$\transcriptaccumulatorx-\transcriptpx\f$ and \f$\transcriptaccumulatory-\transcriptpy\f$ and accept if both are \f$0\f$. +- Exactly one is equal to \f$\NeutralElt\f$. Then reject! + +If our `op` is `eq_reset`, we do the same as for `eq`, but we also set \f$\transcriptaccumulatornotempty\leftarrow 0\f$. + +If our `op` is a `mul`, with scalars `z1` and `z2`, the situation is more complicated. Now we have to update auxiliary wires. As explained, _every_ `mul` operation is understood to be part of an MSM. + +- \f$\transcriptmsmcount\f$ counts the number of active short-scalar multiplications _up to and not including_ the current `mul` op. The value of this column at the _next_ row increments by \f$2 - \transcriptzonezero - \transcriptztwozero\f$. +- In other words, we simply avoid (our deferred) computations if \f$\transcriptzonezero = 1\f$ and/or \f$\transcriptztwozero = 1\f$. +- Similarly, \f$\transcriptpc\f$ _decrements_ by \f$2 - \transcriptzonezero - \transcriptztwozero\f$. We use a decreasing point counter (only counting short `mul`s) for efficiency reasons, as it allows for cheaper commitments. +- If the next `op` is not a `mul`, and the total number of active `mul` operations (which is \f$\transcriptmsmcount + (2 - \transcriptzonezero - \transcriptztwozero)\f$) is non-zero, set the \f$\transcriptmsmtransition = 1\f$. Else, set \f$\transcriptmsmcountzeroattransition = 1\f$. Either way, the current `mul` then represents the end of an MSM. This is where \f$\transcriptmsmcountattransitioninverse\f$ is used. +- If \f$\transcriptmsmtransition = 0\f$, then \f$\transcriptmsmx\f$, \f$\transcriptmsmy\f$, \f$\transcriptmsmintermediatex\f$, and \f$\transcriptmsmintermediatey\f$ are all \f$0\f$. (In particular, this holds when we are in the middle of an MSM.) Otherwise, we call \f$\transcriptmsmx\f$ and \f$\transcriptmsmy\f$ from the multiset argument, i.e., from the MSM table. 
Then the values of \f$\transcriptmsmintermediatex\f$ and \f$\transcriptmsmintermediatey\f$ are obtained by subtracting off the `OFFSET`. + +#### Transcript size + +The size of the _non-zero_ part of the table is the length of the `OpQueue` + 1 (we have shiftable columns). We have organized our wire values so that zero-padding is compatible with the polynomial constraints. (See e.g. the _decreasing_ point counter.) + +### Precomputed Columns + +As the set of precomputed columns is small, we include the code snippet. + +``` + struct PointTablePrecomputationRow { + int s1 = 0; + int s2 = 0; + int s3 = 0; + int s4 = 0; + int s5 = 0; + int s6 = 0; + int s7 = 0; + int s8 = 0; + bool skew = false; + bool point_transition = false; + uint32_t pc = 0; + uint32_t round = 0; + uint256_t scalar_sum = 0; + AffineElement precompute_accumulator{ + 0, 0 + }; // contains a precomputed element, i.e., something in {P, 3P, ..., 15P}. + AffineElement precompute_double{ 0, 0 }; + }; + +``` + +As discussed in [Decomposing Scalars](#decomposing-scalars), WLOG our scalars have 128 bits and we may expand them in \f$w=4\f$ [wNAF](#wnaf): + +\f[s = \sum_{j=0}^{31} a_j 2^{4j} + \text{skew},\f] +where + +- each \f$a_j\in \{-2^{4}+1, -2^{4}+3,\ldots, 2^{4}-1\}\f$ +- \f$\text{skew}\in\{0, 1\}\f$. + +Given a wNAF digit \f$\in \{-15, -13, \ldots, 15\}\f$, we \f$\text{compress}\f$ it via the map: +\f[\text{compress}\colon d\mapsto \frac{d+15}{2},\f] +which is of course a bijection \f$\{-15, -13, \ldots, 15\}\rightarrow \{0,\ldots, 15\}\f$. (This compression is helpful for indexing later: looking forward, the values \f$[-15P, -13P, \ldots, 15P]\f$ will be stored in an array, so if we want to look up \f$kP\f$, where \f$k\in \{-15, -13, \ldots, 15\}\f$, we can go to the \f$\text{compress}(k)\f$ index of our array associated to \f$P\f$.)
+ +\f$\newcommand{\precomputesonehi}{{\mathrm{precompute\_s1hi}}}\f$ +\f$\newcommand{\precomputesonelo}{{\mathrm{precompute\_s1lo}}}\f$ +\f$\newcommand{\precomputestwohi}{{\mathrm{precompute\_s2hi}}}\f$ +\f$\newcommand{\precomputestwolo}{{\mathrm{precompute\_s2lo}}}\f$ +\f$\newcommand{\precomputesthreehi}{{\mathrm{precompute\_s3hi}}}\f$ +\f$\newcommand{\precomputesthreelo}{{\mathrm{precompute\_s3lo}}}\f$ +\f$\newcommand{\precomputesfourhi}{{\mathrm{precompute\_s4hi}}}\f$ +\f$\newcommand{\precomputesfourlo}{{\mathrm{precompute\_s4lo}}}\f$ +\f$\newcommand{\precomputeskew}{{\mathrm{precompute\_skew}}}\f$ +\f$\newcommand{\precomputepointtransition}{{\mathrm{precompute\_point\_transition}}}\f$ +\f$\newcommand{\precomputepc}{{\mathrm{precompute\_pc}}}\f$ +\f$\newcommand{\precomputeround}{{\mathrm{precompute\_round}}}\f$ +\f$\newcommand{\precomputescalarsum}{{\mathrm{precompute\_scalar\_sum}}}\f$ +\f$\newcommand{\precomputetx}{{\mathrm{precompute\_tx}}}\f$ +\f$\newcommand{\precomputety}{{\mathrm{precompute\_ty}}}\f$ +\f$\newcommand{\precomputedx}{{\mathrm{precompute\_dx}}}\f$ +\f$\newcommand{\precomputedy}{{\mathrm{precompute\_dy}}}\f$ +\f$\newcommand{\preselect}{{\mathrm{precompute\_select}}}\f$ + +The following is one row in the Precomputed table; there are `NUM_WNAF_DIGITS_PER_SCALAR / WNAF_DIGITS_PER_ROW == 32/4 = 8` rows. The row index is `i`. (This number is also witnessed as `round`.)
+| column name | builder name | value range | computation | description | +| ----------- | ---------------------- | ----------- | --------------------------------------------------------------- | ------------------------------------------------------- | +| \f$\precomputesonehi\f$ | s1 | \f$[0, 4)\f$ | | first two bits of \f$\text{compress}(a_{31 - 4i})\f$ | +| \f$\precomputesonelo\f$ | s2 | \f$[0, 4)\f$ | | second two bits of \f$\text{compress}(a_{31 - 4i})\f$ | +| \f$\precomputestwohi\f$ | s3 | \f$[0, 4)\f$ | | first two bits of \f$\text{compress}(a_{31 - (4i + 1)})\f$ | +| \f$\precomputestwolo\f$ | s4 | \f$[0, 4)\f$ | | second two bits of \f$\text{compress}(a_{31 - (4i + 1)})\f$ | +| \f$\precomputesthreehi\f$ | s5 | \f$[0, 4)\f$ | | first two bits of \f$\text{compress}(a_{31 - (4i + 2)})\f$ | +| \f$\precomputesthreelo\f$ | s6 | \f$[0, 4)\f$ | | second two bits of \f$\text{compress}(a_{31 - (4i + 2)})\f$ | +| \f$\precomputesfourhi\f$ | s7 | \f$[0, 4)\f$ | | first two bits of \f$\text{compress}(a_{31 - (4i + 3)})\f$ | +| \f$\precomputesfourlo\f$ | s8 | \f$[0, 4)\f$ | | second two bits of \f$\text{compress}(a_{31 - (4i + 3)})\f$ | +| \f$\precomputeskew\f$ | skew | \f$\{0,1\}\f$ | | skew bit | +| \f$\precomputepointtransition\f$ | point_transition | \f$\{0,1\}\f$ | | are we at the last row corresponding to this scalar? | +| \f$\precomputepc\f$ | pc | \f$\fq\f$ | | value of the point counter of this EC operation | +| \f$\precomputeround\f$ | round | \f$\fq\f$ | | "row" of the computation, i.e., `i`. 
| \f$\precomputescalarsum\f$ | scalar_sum | \f$\fq\f$ | | sum up-to-now of the digits | +| \f$\precomputetx\f$, \f$\precomputety\f$ | precompute_accumulator | \f$E(\fq)\subset \fq\times \fq\f$ | | \f$(15-2i)P\f$ | +| \f$\precomputedx\f$, \f$\precomputedy\f$ | precompute_double | \f$E(\fq)\subset \fq\times \fq\f$ | | \f$2P\f$ | +| \f$\preselect\f$ | | \f$\{0,1\}\f$ | | if 1, evaluate Straus precomputation algorithm at current row | + +### Precomputed Description and Algorithm + +First, let us recall the structure of `ScalarMul`. + +``` +template <typename CycleGroup> struct ScalarMul { + uint32_t pc; + uint256_t scalar; + typename CycleGroup::affine_element base_point; + std::array<int, NUM_WNAF_DIGITS_PER_SCALAR> + wnaf_digits; // [a_{n-1}, a_{n-2}, ..., a_{0}], where each a_i ∈ {-2ʷ⁻¹ + 1, -2ʷ⁻¹ + 3, ..., 2ʷ⁻¹ - 3, 2ʷ⁻¹ - + // 1} ∪ {0}. (here, w = `NUM_WNAF_DIGIT_BITS`). in particular, a_i is an odd integer with + // absolute value less than 2ʷ. Represents the number `scalar` = ∑ᵢ aᵢ 2⁴ⁱ - `wnaf_skew`. + bool wnaf_skew; // necessary to represent _even_ integers + // size bumped by 1 to record base_point.dbl() + std::array<typename CycleGroup::affine_element, POINT_TABLE_SIZE + 1> precomputed_table; +}; +``` + +Note that, with respect to the decomposition in [wnaf](#wnaf), `wnaf_digits[i]`= \f$a_{31-i}\f$. Indeed, the order of the array `wnaf_digits` is from highest-order to lowest-order. + +Given a `ScalarMul`, it is easy to construct the 8 rows of the Precomputed Table. As explained, `WNAF_DIGITS_PER_ROW = 4`; hence the `NUM_WNAF_DIGITS_PER_SCALAR = 32` digits may be broken up into 8 rows, where each row corresponds to 4 wNAF digits, each of which is in \f$\{-15, -13, \ldots, 13, 15\}\f$. + +1. For \f$i = 0 .. 7\f$ + + 1. For each of the 4 digits in the row: `wnaf_digits[4i]`, `wnaf_digits[4i+1]`, `wnaf_digits[4i+2]`, and `wnaf_digits[4i+3]`, `compress` from \f$\{-15, -13, \ldots, 13, 15\}\rightarrow \{0,\ldots 15\}\f$ via the monotonic map \f$z\mapsto \frac{z+15}{2}\f$. Then our compressed digits are in the latter range. + 2.
extract the first and last pair of bits and fill in order in corresponding parts of the table: \f$\precomputesonehi\f$, \f$\precomputesonelo\f$, \f$\precomputestwohi\f$, \f$\precomputestwolo\f$, \f$\precomputesthreehi\f$, \f$\precomputesthreelo\f$, \f$\precomputesfourhi\f$, \f$\precomputesfourlo\f$ correspond to the 2-bit decompositions of the compressed wNAF digits. + 3. The value \f$\precomputepointtransition\f$ is set to 1 if this is the last row (i.e., `i == 7`) for the current scalar, else 0. This tracks if the next row in the table corresponds to a new `ScalarMul`. + 4. The value \f$\precomputepc\f$ is copied from the corresponding `ScalarMul.pc`. + 5. The value \f$\precomputeround\f$ is set to the row index `i`. + 6. The value \f$\precomputescalarsum\f$ accumulates the _scalar reconstruction_: \f$\displaystyle \sum_{j=0}^{4i+3} a_{31-j} \cdot 2^{4(4i+3-j)}\f$. (Here, our current row is \f$i\f$.) In other words: at row \f$i\f$, we implicitly consider the string of digits `wnaf_digits[0]`, ..., `wnaf_digits[4i+3]`; \f$\precomputescalarsum\f$ is precisely the value of the \f$(4i+4)\f$-digit number corresponding to this string of digits. + 7. The value \f$(\precomputetx, \precomputety)\f$ stores the precomputed point \f$(15-2i)P\f$. (Note that this reflects a coincidence: the number of rows (per scalar multiplication) is the same as the number of odd multiples of \f$P\f$ that we need to store.) + 8. The value \f$(\precomputedx, \precomputedy)\f$ stores \f$2P\f$. (In particular, \f$2P\f$ is stored on all \f$8\f$ rows coming from a given `ScalarMul`.) + +The constraints are straightforward. + +- We must range constrain the \f$\precomputesonehi\f$, \f$\precomputesonelo\f$, \f$\precomputestwohi\f$, \f$\precomputestwolo\f$, \f$\precomputesthreehi\f$, \f$\precomputesthreelo\f$, \f$\precomputesfourhi\f$, \f$\precomputesfourlo\f$. We do this via the polynomial \f$((x-1)^2 - 1)((x-2)^2-1)\f$, a quartic constraint.
+- We constrain that \f$\precomputescalarsum\f$ is updated correctly at each row. +- When \f$\precomputepointtransition = 1\f$, when we constrain that original `scalar` is \f$\precomputescalarsum - \precomputeskew\f$. +- We constrain the elliptic curve values. Note that we may assume that \f$P\neq \NeutralElt\f$; indeed, we only populate this table when we are doing non-trivial scalar multiplications. It follows that \f$nP\neq \NeutralElt\f$ for \f$0 add_state{ AddState{ false, 0, { 0, 0 }, 0, 0 }, + AddState{ false, 0, { 0, 0 }, 0, 0 }, + AddState{ false, 0, { 0, 0 }, 0, 0 }, + AddState{ false, 0, { 0, 0 }, 0, 0 } }; + FF accumulator_x = 0; + FF accumulator_y = 0; + }; +``` + +\f$\newcommand{\msmpc}{{\mathrm{msm\_pc}}}\f$ +\f$\newcommand{\msmsizeofmsm}{{\mathrm{msm\_size\_of\_msm}}}\f$ +\f$\newcommand{\msmcount}{{\mathrm{msm\_count}}}\f$ +\f$\newcommand{\msmround}{{\mathrm{msm\_round}}}\f$ +\f$\newcommand{\msmtransition}{{\mathrm{msm\_transition}}}\f$ +\f$\newcommand{\msmadd}{{\mathrm{msm\_add}}}\f$ +\f$\newcommand{\msmdouble}{{\mathrm{msm\_double}}}\f$ +\f$\newcommand{\msmskew}{{\mathrm{msm\_skew}}}\f$ +\f$\newcommand{\msmxone}{{\mathrm{msm\_x1}}}\f$ +\f$\newcommand{\msmyone}{{\mathrm{msm\_y1}}}\f$ +\f$\newcommand{\msmxtwo}{{\mathrm{msm\_x2}}}\f$ +\f$\newcommand{\msmytwo}{{\mathrm{msm\_y2}}}\f$ +\f$\newcommand{\msmxthree}{{\mathrm{msm\_x3}}}\f$ +\f$\newcommand{\msmythree}{{\mathrm{msm\_y3}}}\f$ +\f$\newcommand{\msmxfour}{{\mathrm{msm\_x4}}}\f$ +\f$\newcommand{\msmyfour}{{\mathrm{msm\_y4}}}\f$ +\f$\newcommand{\msmaddone}{{\mathrm{msm\_add1}}}\f$ +\f$\newcommand{\msmaddtwo}{{\mathrm{msm\_add2}}}\f$ +\f$\newcommand{\msmaddthree}{{\mathrm{msm\_add3}}}\f$ +\f$\newcommand{\msmaddfour}{{\mathrm{msm\_add4}}}\f$ +\f$\newcommand{\msmsliceone}{{\mathrm{msm\_slice1}}}\f$ +\f$\newcommand{\msmslicetwo}{{\mathrm{msm\_slice2}}}\f$ +\f$\newcommand{\msmslicethree}{{\mathrm{msm\_slice3}}}\f$ +\f$\newcommand{\msmslicefour}{{\mathrm{msm\_slice4}}}\f$ 
+\f$\newcommand{\msmlambdaone}{{\mathrm{msm\_lambda1}}}\f$ +\f$\newcommand{\msmlambdatwo}{{\mathrm{msm\_lambda2}}}\f$ +\f$\newcommand{\msmlambdathree}{{\mathrm{msm\_lambda3}}}\f$ +\f$\newcommand{\msmlambdafour}{{\mathrm{msm\_lambda4}}}\f$ +\f$\newcommand{\msmcollisionxone}{{\mathrm{msm\_collision\_x1}}}\f$ +\f$\newcommand{\msmcollisionxtwo}{{\mathrm{msm\_collision\_x2}}}\f$ +\f$\newcommand{\msmcollisionxthree}{{\mathrm{msm\_collision\_x3}}}\f$ +\f$\newcommand{\msmcollisionxfour}{{\mathrm{msm\_collision\_x4}}}\f$ +\f$\newcommand{\msmaccumulatorx}{{\mathrm{msm\_accumulator\_x}}}\f$ +\f$\newcommand{\msmaccumulatory}{{\mathrm{msm\_accumulator\_y}}}\f$ + +| column name | builder name | value range | computation | description | +| ------------------------- | ------------------------------ | -------------- | ------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| \f$\msmpc\f$ | pc | \f$\fq\f$ | | counter over all half-length (128 bit) scalar muls used to compute the required MSMs. constant on a given MSM, refers more precisely to the number of _completed_ scalar muls up until the current MSM. in particular, this skips values, unlike `transcript_pc`. | +| \f$\msmsizeofmsm\f$ | msm_size | \f$\fq\f$ | | the number of points that will be scaled and summed | +| \f$\msmcount\f$ | msm_count | \f$\fq\f$ | `row.msm_count = static_cast(offset);` | number of wNAF-multiplications processed so far _in this round_. | +| \f$\msmround\f$ | msm_round | \f$[0, 32]\f$ | | current "round" of MSM, in \f$\{0, \ldots, 32\}\f$, which corresponds to the wNAF digit being processed. (final round deals with the `skew` bit.) 
| +| \f$\msmtransition\f$ | msm_transition | \f$\{0, 1\}\f$ | `(digit_idx == 0) && (row_idx == 0)` | is 1 if the current row starts the processing of a different MSM, else 0 . Note this _not_ the same as the description of `transcript_msm_transition` | +| \f$\msmadd\f$ | q_add | \f$\{0, 1\}\f$ | | 1 if we are adding points in the Straus MSM algorithm at current row | +| \f$\msmdouble\f$ | q_double | \f$\{0, 1\}\f$ | | 1 if we are doubling accumulator in the Straus MSM algorithm at current row | +| \f$\msmskew\f$ | q_skew | \f$\{0, 1\}\f$ | | 1 if we are incorporating skew points in the Straus MSM algorithm at current row | +| \f$\msmxone\f$ | add_state[0].point.x | \f$\fq\f$ | | \f$x\f$-coordinate of first potential point (corresponding to add_state[0]) to add in Straus MSM round | +| \f$\msmyone\f$ | add_state[0].point.y | \f$\fq\f$ | | \f$y\f$-coordinate of first potential point (corresponding to add_state[0]) to add in Straus MSM round | +| \f$\msmxtwo\f$ | add_state[1].point.x | \f$\fq\f$ | | \f$x\f$-coordinate of second potential point (corresponding to add_state[1]) to add in Straus MSM | +| \f$\msmytwo\f$ | add_state[1].point.y | \f$\fq\f$ | | \f$y\f$-coordinate of second potential point (corresponding to add_state[1]) to add in Straus MSM | +| \f$\msmxthree\f$ | add_state[2].point.x | \f$\fq\f$ | | x-coordinate of third potential point (corresponding to add_state[2]) to add in Straus MSM round | +| \f$\msmythree\f$ | add_state[2].point.y | \f$\fq\f$ | | y-coordinate of third potential point (corresponding to add_state[2]) to add in Straus MSM round | +| \f$\msmxfour\f$ | add_state[3].point.x | \f$\fq\f$ | | x-coordinate of fourth potential point (corresponding to add_state[3]) to add in Straus MSM round | +| \f$\msmyfour\f$ | add_state[3].point.y | \f$\fq\f$ | | y-coordinate of fourth potential point (corresponding to add_state[3]) to add in Straus MSM round | +| \f$\msmaddone\f$ | add_state[0].add | \f$\{0, 1\}\f$ | | are we adding msm_x1/msm_y1 (resp. 
add_state[0]) into accumulator at current round? | +| \f$\msmaddtwo\f$ | add_state[1].add | \f$\{0, 1\}\f$ | | are we adding msm_x2/msm_y2 (resp. add_state[1]) into accumulator at current round? | +| \f$\msmaddthree\f$ | add_state[2].add | \f$\{0, 1\}\f$ | | are we adding msm_x3/msm_y3 (resp. add_state[2]) into accumulator at current round? | +| \f$\msmaddfour\f$ | add_state[3].add | \f$\{0, 1\}\f$ | | are we adding msm_x4/msm_y4 (resp. add_state[3]) into accumulator at current round? | +| \f$\msmsliceone\f$ | add_state[0].slice | \f$[0, 15]\f$ | | wNAF slice value (a.k.a. digit) for first point (corresponds to odd number in \f$\{-15, -13, \ldots, 15\}\f$ via the monotonic bijection) | +| \f$\msmslicetwo\f$ | add_state[1].slice | \f$[0, 15]\f$ | | wNAF slice value (a.k.a. digit) for second point | +| \f$\msmslicethree\f$ | add_state[2].slice | \f$[0, 15]\f$ | | wNAF slice value (a.k.a. digit) for third point | +| \f$\msmslicefour\f$ | add_state[3].slice | \f$[0, 15]\f$ | | wNAF slice value (a.k.a. digit) for fourth point | +| \f$\msmlambdaone\f$ | add_state[0].lambda | \f$\fq\f$ | | if add_state[0].add==1 (eqiv. if msm_add1 == 1), slope of the line between the two points being added. else 0. | +| \f$\msmlambdatwo\f$ | add_state[1].lambda | \f$\fq\f$ | | if add_state[1].add==1 (eqiv. if msm_add2 == 1), slope of the line between the two points being added. else 0. | +| \f$\msmlambdathree\f$ | add_state[2].lambda | \f$\fq\f$ | | if add_state[2].add==1 (eqiv. if msm_add3 == 1), slope of the line between the two points being added. else 0. | +| \f$\msmlambdafour\f$ | add_state[3].lambda | \f$\fq\f$ | | if add_state[3].add==1 (eqiv. if msm_add3 == 1), slope of the line between the two points being added. else 0. | +| \f$\msmcollisionxone\f$ | add_state[0].collision_inverse | \f$\fq\f$ | | if add_state[0].add == 1, the difference of the \f$x\f$ values of the accumulator and the point being added. 
used to ensure incomplete ecc addition exceptions not triggered if msm_add1 = 1 | +| \f$\msmcollisionxtwo\f$ | add_state[1].collision_inverse | \f$\fq\f$ | | if add_state[1].add == 1, the difference of the \f$x\f$ values of the accumulator and the point being added. | +| \f$\msmcollisionxthree\f$ | add_state[2].collision_inverse | \f$\fq\f$ | | if add_state[2].add == 1, the difference of the \f$x\f$ values of the accumulator and the point being added. | +| \f$\msmcollisionxfour\f$ | add_state[3].collision_inverse | \f$\fq\f$ | | if add_state[3].add == 1, the difference of the \f$x\f$ values of the accumulator and the point being added. | +| \f$\msmaccumulatorx\f$ | accumulator_x | \f$\fq\f$ | | (accumulator_x, accumulator_y) = \f$A\f$ is the accumulated point | +| \f$\msmaccumulatory\f$ | accumulator_y | \f$\fq\f$ | + +### MSM algorithm and description + +We have already given a high-level summary of the Straus algorithm. Let us get into the weeds! + +The function signature is the following: + +``` +static std::tuple, std::array, 2>> compute_rows( + const std::vector& msms, const uint32_t total_number_of_muls, const size_t num_msm_rows) + +``` + +In other words, `compute_rows` takes in a vector of MSMs (each of which is a vector of `ScalarMul`s), together with the total number of non-zero `mul` operations we compute and the (easy-to-compute) a priori size bound `num_msm_rows`, and returns a vector of `MSMRow`s and two vectors, which will represent our point-counts (i.e., will be fed into the lookup argument). + +Before we get into the content, note that we may assume that no point is \f$\NeutralElt\f$ in any of the MSMs. Indeed, this is due to checks done by the Transcript Columns. However, it is in principle possible that some of the scalars are \f$0\f$; we do not force \f$\transcriptzonezero = 0 \Rightarrow \transcriptzone != 0\f$ + +Each row (after the first row) in the MSM table will belong to one of the MSMs we are assigned to compute in `msms`. 
For an `msm` of size `m`, the number of rows that will be added in the MSM table is: + +\f[(\texttt{NUM-WNAF-DIGITS-PER-SCALAR + 1})\lceil \frac{m}{\texttt{ADDITIONS-PER-ROW}}\rceil + (\texttt{NUM-WNAF-DIGITS-PER-SCALAR} - 1) = 33\frac{m}{4} + 31.\f] +There is one other quirk we should explicate before entering the algorithm. In general, the logic for affine elliptic curve addition can have cases: when the \f$x\f$ coordinates match up. (Doubling cannot have cases for points on our affine elliptic curve because there is no \f$\fq\f$-rational \f$2\f$-torsion.) Moreover, in general our logic must branch if either our base or the accumulator is \f$\NeutralElt\f$. As we have indicated several times above, for optimization, we _represent_ \f$\NeutralElt\f$ as \f$(0, 0)\f$ in the code. It is advantageous to avoid this branching logic. We do so by _relaxing completeness_. In particular, we start off the accumulator of every MSM with a fixed `offset_generator`. This is a fixed point of \f$E\f$ that we may consider pseudo-random (though it is fixed and indeed hardcoded). Then we decree that for our MSM to be valid, in the course of the Straus algorithm, whenever I accumulate \f$A\leftarrow A + P\f$, the \f$x\f$-coordinates of \f$A\f$ and \f$P\f$ differ. This condition of being valid may be witnessed by the prover providing the inverse of the difference of the \f$x\f$-coordinates every time it is necessary. + +This indeed breaks completeness, inasmuch as there are valid `EccOpQueue`s which will not be able to be compiled into a valid execution trace. However, this is vanishingly unlikely, in the course of any normal operations. + +Finally, we may describe the algorithm. We implicitly organize our data in the following type of table (as indicated in the [Straus Section](#straus)). Each row of our table corresponds to a scalar multiplication: the elements of the row are the wNAF digits (including the `skew` bit). 
In other words, the columns of our table correspond to wNAF digits. Our algorithm will proceed column by column, from most significant to least significant digit, processing one vertical chunk of four elements after another. To emphasize: this table syntactically encoding our MSM is _not_ what we refer to as the MSM table of the VM, which rather witnesses the correct execution of the MSM. + +1. Set the first row of the MSM table (of our VM) to be 0. +2. Initialize lookup table read counts: `point_table_read_counts[0]` and `point_table_read_counts[1]` to track the positive and negative lookups corresponding to \f$nP\f$, where \f$n\in \{-15, -13, \ldots, 13, 15\}\f$. Each table will have size `total_number_of_muls * 8` (since `POINT_TABLE_SIZE/2 = 8`). +3. Compute the MSM row boundaries: for each MSM, fill out the indices of where it starts and the starting \f$\msmpc\f$. This requires a calculation of the number of rows required, which we come back to in the [next section](#msm-size). +4. First pass: populate `point_table_read_counts` based on `msm[point_idx].wnaf_digits[digit_idx]`. Update read counts based on skew as well. + +We deviate from the witness generation algorithm here. In the code, in order to minimize the number of field divisions, we first compute in projective coordinates, then batch-normalize back to affine to fill in the values affine values. Here we just specify the values of the various columns in a more naive way. + +5. Set the accumulator at the beginning of every `msm` to be `offset_generator`. (This allows us to avoid case-logic in EC addition.) +6. For `digit-position` (a.k.a. column of my syntactic MSM table) in \f$0..31\f$: + + 1. Populate the rows of the VM's MSM table as follows. + + 1. Check if the row corresponds to a new `msm`. If so, set \f$\msmtransition = 1\f$. + 2. Process the (no greater than) `ADDITIONS_PER_ROW` points per row: + + 1. 
Get the up-until-now value of the accumulator and set into \f$(\msmaccumulatorx, \msmaccumulatory)\f$. For the first row of an MSM, this is `offset_generator`, for a non-first row of an MSM this involves processing the previous row of the MSM table. + 2. Set \f$\msmadd = 1\f$, \f$\msmdouble = 0\f$, and \f$\msmskew = 0\f$. + 3. Set the booleans \f$\msmaddone\f$, \f$\msmaddtwo\f$, \f$\msmaddthree\f$, and \f$\msmaddfour\f$ to the correct values (all should be one if we haven't yet exhausted the column, if we are at the end of a column and \f$m\f$ is not divisible by 4, only the first \f$m\text{ mod} 4\f$ should be turned on). + 4. For each point that is "on", record the following (which all correspond to members of `AddState`): + 1. the slice a.k.a. digit value. (This has values in \f$\{0,\ldots,15\}\f$ and corresponds to the elements \f$\{-15, -13, \ldots, 13, 15\}\f$.) These are filled in \f$\msmsliceone\f$, \f$\msmslicetwo\f$, \f$\msmslicethree\f$, and \f$\msmslicefour\f$. + 2. The precomputed value of the slice/digit times the corresponding base point. These are filled in \f$\msmxone\f$, \f$\msmyone\f$, \f$\msmxtwo\f$, \f$\msmytwo\f$, \f$\msmxthree\f$, \f$\msmythree\f$, and \f$\msmxfour\f$, \f$\msmyfour\f$. Note that, as we are proceeding vertically, the base points corresponding to \f$\msmsliceone\f$, \f$\msmslicetwo\f$, \f$\msmslicethree\f$, and \f$\msmslicefour\f$ may very well all be different. + 3. Auxiliary values needed to compute the sum of the accumulator and the points-to-be-added into the accumulator: in particular, the slope of the line between the (intermediate) accumulator and the point-to-be-added. These are contained in \f$\msmlambdaone\f$, \f$\msmlambdatwo\f$, \f$\msmlambdathree\f$, and \f$\msmlambdafour\f$. Here, there is a subtle point: we do not explicitly record the intermediate values of the accumulator in this row in our VM's MSM table, although \f$\msmlambdatwo\f$, \f$\msmlambdathree\f$, and \f$\msmlambdafour\f$ reflect these values. 
Indeed, if \f$Q_1 = (\msmxone, \msmyone)\f$, \f$Q_2 = (\msmxtwo, \msmytwo)\f$, and our accumulator is starting at \f$A\f$, then \f$\msmlambdaone\f$ is the slope between the line \f$A\f$ and \f$Q_1\f$, while \f$\msmlambdatwo\f$ is the slope between the line \f$A+Q_1\f$ and \f$Q_2\f$. However, \f$A + Q_1\f$ is _not_ explicitly recorded in our MSM table. + 4. For each point that is "on", fill in the following values \f$\msmcollisionxone\f$, \f$\msmcollisionxtwo\f$, \f$\msmcollisionxthree\f$, and \f$\msmcollisionxfour\f$. These are the differences in the \f$x\f$ values between the (intermediate) accumulator and the point-to-be-added. This witnesses/verifies the fact that we don't have edge-case logic for the addition. As with the \f$\lambda\f$ values, these reflect the intermediate values of the accumulator although that intermediate value is _not_ explicitly recorded in our MSM table. + + 3. Process the 4 doublings, as long as we are not at the last wnaf digit. This involves adding a _single_ row to the MSM table. + 1. Set \f$\msmadd = 0\f$, \f$\msmdouble = 1\f$, and \f$\msmskew = 0\f$. + 2. Get the value of \f$\msmaccumulatorx\f$ and \f$\msmaccumulatory\f$ from the last row. + 3. The values: \f$\msmcount\f$, \f$\msmtransition\f$, \f$\msmsliceone\f$, \f$\msmslicetwo\f$, \f$\msmslicethree\f$, \f$\msmslicefour\f$, \f$\msmxone\f$, \f$\msmyone\f$, \f$\msmxtwo\f$, \f$\msmytwo\f$, \f$\msmxthree\f$, \f$\msmythree\f$, \f$\msmxfour\f$, \f$\msmyfour\f$, \f$\msmcollisionxone\f$, \f$\msmcollisionxtwo\f$, \f$\msmcollisionxthree\f$, and \f$\msmcollisionxfour\f$ are all set to \f$0\f$. + 4. We set \f$\msmlambdaone\f$, \f$\msmlambdatwo\f$, \f$\msmlambdathree\f$, and \f$\msmlambdafour\f$ correctly: they are each the slope of the line passing through the current _intermediate_ accumulator tangent to \f$E\f$. For instance, \f$\msmlambdaone\f$ is the slope of the line through \f$A\f$, \f$\msmlambdatwo\f$ is the slope through \f$2A\f$, etc. + 4. 
Process the skew digit in an analogous way to the processing of the additions. + +### MSM size + +Suppose we have an MSM of short scalars of size \f$m\f$. Then the number of rows we add to the MSM table of the VM is: + +\f[(\texttt{NUM-WNAF-DIGITS-PER-SCALAR + 1})\lceil \frac{m}{\texttt{ADDITIONS-PER-ROW}}\rceil + (\texttt{NUM-WNAF-DIGITS-PER-SCALAR} - 1) = 33\frac{m}{4} + 31.\f] +Indeed, there are \f$\lceil \frac{m}{\texttt{ADDITIONS-PER-ROW}}\rceil\f$ `add`-rows per digit, and there are \f$\texttt{NUM-WNAF-DIGITS-PER-SCALAR + 1}\f$ digits per scalar (where the last digit is the `skew` digit). Finally, the last term comes from the doublings. + +Note that in the regime where we have a few long MSMs, this is asymptotic to \f$8.25m\f$, which is comparable to the \f$8m\f$ we get from the precomputed columns. On the other hand, if we have many very short MSMs, the size of this table dominates what was produced by the precomputed columns. + +## Multisets and Lookups + +As explained in the introduction, we sometimes treat these three sets of disjoint columns as three separate tables. There must be a mechanism to ensure that they "communicate" with each other. We do _not_ use bare copy-constraints; instead, we use three multisets equality checks. (These were formerly called "strict lookup arguments", where every write had to have precisely one corresponding read.) The goal of these section is to sketch how these constraints, together with the lookups, fully piece together the ECCVM. We emphasize that this is merely a sketch; for full details, please see the [set relation](../relations/ecc_vm/ecc_set_relation_impl.hpp). + +### Multisets + +The basic structure: each term corresponds to _two_ multisets. (One could refer to these as an input multiset and an output multiset, but this directionality is purely psychological and we avoid it.) 
One table contributes to one of the multisets, another table contributes to the other multiset, and the term is _satisfied_ if the two multisets are equal.
diff --git a/barretenberg/cpp/src/barretenberg/eccvm/README_FUZZERS.md b/barretenberg/cpp/src/barretenberg/eccvm/README_FUZZERS.md new file mode 100644 index 000000000000..25fb3df7cf33 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/eccvm/README_FUZZERS.md @@ -0,0 +1,219 @@ +# ECCVM Fuzzer + +This document describes the ECCVM (Elliptic Curve Circuit Virtual Machine) fuzzer implementation, which provides comprehensive testing of the ECCVM circuit builder and trace checker. + +## Overview + +The ECCVM fuzzer generates random ECC operations to test the ECCVM circuit builder. It checks the correctness of the trace without constructing the full proof. + + +## Architecture + +### Data Structures + +The fuzzer operates on the following data structures: + +```cpp +// Operation types for the fuzzer +enum class OpType : uint8_t { + ADD = 0, // Add a point to the accumulator + MUL = 1, // Multiply a point by a scalar and add to accumulator + EQ_AND_RESET = 2, // Check equality and reset accumulator + MERGE = 3, // Merge operation (used internally) + EMPTY_ROW = 4, // Insert empty row for testing + MAX_OP = 5 +}; + +struct FieldVMDataChunk { + std::array data; // 64 bytes of FieldVM data +}; + +struct SingleOp { + OpType op_type; // Operation type + uint8_t generator_index; // Generator index and top bit for negating the generator + uint8_t scalar_index; // Scalar index (0-255) +}; + +struct FuzzerTuple { + FieldVMDataChunk fieldvm_data; // FieldVM data for scalar computation + SingleOp operation; // ECCVM operation +}; +``` + +### Operation Flow + +1. **Input Processing**: The fuzzer processes `FuzzerTuple` structures containing FieldVM data and ECC operations. + +2. **FieldVM Computation**: Uses FieldVM to precompute scalars from the input data with controlled settings: + - Disabled heavy operations (inversion, square root, batch inversion, power, division) + - Limited to 65536 steps for performance + +3. 
**Point Generation**: Creates base generators and linear combinations for testing. + +4. **Operation Execution**: Processes operations through the ECCOpQueue: + - ADD: Adds points to accumulator + - MUL: Multiplies points by scalars and adds to accumulator + - EQ_AND_RESET: Checks equality and resets accumulator + - MERGE: Merges operations + - EMPTY_ROW: Inserts empty rows for testing + +5. **Circuit Validation**: Uses `ECCVMTraceChecker::check()` to validate the circuit. + +## Conditional Skip Optimization + +The fuzzer includes conditional skip optimization that is only enabled in fuzzing builds: + +### Fuzzing Builds (`FUZZING` macro defined) +- Skip optimization is enabled for performance +- Relations with `skip` methods are skipped when their skip condition is met +- Currently only `ECCVMSetRelation` has a skip method + +### Production Builds (default) +- Skip optimization is disabled for maximum security +- All relations are always accumulated +- Ensures no performance optimizations affect correctness + +### Skip Conditions + +Currently, only `ECCVMSetRelation` implements skip optimization: + +```cpp +template inline static bool skip(const AllEntities& in) +{ + // Skip when no non-trivial copy constraints and no transcript operations + return (in.z_perm - in.z_perm_shift).is_zero() && + in.transcript_mul.is_zero() && + in.lagrange_last.is_zero(); +} +``` + +## Build Configuration + +### Compilation Flags + +```bash +# For fuzzing builds (with skip optimization) +clang++ -DFUZZING -fsanitize=fuzzer -O2 -g eccvm.fuzzer.cpp -o eccvm_fuzzer + +# For production builds (without skip optimization) +clang++ -O2 -g eccvm.fuzzer.cpp -o eccvm_fuzzer +``` + +### Function Signature + +The fuzzer function signature changes based on build configuration: + +```cpp +// Fuzzing builds +bool ECCVMTraceChecker::check(Builder& builder, + numeric::RNG* engine_ptr, + bool disable_fixed_dyadic_trace_size); + +// Production builds +bool ECCVMTraceChecker::check(Builder& builder, + 
numeric::RNG* engine_ptr); +``` + +## Usage + +### Running the Fuzzer + +```bash +# Basic fuzzing run +./eccvm_fuzzer + +# Run with specific corpus directory +./eccvm_fuzzer corpus/ + +# Run with custom parameters +./eccvm_fuzzer -max_len=1024 -timeout=30 corpus/ +``` + +### Fuzzer Parameters + +- **Input Size**: Must be at least `sizeof(FuzzerTuple)` bytes +- **Number of Operations**: Determined by `Size / sizeof(FuzzerTuple)` +- **FieldVM Data**: 64 bytes per operation for scalar computation +- **Generator Indices**: 0-255 range, with modulo operations for safety +- **Scalar Indices**: 0-255 range, used to select precomputed scalars + +## Error Handling + +The fuzzer includes comprehensive error handling: + +1. **Input Validation**: Checks minimum input size and valid operation count +2. **Bounds Checking**: All array accesses are bounds-checked +3. **Exception Handling**: Catches and reports exceptions without crashing +4. **Circuit Validation**: Reports detailed failure information when circuit check fails + +### Error Reporting + +When the circuit validation fails, the fuzzer reports: +- Number of operations +- Operation sequence that caused failure +- Generator indices +- Detailed operation information including infinity checks and negation flags + +## Performance Considerations + +### Fuzzing Builds +- **Skip Optimization**: Reduces computation for inactive relations +- **FieldVM Settings**: Disabled heavy operations for better performance +- **Controlled Data Size**: Limited FieldVM data to prevent excessive computation + +### Production Builds +- **Maximum Security**: All relations always accumulated +- **No Performance Optimizations**: Ensures correctness over speed + +## Testing Strategy + +The fuzzer is designed to test: + +1. **Basic Operations**: ADD, MUL, EQ_AND_RESET operations +2. **Edge Cases**: Points at infinity, negation, empty operations +3. **Complex Sequences**: Multiple operations with various combinations +4. 
**Circuit Correctness**: Validation through ECCVMTraceChecker +5. **Memory Safety**: Proper bounds checking and error handling + +## Integration with LibFuzzer + +The fuzzer integrates seamlessly with LibFuzzer: + +1. **Automatic Discovery**: LibFuzzer automatically detects and uses the fuzzer +2. **Corpus Management**: Works with LibFuzzer's corpus management features +3. **Crash Reporting**: Integrates with LibFuzzer's crash reporting +4. **Performance Metrics**: Provides performance data for optimization + +## Troubleshooting + +### Common Issues + +1. **Compilation Errors**: Ensure all required headers are included +2. **Runtime Errors**: Check that input data is properly aligned +3. **Performance Issues**: Verify skip optimization is enabled in fuzzing builds +4. **Memory Issues**: Check for proper bounds checking and error handling + +### Debugging + +Enable debug output by setting environment variables: + +```bash +export FUZZER_DEBUG=1 +./eccvm_fuzzer +``` + +## Contributing + +When modifying the fuzzer: + +1. **Test Thoroughly**: Run extensive fuzzing tests after changes +2. **Maintain Security**: Ensure all security considerations are preserved +3. **Update Documentation**: Keep this README up to date +4. **Performance**: Benchmark changes to ensure no performance regression +5. 
**Skip Optimization**: Only enable skip optimization in fuzzing builds + +## References + +- [LibFuzzer Documentation](https://llvm.org/docs/LibFuzzer.html) +- [ECCVM Circuit Documentation](../README.md) +- [Barretenberg Fuzzing Guide](../../../docs/fuzzing.md) diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm.fuzzer.cpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm.fuzzer.cpp new file mode 100644 index 000000000000..277e807c6b2a --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm.fuzzer.cpp @@ -0,0 +1,263 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } +// ===================== + +#include "barretenberg/crypto/generators/generator_data.hpp" +#include "barretenberg/ecc/curves/bn254/bn254.hpp" +#include "barretenberg/ecc/fields/field.fuzzer.hpp" +#include "barretenberg/eccvm/eccvm_circuit_builder.hpp" +#include "barretenberg/eccvm/eccvm_trace_checker.hpp" +#include "barretenberg/op_queue/ecc_op_queue.hpp" +#include +#include +#include +#include + +using namespace bb; +using G1 = bb::g1; +using Fr = typename G1::Fr; +using Element = bb::curve::BN254::Element; + +// Security note: This fuzzer generates random ECC operations to test the ECCVM circuit builder +// and trace checker. It focuses on the check_circuit mechanism without full proving to avoid +// potential security issues with proving key generation or proof verification. 
+ +// Operation types for the fuzzer +enum class OpType : uint8_t { ADD = 0, MUL = 1, EQ_AND_RESET = 2, MERGE = 3, EMPTY_ROW = 4, MAX_OP = 5 }; +struct FieldVMDataChunk { + std::array data; +}; + +struct SingleOp { + OpType op_type; + uint8_t generator_index; + uint8_t scalar_index; +}; + +struct FuzzerTuple { + FieldVMDataChunk fieldvm_data; + SingleOp operation; +}; + +struct OperationDetail { + size_t op_index; + OpType op_type; + size_t generator_index; + Fr scalar; + bool is_infinity; + bool should_negate; + + OperationDetail(size_t idx, OpType type, size_t gen_idx, const Fr& sc, bool infinity, bool negate = false) + : op_index(idx) + , op_type(type) + , generator_index(gen_idx) + , scalar(sc) + , is_infinity(infinity) + , should_negate(negate) + {} +}; +static constexpr size_t NUM_GENERATORS = 4; +// Helper function to print operation details +void print_operation_details(size_t op_index, + OpType op_type, + size_t generator_index, + const Fr& scalar, + bool is_infinity, + bool should_negate = false) +{ + std::cout << "Operation " << op_index << ": "; + switch (op_type) { + case OpType::ADD: + std::cout << "ADD(generator=" << generator_index << (should_negate ? ", negated" : "") + << (is_infinity ? ", infinity" : "") << ")"; + break; + case OpType::MUL: + std::cout << "MUL(generator=" << generator_index << ", scalar=" << scalar << (should_negate ? ", negated" : "") + << (is_infinity ? 
", infinity" : "") << ")"; + break; + case OpType::EQ_AND_RESET: + std::cout << "EQ_AND_RESET"; + break; + case OpType::MERGE: + std::cout << "MERGE"; + break; + case OpType::EMPTY_ROW: + std::cout << "EMPTY_ROW"; + break; + default: + std::cout << "UNKNOWN(" << static_cast(op_type) << ")"; + break; + } + std::cout << std::endl; +} + +extern "C" int LLVMFuzzerTestOneInput(const uint8_t* Data, size_t Size) +{ + + if (Size < sizeof(FuzzerTuple)) { + return 0; // Invalid input size + } + + const FuzzerTuple* input = reinterpret_cast(Data); + + // Validate input parameters + size_t num_operations = (Size) / sizeof(FuzzerTuple); + if (num_operations == 0) { + return 0; + } + + auto total_fieldvm_data_size = num_operations * sizeof(FieldVMDataChunk); + std::vector all_fieldvm_data(total_fieldvm_data_size); + for (size_t i = 0; i < num_operations; ++i) { + std::memcpy( + all_fieldvm_data.data() + i * sizeof(FieldVMDataChunk), &input[i].fieldvm_data, sizeof(FieldVMDataChunk)); + } + + // Pre-compute scalars using FieldVM + std::vector precomputed_scalars; + // Create FieldVM instance for scalar computation + FieldVM field_vm(false, 65536); // Disable debug, max 65536 steps + + // Disable heavy operations for better performance + field_vm.settings.enable_inv = false; // Disable inversion + field_vm.settings.enable_sqrt = false; // Disable square root + field_vm.settings.enable_batch_invert = false; // Disable batch inversion + field_vm.settings.enable_pow = false; // Disable power operation + field_vm.settings.enable_div = false; // Disable division + field_vm.settings.enable_div_assign = false; // Disable division assignment + + // Run FieldVM with the controlled amount of data + field_vm.run(all_fieldvm_data.data(), total_fieldvm_data_size); + + // Extract all field elements from FieldVM state as potential scalars + for (size_t i = 0; i < 32; ++i) { // Use all 32 internal state elements + Fr scalar = field_vm.field_internal_state[i]; + 
precomputed_scalars.push_back(scalar); + } + + // Create base generators (always create 4 base generators) + auto base_generators = G1::derive_generators("eccvm_fuzzer_generators", NUM_GENERATORS); + std::vector points; + + // Use the first 16 FieldVM elements to create 4 linear combinations of base generators + for (size_t i = 0; i < 4; ++i) { + // Create linear combination: sum of base_generators[j] * precomputed_scalars[i*4 + j] + typename G1::element combined_point = G1::point_at_infinity; + for (size_t j = 0; j < 4; ++j) { + Fr scalar = precomputed_scalars[i * 4 + j]; + combined_point = combined_point + (base_generators[j] * scalar); + } + points.push_back(combined_point); + } + + // Create op queue + std::shared_ptr op_queue = std::make_shared(); + + // Store operation details for potential failure reporting + std::vector operation_details; + + // Process operations + for (size_t i = 0; i < num_operations; ++i) { + const auto& op = input[i].operation; + OpType op_type = op.op_type; + + switch (op_type) { + case OpType::ADD: { + // Use modulo to ensure valid generator index (lower 7 bits) + size_t generator_index = (op.generator_index & 0x7F) % points.size(); + bool should_negate = (op.generator_index & 0x80) != 0; // Top bit controls negation + + typename G1::element point_to_add = points[generator_index]; + if (should_negate) { + point_to_add = -point_to_add; // Negate the point + } + + bool is_infinity = point_to_add.is_point_at_infinity(); + operation_details.emplace_back(i, op_type, generator_index, Fr(0), is_infinity, should_negate); + op_queue->add_accumulate(point_to_add); + break; + } + case OpType::MUL: { + // Use modulo to ensure valid generator index (lower 7 bits) + size_t generator_index = (op.generator_index & 0x7F) % points.size(); + bool should_negate = (op.generator_index & 0x80) != 0; // Top bit controls negation + + // Use pre-computed scalar selected by scalar_indices + Fr scalar = precomputed_scalars[op.scalar_index % 
precomputed_scalars.size()]; + + typename G1::element point_to_multiply = points[generator_index]; + if (should_negate) { + point_to_multiply = -point_to_multiply; // Negate the point + } + + bool is_infinity = point_to_multiply.is_point_at_infinity(); + operation_details.emplace_back(i, op_type, generator_index, scalar, is_infinity, should_negate); + op_queue->mul_accumulate(point_to_multiply, scalar); + break; + } + case OpType::EQ_AND_RESET: { + operation_details.emplace_back(i, op_type, 0, Fr(0), false, false); + op_queue->eq_and_reset(); + break; + } + case OpType::MERGE: { + operation_details.emplace_back(i, op_type, 0, Fr(0), false, false); + + op_queue->eq_and_reset(); + op_queue->merge(); + break; + } + case OpType::EMPTY_ROW: { + operation_details.emplace_back(i, op_type, 0, Fr(0), false, false); + op_queue->empty_row_for_testing(); + break; + } + default: + operation_details.emplace_back(i, op_type, 0, Fr(0), false, false); + break; + } + } + + // Always merge at the end to finalize the circuit + operation_details.emplace_back(num_operations, OpType::EQ_AND_RESET, 0, Fr(0), false, false); + op_queue->eq_and_reset(); + + operation_details.emplace_back(num_operations + 1, OpType::MERGE, 0, Fr(0), false, false); + op_queue->merge(); + + // Create circuit builder + ECCVMCircuitBuilder circuit{ op_queue }; + + // Test the check_circuit mechanism + bool result = ECCVMTraceChecker::check(circuit, nullptr, /* disable_fixed_dyadic_trace_size= */ true); + // The circuit should always be valid if our operations are well-formed + // If check fails, it might indicate a bug in the circuit builder or trace checker + if (!result) { + std::cout << "ERROR: ECCVMTraceChecker::check returned false!" 
<< std::endl; + std::cout << "Input parameters:" << std::endl; + std::cout << " num_operations: " << num_operations << std::endl; + std::cout << " operations: "; + for (size_t i = 0; i < num_operations; ++i) { + std::cout << static_cast(input[i].operation.op_type) << " "; + } + std::cout << std::endl; + std::cout << " generator_indices: "; + for (size_t i = 0; i < num_operations; ++i) { + std::cout << static_cast(input[i].operation.generator_index) << " "; + } + std::cout << std::endl; + + // Print operation details that led to the failure + std::cout << "Operation sequence that caused failure:" << std::endl; + for (const auto& op : operation_details) { + print_operation_details( + op.op_index, op.op_type, op.generator_index, op.scalar, op.is_infinity, op.should_negate); + } + } + + assert(result == true); + + return 0; +} diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm.test.cpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm.test.cpp index 8d8df1ecdf3f..d96a350b2612 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm.test.cpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm.test.cpp @@ -66,18 +66,26 @@ ECCVMCircuitBuilder generate_circuit(numeric::RNG* engine = nullptr) return builder; } -ECCVMCircuitBuilder generate_zero_circuit([[maybe_unused]] numeric::RNG* engine = nullptr) +// returns a CircuitBuilder consisting of mul_add ops of the following form: either 0*g, for a group element, or +// x * e, where x is a scalar and e is the identity element of the group. 
+ECCVMCircuitBuilder generate_zero_circuit([[maybe_unused]] numeric::RNG* engine = nullptr, bool zero_scalars = 1) { using Curve = curve::BN254; using G1 = Curve::Element; using Fr = Curve::ScalarField; std::shared_ptr op_queue = std::make_shared(); - [[maybe_unused]] G1 a = G1::random_element(engine); - [[maybe_unused]] Fr x = Fr::random_element(engine); - for (auto i = 0; i < 8; i++) { - op_queue->mul_accumulate(Curve::Group::affine_point_at_infinity, 0); + if (!zero_scalars) { + for (auto i = 0; i < 8; i++) { + Fr x = Fr::random_element(engine); + op_queue->mul_accumulate(Curve::Group::affine_point_at_infinity, x); + } + } else { + for (auto i = 0; i < 8; i++) { + G1 g = G1::random_element(engine); + op_queue->mul_accumulate(g, 0); + } } op_queue->merge(); @@ -113,9 +121,47 @@ void complete_proving_key_for_test(bb::RelationParameters& relation_paramete gate_challenges[idx] = FF::random_element(); } } -TEST_F(ECCVMTests, Zeroes) +TEST_F(ECCVMTests, ZeroesCoefficients) { - ECCVMCircuitBuilder builder = generate_zero_circuit(&engine); + ECCVMCircuitBuilder builder = generate_zero_circuit(&engine, 1); + + std::shared_ptr prover_transcript = std::make_shared(); + ECCVMProver prover(builder, prover_transcript); + ECCVMProof proof = prover.construct_proof(); + + std::shared_ptr verifier_transcript = std::make_shared(); + ECCVMVerifier verifier(verifier_transcript); + bool verified = verifier.verify_proof(proof); + + ASSERT_TRUE(verified); +} +TEST_F(ECCVMTests, PointAtInfinity) +{ + ECCVMCircuitBuilder builder = generate_zero_circuit(&engine, 0); + + std::shared_ptr prover_transcript = std::make_shared(); + ECCVMProver prover(builder, prover_transcript); + ECCVMProof proof = prover.construct_proof(); + + std::shared_ptr verifier_transcript = std::make_shared(); + ECCVMVerifier verifier(verifier_transcript); + bool verified = verifier.verify_proof(proof); + + ASSERT_TRUE(verified); +} +TEST_F(ECCVMTests, ScalarEdgeCase) +{ + using Curve = curve::BN254; + using G1 = 
Curve::Element; + using Fr = Curve::ScalarField; + + std::shared_ptr op_queue = std::make_shared(); + G1 a = G1::one(); + + op_queue->mul_accumulate(a, Fr(uint256_t(1) << 128)); + op_queue->eq_and_reset(); + op_queue->merge(); + ECCVMCircuitBuilder builder{ op_queue }; std::shared_ptr prover_transcript = std::make_shared(); ECCVMProver prover(builder, prover_transcript); diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_builder_types.hpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_builder_types.hpp index 6523afd99766..0f6ea51fe818 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_builder_types.hpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_builder_types.hpp @@ -11,19 +11,26 @@ namespace bb::eccvm { static constexpr size_t NUM_SCALAR_BITS = 128; // The length of scalars handled by the ECCVVM -static constexpr size_t NUM_WNAF_DIGIT_BITS = 4; // Scalars are decompose into base 16 in wNAF form +static constexpr size_t NUM_WNAF_DIGIT_BITS = 4; // Scalars are decomposed into base 16 in wNAF form static constexpr size_t NUM_WNAF_DIGITS_PER_SCALAR = NUM_SCALAR_BITS / NUM_WNAF_DIGIT_BITS; // 32 static constexpr uint64_t WNAF_MASK = static_cast((1ULL << NUM_WNAF_DIGIT_BITS) - 1ULL); -static constexpr size_t POINT_TABLE_SIZE = 1ULL << (NUM_WNAF_DIGIT_BITS); +static constexpr size_t POINT_TABLE_SIZE = + 1ULL << (NUM_WNAF_DIGIT_BITS); // Corresponds to the odd multiples of [P] between -(2^w - 1) and 2^w - 1. static constexpr size_t WNAF_DIGITS_PER_ROW = 4; -static constexpr size_t ADDITIONS_PER_ROW = 4; +static constexpr size_t ADDITIONS_PER_ROW = + 4; // In the Straus algorithm for MSM, we proceed "digit-by-digit". (Here, digit means wNAF digit.) We chunk + // `ADDITIONS_PER_ROW` additions, all in the *same digit-slot*, in a row of the ECCVM's MSM table. Various parts + // of the implemention exploit the fact that `ADDITIONS_PER_ROWS == NUM_WNAF_DIGIT_BITS`. 
template struct ScalarMul { uint32_t pc; uint256_t scalar; typename CycleGroup::affine_element base_point; - std::array wnaf_digits; - bool wnaf_skew; + std::array + wnaf_digits; // [a_{n-1}, a_{n-1}, ..., a_{0}], where each a_i ∈ {-2ʷ⁻¹ + 1, -2ʷ⁻¹ + 3, ..., 2ʷ⁻¹ - 3, 2ʷ⁻¹ - + // 1}. (here, w = `NUM_WNAF_DIGIT_BITS`). in particular, a_i is an odd integer with + // absolute value less than 2ʷ. Represents the number `scalar` = ∑ᵢ aᵢ 2⁴ⁱ - `wnaf_skew`. + bool wnaf_skew; // necessary to represent _even_ integers // size bumped by 1 to record base_point.dbl() std::array precomputed_table; }; diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_circuit_builder.hpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_circuit_builder.hpp index bdcc69cd888f..ef530c3bca32 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_circuit_builder.hpp @@ -39,20 +39,27 @@ class ECCVMCircuitBuilder { static constexpr size_t WNAF_DIGITS_PER_ROW = bb::eccvm::WNAF_DIGITS_PER_ROW; static constexpr size_t ADDITIONS_PER_ROW = bb::eccvm::ADDITIONS_PER_ROW; - using MSM = bb::eccvm::MSM; std::shared_ptr op_queue; + // `ScalarMul` represents a single scalar multiplication, i.e., a pair of a scalar and point on the curve, + // which will eventually be multiplied and accumulated. using ScalarMul = bb::eccvm::ScalarMul; - + // `MSM` is an ordered container of `ScalarMul`s + using MSM = bb::eccvm::MSM; ECCVMCircuitBuilder(std::shared_ptr& op_queue) - : op_queue(op_queue){}; + : op_queue(op_queue) {}; [[nodiscard]] uint32_t get_number_of_muls() const { return op_queue->get_number_of_muls(); } std::vector get_msms() const { const uint32_t num_muls = get_number_of_muls(); + + // `compute_precomputed_table` and `compute_wnaf_digits` are helper functions that will be used when we + // populate our vector of MSMs. 
+ /** * For input point [P], return { -15[P], -13[P], ..., -[P], [P], ..., 13[P], 15[P] } + * this "precomputed table" will be an entry in `ScalarMuls` corresponding to [P] */ const auto compute_precomputed_table = [](const AffineElement& base_point) -> std::array { @@ -74,6 +81,15 @@ class ECCVMCircuitBuilder { } return result; }; + /** + * Computes the WNAF representation of `scalar`. When `scalar` is even, we represent this by adding 1 to the + * least-significant slice. we will also later set the `skew` boolean to True when we populate `ScalarMul`. + * (this is necessary because otherwise we would only be able to represent odd multiples of our point.) + * Note also that in our applications, `NUM_WNAF_DIGITS_PER_SCALAR = 32`; this corresponds to the fact that we + * split up our scalar into two 128 bit numbers, using the endomorphism of the curve (corresponding to a + * primitive cube root of unity). + * + */ const auto compute_wnaf_digits = [](uint256_t scalar) -> std::array { std::array output; int previous_slice = 0; @@ -89,7 +105,7 @@ class ECCVMCircuitBuilder { // if least significant slice is even, we add 1 to create an odd value && set 'skew' to true wnaf_slice += 1; } else if (is_even) { - // for other slices, if it's even, we add 1 to the slice value + // for other slices, if it's even, we add 1 to the slice value, again to create an odd value, // and subtract 16 from the previous slice to preserve the total scalar sum static constexpr int borrow_constant = static_cast(1ULL << NUM_WNAF_DIGIT_BITS); previous_slice -= borrow_constant; @@ -112,11 +128,26 @@ class ECCVMCircuitBuilder { return output; }; + // the variables and vectors here correspond to the EC ops that we will actually do; in particular, we have + // compilation skipping logic for both when the scalar is 0 and when the EC point is the point-at-infinity, as + // terms of each of these types do not contribute to the final sum. 
- size_t msm_count = 0; - size_t active_mul_count = 0; - std::vector msm_opqueue_index; - std::vector> msm_mul_index; + // more precisely, we will break up our op_queue into a sequence of MSMs, where we throw away computations that + // obviously don't contribute to the final desired value. + size_t msm_count = 0; // total number of MSMs + size_t active_mul_count = + 0; // number of scalar multiplications required in the current MSM. Given a scalar n in F_q and a point P, + // we in general get *two* scalar multiplications, as we break up n into 128-bit chunks (using the extra + // endomorphism). this is an optimization. + std::vector + msm_opqueue_index; // a vector recording which op from the op_queue we are performing in our VM. + std::vector> + msm_mul_index; // recording pairs, where the first element specifies "which MSM are we in" (via an index) + // and the second element specifies "which scalar multiplication is this in our VM simulation + // of this MSM". note that the second element, the `active_mul_count`, incorporates some + // skipping logic: what contributes to it are multiplications we actually need to perform. + // generically each scalar multiplication contributes to 2 VM mul operations, as we + // split up each Fq element into 2 128-bit elements. std::vector msm_sizes; const auto& eccvm_ops = op_queue->get_eccvm_ops(); @@ -141,12 +172,18 @@ class ECCVMCircuitBuilder { msm_sizes.push_back(active_mul_count); msm_count++; } - std::vector result(msm_count); + + std::vector result( + msm_count); // the vector we will return, containing all of the MSMs that our VM will have to perform. + // this amounts to breaking up our op-queue, splitting the elmenets of Fq into two 128 + // bit scalars, and throwing out operations that a priori won't contribute. 
for (size_t i = 0; i < msm_count; ++i) { auto& msm = result[i]; msm.resize(msm_sizes[i]); } - + // populate result using the auxiliary vectors `msm_opqueue_index` and `msm_mul_index`, together with + // `eccvm_ops`. this first pass will *not* get the pc (point counter) correct. we explain why when we set it + // correctly. parallel_for_range(msm_opqueue_index.size(), [&](size_t start, size_t end) { for (size_t i = start; i < end; i++) { const auto& op = eccvm_ops[msm_opqueue_index[i]]; @@ -180,14 +217,16 @@ class ECCVMCircuitBuilder { } }); - // update pc. easier to do this serially but in theory could be optimised out + // update pc. easier to do this serially but in theory could be optimized out // We start pc at `num_muls` and decrement for each mul processed. // This gives us two desired properties: // 1: the value of pc at the 1st row = number of muls (easy to check) // 2: the value of pc for the final mul = 1 // The latter point is valuable as it means that we can add empty rows (where pc = 0) and still satisfy our // sumcheck relations that involve pc (if we did the other way around, starting at 1 and ending at num_muls, - // we create a discontinuity in pc values between the last transcript row and the following empty row) + // we create a discontinuity in pc values between the last transcript row and the following empty row). + // TL;DR we choose a decreasing `pc` so that the subsequent entries of the column (after the last entry) are 0. + // this is simply an optimization. 
uint32_t pc = num_muls; for (auto& msm : result) { for (auto& mul : msm) { diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp index 3347aac2ce95..ba07de665da1 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp @@ -7,6 +7,7 @@ #pragma once #include "barretenberg/commitment_schemes/ipa/ipa.hpp" #include "barretenberg/common/assert.hpp" +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/common/std_array.hpp" #include "barretenberg/ecc/curves/bn254/bn254.hpp" #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" @@ -465,6 +466,9 @@ class ECCVMFlavor { } } +#ifdef FUZZING + ProverPolynomials(const CircuitBuilder& builder, bool disable_fixed_dyadic_trace_size = false) +#else /** * @brief Compute the ECCVM flavor polynomial data required to generate an ECCVM Proof * @@ -509,7 +513,7 @@ class ECCVMFlavor { * transcript_msm_count_at_transition_inverse: used to validate transcript_msm_count_zero_at_transition * precompute_pc: point counter for Straus precomputation columns * precompute_select: if 1, evaluate Straus precomputation algorithm at current row - * precompute_point_transition: 1 if current row operating on a different point to previous row + * precompute_point_transition: 1 if next row operating on a different point than current row. 
* precompute_round: round counter for Straus precomputation algorithm * precompute_scalar_sum: accumulating sum of Straus scalar slices * precompute_s1hi/lo: 2-bit hi/lo components of a Straus 4-bit scalar slice @@ -546,6 +550,10 @@ class ECCVMFlavor { * msm_lambda2: temp variable used for ecc point addition algorithm if msm_add2 = 1 * msm_lambda3: temp variable used for ecc point addition algorithm if msm_add3 = 1 * msm_lambda4: temp variable used for ecc point addition algorithm if msm_add4 = 1 + * msm_slice1: wNAF digit/slice for first add + * msm_slice2: wNAF digit/slice for second add + * msm_slice3: wNAF digit/slice for third add + * msm_slice4: wNAF digit/slice for fourth add * msm_collision_x1: used to ensure incomplete ecc addition exceptions not triggered if msm_add1 = 1 * msm_collision_x2: used to ensure incomplete ecc addition exceptions not triggered if msm_add2 = 1 * msm_collision_x3: used to ensure incomplete ecc addition exceptions not triggered if msm_add3 = 1 @@ -557,6 +565,7 @@ class ECCVMFlavor { * @return ProverPolynomials */ ProverPolynomials(const CircuitBuilder& builder) +#endif { // compute rows for the three different sections of the ECCVM execution trace const auto transcript_rows = @@ -578,7 +587,16 @@ class ECCVMFlavor { std::to_string(ECCVM_FIXED_SIZE) + " actual size: " + std::to_string(dyadic_num_rows)); } +#ifdef FUZZING + // We don't want to spend all the time generating the full trace if we are just fuzzing eccvm. + if (disable_fixed_dyadic_trace_size) { + dyadic_num_rows = num_rows; + } else { + dyadic_num_rows = ECCVM_FIXED_SIZE; + } +#else dyadic_num_rows = ECCVM_FIXED_SIZE; +#endif size_t unmasked_witness_size = dyadic_num_rows - NUM_DISABLED_ROWS_IN_SUMCHECK; for (auto& poly : get_to_be_shifted()) { @@ -776,10 +794,6 @@ class ECCVMFlavor { */ class VerificationKey : public NativeVerificationKey_, Transcript> { public: - // Serialized Verification Key length in fields - static constexpr size_t VERIFICATION_KEY_LENGTH = - /* 1. 
NUM_PRECOMPUTED_ENTITIES commitments */ (NUM_PRECOMPUTED_ENTITIES * num_frs_comm); - bool operator==(const VerificationKey&) const = default; // IPA verification key requires one more point. @@ -814,26 +828,6 @@ class ECCVMFlavor { } } - /** - * @brief Serialize verification key to field elements - * - * @return std::vector - */ - std::vector to_field_elements() const override - { - using namespace bb::field_conversion; - - auto serialize_to_field_buffer = [](const T& input, std::vector& buffer) { - std::vector input_fields = convert_to_bn254_frs(input); - buffer.insert(buffer.end(), input_fields.begin(), input_fields.end()); - }; - - std::vector elements; - for (const Commitment& commitment : this->get_all()) { - serialize_to_field_buffer(commitment, elements); - } - return elements; - } /** * @brief Unused function because vk is hardcoded in recursive verifier, so no transcript hashing is needed. * @@ -841,18 +835,14 @@ class ECCVMFlavor { * @param transcript * @returns The hash of the verification key */ - fr add_hash_to_transcript([[maybe_unused]] const std::string& domain_separator, - [[maybe_unused]] Transcript& transcript) const override + fr hash_through_transcript([[maybe_unused]] const std::string& domain_separator, + [[maybe_unused]] Transcript& transcript) const override { throw_or_abort("Not intended to be used because vk is hardcoded in circuit."); } // TODO(https://github.com/AztecProtocol/barretenberg/issues/1324): Remove `circuit_size` and `log_circuit_size` - // from MSGPACK and the verification key. - // Don't statically check for object completeness. - using MSGPACK_NO_STATIC_CHECK = std::true_type; - MSGPACK_FIELDS( - log_circuit_size, num_public_inputs, pub_inputs_offset, lagrange_first, lagrange_second, lagrange_last); + // from the verification key. 
}; /** @@ -1015,10 +1005,10 @@ class ECCVMFlavor { size_t old_proof_length = NativeTranscript::proof_data.size(); NativeTranscript::proof_data.clear(); - NativeTranscript::template serialize_to_buffer(ipa_poly_degree, NativeTranscript::proof_data); + NativeTranscript::serialize_to_buffer(ipa_poly_degree, NativeTranscript::proof_data); for (size_t i = 0; i < CONST_ECCVM_LOG_N; ++i) { - NativeTranscript::template serialize_to_buffer(ipa_l_comms[i], NativeTranscript::proof_data); - NativeTranscript::template serialize_to_buffer(ipa_r_comms[i], NativeTranscript::proof_data); + NativeTranscript::serialize_to_buffer(ipa_l_comms[i], NativeTranscript::proof_data); + NativeTranscript::serialize_to_buffer(ipa_r_comms[i], NativeTranscript::proof_data); } serialize_to_buffer(ipa_G_0_eval, proof_data); @@ -1029,33 +1019,37 @@ class ECCVMFlavor { }; /** - * @brief When evaluating the sumcheck protocol - can we skip evaluation of all relations for a given row? + * @brief When evaluating the sumcheck protocol - can we skip evaluation of _all_ relations for a given row? This + * is purely a prover-side optimization. * * @details When used in ClientIVC, the ECCVM has a large fixed size, which is often not fully utilized. - * If a row is completely empty, the values of z_perm and z_perm_shift will match, - * we can use this as a proxy to determine if we can skip Sumcheck::compute_univariate_with_row_skipping + * If a row is completely empty, the values of `z_perm` and `z_perm_shift` will match, + * we can use this as a proxy to determine if we can skip `Sumcheck::compute_univariate_with_row_skipping`. + * In fact, here are several other conditions that need to be checked to see if we can skip the computation + * of all relations in the row. **/ template static bool skip_entire_row([[maybe_unused]] const ProverPolynomialsOrPartiallyEvaluatedMultivariates& polynomials, [[maybe_unused]] const EdgeType edge_idx) { - // skip conditions. TODO: add detailed commentary during audit. 
+ // SKIP CONDITIONS: // The most important skip condition is that `z_perm == z_perm_shift`. This implies that none of the wire values // for the present input are involved in non-trivial copy constraints. Edge cases where nonzero rows do not // contribute to permutation: // // 1: If `lagrange_last != 0`, the permutation polynomial identity is updated even if - // z_perm == z_perm_shift + // z_perm == z_perm_shift. Therefore, we must force it to be zero. // // 2: The final MSM row won't add to the permutation but still has polynomial identitiy // contributions. This is because the permutation argument uses the SHIFTED msm columns when performing - // lookups i.e. `polynomials.msm_accumulator_x[last_edge_idx] will change z_perm[last_edge_idx - 1] and - // z_perm_shift[last_edge_idx - 1] + // lookups i.e. `msm_accumulator_x[last_edge_idx]` will change `z_perm[last_edge_idx - 1]` and + // `z_perm_shift[last_edge_idx - 1]` // - // 3. The value of `transcript_mul` can be non-zero at the end of a long MSM of points-at-infinity, which will - // cause `full_msm_count` to be non-zero while `transcript_msm_count` vanishes. + // 3. The value of `transcript_mul` is non-zero at the end of an MSM of points-at-infinity, which will + // cause `full_msm_count` to be non-zero while `transcript_msm_count` vanishes. We therefore force + // transcript_mul == 0 as a skip-row condition. // - // 4. For similar reasons, we must add that `transcript_op==0`. + // 4: We also force that `transcript_op==0`. 
return (polynomials.z_perm[edge_idx] == polynomials.z_perm_shift[edge_idx]) && (polynomials.z_perm[edge_idx + 1] == polynomials.z_perm_shift[edge_idx + 1]) && (polynomials.lagrange_last[edge_idx] == 0 && polynomials.lagrange_last[edge_idx + 1]) == 0 && @@ -1064,7 +1058,4 @@ class ECCVMFlavor { (polynomials.transcript_op[edge_idx] == 0 && polynomials.transcript_op[edge_idx + 1] == 0); } }; - -// NOLINTEND(cppcoreguidelines-avoid-const-or-ref-data-members) - } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp index bd6a633b3352..0ddf669d6bf1 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp @@ -10,6 +10,7 @@ #include "barretenberg/commitment_schemes/shplonk/shplemini.hpp" #include "barretenberg/commitment_schemes/shplonk/shplonk.hpp" #include "barretenberg/commitment_schemes/small_subgroup_ipa/small_subgroup_ipa.hpp" +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/common/ref_array.hpp" #include "barretenberg/honk/library/grand_product_library.hpp" #include "barretenberg/honk/proof_system/logderivative_library.hpp" @@ -24,7 +25,7 @@ ECCVMProver::ECCVMProver(CircuitBuilder& builder, : transcript(transcript) , ipa_transcript(ipa_transcript) { - PROFILE_THIS_NAME("ECCVMProver(CircuitBuilder&)"); + BB_BENCH_NAME("ECCVMProver(CircuitBuilder&)"); // TODO(https://github.com/AztecProtocol/barretenberg/issues/939): Remove redundancy between // ProvingKey/ProverPolynomials and update the model to reflect what's done in all other proving systems. 
@@ -56,31 +57,18 @@ void ECCVMProver::execute_preamble_round() */ void ECCVMProver::execute_wire_commitments_round() { + BB_BENCH_NAME("ECCVMProver::execute_wire_commitments_round"); // To commit to the masked wires when `real_size` < `circuit_size`, we use // `commit_structured` that ignores 0 coefficients between the real size and the last NUM_DISABLED_ROWS_IN_SUMCHECK // wire entries. const size_t circuit_size = key->circuit_size; unmasked_witness_size = circuit_size - NUM_DISABLED_ROWS_IN_SUMCHECK; - CommitmentKey::CommitType commit_type = - (circuit_size > key->real_size) ? CommitmentKey::CommitType::Structured : CommitmentKey::CommitType::Default; - - // Commit to wires whose length is bounded by the real size of the ECCVM - for (const auto& [wire, label] : zip_view(key->polynomials.get_wires_without_accumulators(), - commitment_labels.get_wires_without_accumulators())) { - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1240) Structured Polynomials in - // ECCVM/Translator/MegaZK - const size_t start = circuit_size == wire.size() ? 0 : 1; - std::vector> active_ranges{ { start, key->real_size + start }, - { unmasked_witness_size, circuit_size } }; - commit_to_witness_polynomial(wire, label, commit_type, active_ranges); - } - - // The accumulators are populated until the 2^{CONST_ECCVM_LOG_N}, therefore we commit to a full-sized polynomial - for (const auto& [wire, label] : - zip_view(key->polynomials.get_accumulators(), commitment_labels.get_accumulators())) { - commit_to_witness_polynomial(wire, label); + auto batch = key->commitment_key.start_batch(); + for (const auto& [wire, label] : zip_view(key->polynomials.get_wires(), commitment_labels.get_wires())) { + batch.add_to_batch(wire, label, /* mask for zk? 
*/ true); } + batch.commit_and_send_to_verifier(transcript); } /** @@ -89,6 +77,7 @@ void ECCVMProver::execute_wire_commitments_round() */ void ECCVMProver::execute_log_derivative_commitments_round() { + BB_BENCH_NAME("ECCVMProver::execute_log_derivative_commitments_round"); // Compute and add beta to relation parameters auto [beta, gamma] = transcript->template get_challenges("beta", "gamma"); @@ -116,6 +105,7 @@ void ECCVMProver::execute_log_derivative_commitments_round() */ void ECCVMProver::execute_grand_product_computation_round() { + BB_BENCH_NAME("ECCVMProver::execute_grand_product_computation_round"); // Compute permutation grand product and their commitments compute_grand_products(key->polynomials, relation_parameters, unmasked_witness_size); commit_to_witness_polynomial(key->polynomials.z_perm, commitment_labels.z_perm); @@ -127,7 +117,7 @@ void ECCVMProver::execute_grand_product_computation_round() */ void ECCVMProver::execute_relation_check_rounds() { - + BB_BENCH_NAME("ECCVMProver::execute_relation_check_rounds"); using Sumcheck = SumcheckProver; // Each linearly independent subrelation contribution is multiplied by `alpha^i`, where @@ -160,6 +150,7 @@ void ECCVMProver::execute_relation_check_rounds() */ void ECCVMProver::execute_pcs_rounds() { + BB_BENCH_NAME("ECCVMProver::execute_pcs_rounds"); using Curve = typename Flavor::Curve; using Shplemini = ShpleminiProver_; using Shplonk = ShplonkProver_; @@ -207,7 +198,7 @@ ECCVMProof ECCVMProver::export_proof() ECCVMProof ECCVMProver::construct_proof() { - PROFILE_THIS_NAME("ECCVMProver::construct_proof"); + BB_BENCH_NAME("ECCVMProver::construct_proof"); execute_preamble_round(); execute_wire_commitments_round(); @@ -290,7 +281,7 @@ void ECCVMProver::compute_translation_opening_claims() for (auto [eval, poly, label] : zip_view(translation_evaluations.get_all(), translation_polynomials, translation_evaluations.labels)) { eval = poly.evaluate(evaluation_challenge_x); - transcript->template 
send_to_verifier(label, eval); + transcript->send_to_verifier(label, eval); } // Get another challenge to batch the evaluations of the transcript polynomials diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_trace_checker.cpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_trace_checker.cpp index f1bafc5373a2..3b04c18fecd0 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_trace_checker.cpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_trace_checker.cpp @@ -15,7 +15,13 @@ using Builder = typename ECCVMFlavor::CircuitBuilder; using FF = typename ECCVMFlavor::FF; using ProverPolynomials = typename ECCVMFlavor::ProverPolynomials; -bool ECCVMTraceChecker::check(Builder& builder, numeric::RNG* engine_ptr) +bool ECCVMTraceChecker::check(Builder& builder, + numeric::RNG* engine_ptr +#ifdef FUZZING + , + bool disable_fixed_dyadic_trace_size +#endif +) { const FF gamma = FF::random_element(engine_ptr); const FF beta = FF::random_element(engine_ptr); @@ -29,13 +35,16 @@ bool ECCVMTraceChecker::check(Builder& builder, numeric::RNG* engine_ptr) .beta = beta, .gamma = gamma, .public_input_delta = 0, - .lookup_grand_product_delta = 0, .beta_sqr = beta_sqr, .beta_cube = beta_cube, .eccvm_set_permutation_delta = eccvm_set_permutation_delta, }; +#ifdef FUZZING + ProverPolynomials polynomials(builder, disable_fixed_dyadic_trace_size); +#else ProverPolynomials polynomials(builder); +#endif const size_t num_rows = polynomials.get_polynomial_size(); const size_t unmasked_witness_size = num_rows - NUM_DISABLED_ROWS_IN_SUMCHECK; compute_logderivative_inverse>(polynomials, params, unmasked_witness_size); @@ -51,7 +60,22 @@ bool ECCVMTraceChecker::check(Builder& builder, numeric::RNG* engine_ptr) constexpr size_t NUM_SUBRELATIONS = result.size(); for (size_t i = 0; i < num_rows; ++i) { - Relation::accumulate(result, polynomials.get_row(i), params, 1); + auto row = polynomials.get_row(i); +#ifdef FUZZING + // Check if the relation is skippable and should be skipped 
(only in fuzzing builds) + if constexpr (isSkippable) { + // Only accumulate if the relation should not be skipped + if (!Relation::skip(row)) { + Relation::accumulate(result, row, params, 1); + } + } else { + // If not skippable, always accumulate + Relation::accumulate(result, row, params, 1); + } +#else + // In non-fuzzing builds, always accumulate for maximum security + Relation::accumulate(result, row, params, 1); +#endif bool x = true; for (size_t j = 0; j < NUM_SUBRELATIONS; ++j) { diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_trace_checker.hpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_trace_checker.hpp index ed1a01582223..eb29803b1967 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_trace_checker.hpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_trace_checker.hpp @@ -10,6 +10,12 @@ namespace bb { class ECCVMTraceChecker { public: - static bool check(ECCVMCircuitBuilder&, numeric::RNG* engine_ptr = nullptr); + static bool check(ECCVMCircuitBuilder&, + numeric::RNG* engine_ptr = nullptr +#ifdef FUZZING + , + bool disable_fixed_dyadic_trace_size = false +#endif + ); }; } // namespace bb \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_translation_data.hpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_translation_data.hpp index e8c353490968..93cdec90a907 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_translation_data.hpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_translation_data.hpp @@ -78,8 +78,8 @@ template class TranslationData { compute_concatenated_polynomials(transcript_polynomials); // Commit to M(X) + Z_H(X)*R(X), where R is a random polynomial of WITNESS_MASKING_TERM_LENGTH. 
- transcript->template send_to_verifier("Translation:concatenated_masking_term_commitment", - commitment_key.commit(masked_concatenated_polynomial)); + transcript->send_to_verifier("Translation:concatenated_masking_term_commitment", + commitment_key.commit(masked_concatenated_polynomial)); } /** * @brief Let \f$ T = NUM_TRANSLATION_EVALUATIONS \f$ and let \f$ m_0, ..., m_{T-1}\f$ be the vectors of last \f$ diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.hpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.hpp index 2162bfdefd8c..fd32bc9fcc34 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.hpp @@ -24,7 +24,7 @@ class ECCVMVerifier { public: explicit ECCVMVerifier(const std::shared_ptr& transcript) - : transcript(transcript){}; + : transcript(transcript) {}; bool verify_proof(const ECCVMProof& proof); void compute_translation_opening_claims( diff --git a/barretenberg/cpp/src/barretenberg/eccvm/msm_builder.hpp b/barretenberg/cpp/src/barretenberg/eccvm/msm_builder.hpp index d2d63362153b..072ef9094613 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/msm_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/msm_builder.hpp @@ -27,39 +27,61 @@ class ECCVMMSMMBuilder { static constexpr size_t NUM_WNAF_DIGITS_PER_SCALAR = bb::eccvm::NUM_WNAF_DIGITS_PER_SCALAR; struct alignas(64) MSMRow { - // counter over all half-length scalar muls used to compute the required MSMs - uint32_t pc = 0; - // the number of points that will be scaled and summed - uint32_t msm_size = 0; - uint32_t msm_count = 0; - uint32_t msm_round = 0; - bool msm_transition = false; + uint32_t pc = 0; // decreasing point-counter, over all half-length (128 bit) scalar muls used to compute + // the required MSMs. however, this value is _constant_ on a given MSM and more precisely + // refers to the number of half-length scalar muls completed up until we have started + // the current MSM. 
+ uint32_t msm_size = 0; // the number of points in (a.k.a. the length of) the MSM in whose computation + // this VM row participates + uint32_t msm_count = 0; // number of multiplications processed so far (not including this row) in current MSM + // round (a.k.a. wNAF digit slot). this specifically refers to the number of wNAF-digit + // * point scalar products we have looked up and accumulated. + uint32_t msm_round = 0; // current "round" of MSM, in {0, ..., 32 = `NUM_WNAF_DIGITS_PER_SCALAR`}. With the + // Straus algorithm, we proceed wNAF digit by wNAF digit, from left to right. (final + // round deals with the `skew` bit.) + bool msm_transition = false; // is 1 if the current row *starts* the processing of a different MSM, else 0. bool q_add = false; bool q_double = false; bool q_skew = false; + // Each row in the MSM portion of the ECCVM can handle (up to) 4 point-additions. + // For each row in the VM we represent the point addition data via a size-4 array of + // AddState objects. struct AddState { - bool add = false; - int slice = 0; - AffineElement point{ 0, 0 }; - FF lambda = 0; - FF collision_inverse = 0; + bool add = false; // are we adding a point at this location in the VM? + // e.g if the MSM is of size-2 then the 3rd and 4th AddState objects will have this set + // to `false`. + int slice = 0; // wNAF slice value. This has values in {0, ..., 15} and corresponds to an odd number in the + // range {-15, -13, ..., 15} via the monotonic bijection. + AffineElement point{ 0, 0 }; // point being added into the accumulator. (This is of the form nP, + // where n is in {-15, -13, ..., 15}.) + FF lambda = 0; // when adding `point` into the accumulator via Affine point addition, the value of `lambda` + // (i.e., the slope of the line). (we need this as a witness in the circuit.) 
+ FF collision_inverse = 0; // `collision_inverse` is used to validate we are not hitting point addition edge + // case exceptions, i.e., we want the VM proof to fail if we're doing a point + // addition where (x1 == x2). to do this, we simply provide an inverse to x1 - x2. }; std::array add_state{ AddState{ false, 0, { 0, 0 }, 0, 0 }, AddState{ false, 0, { 0, 0 }, 0, 0 }, AddState{ false, 0, { 0, 0 }, 0, 0 }, AddState{ false, 0, { 0, 0 }, 0, 0 } }; - FF accumulator_x = 0; - FF accumulator_y = 0; + // The accumulator here is, in general, the result of four EC additions: A + Q_1 + Q_2 + Q_3 + Q_4. + // We do not explicitly store the intermediate values A + Q_1, A + Q_1 + Q_2, and A + Q_1 + Q_2 + Q_3, although + // these values are implicitly used in the values of `AddState.lambda` and `AddState.collision_inverse`. + + FF accumulator_x = 0; // `(accumulator_x, accumulator_y)` is the accumulator to which I potentially want to add + // the points in `add_state`. + FF accumulator_y = 0; // `(accumulator_x, accumulator_y)` is the accumulator to which I potentially want to add + // the points in `add_state`. }; /** * @brief Computes the row values for the Straus MSM columns of the ECCVM. * * For a detailed description of the Straus algorithm and its relation to the ECCVM, please see - * https://hackmd.io/@aztec-network/rJ5xhuCsn + * https://hackmd.io/@aztec-network/rJ5xhuCsn or, alternatively, the [ECCVM readme](README.md). * - * @param msms A vector of vectors of ScalarMuls. + * @param msms A vector of vectors of `ScalarMul`s, a.k.a. a vector of `MSM`s. * @param point_table_read_counts Table of read counts to be populated. * @param total_number_of_muls A mul op in the OpQueue adds up to two muls, one for each nonzero z_i (i=1,2). 
* @param num_msm_rows @@ -71,7 +93,8 @@ class ECCVMMSMMBuilder { // To perform a scalar multiplication of a point P by a scalar x, we precompute a table of points // -15P, -13P, ..., -3P, -P, P, 3P, ..., 15P // When we perform a scalar multiplication, we decompose x into base-16 wNAF digits then look these precomputed - // values up with digit-by-digit. We record read counts in a table with the following structure: + // values up with digit-by-digit. As we are performing lookups with the log-derivative argument, we have to + // record read counts. We record read counts in a table with the following structure: // 1st write column = positive wNAF digits // 2nd write column = negative wNAF digits // the row number is a function of pc and wnaf digit: @@ -79,20 +102,29 @@ class ECCVMMSMMBuilder { // row = point_idx * rows_per_point_table + (some function of the slice value) // // Illustration: - // Block Structure Table structure: - // | 0 | 1 | | Block_{0} | <-- pc = total_number_of_muls - // | - | - | | Block_{1} | <-- pc = total_number_of_muls-(num muls in msm 0) - // 1 | # | # | -1 | ... | ... - // 3 | # | # | -3 | Block_{total_number_of_muls-1} | <-- pc = num muls in last msm + // Block Structure: + // | 0 | 1 | + // | - | - | + // 1 | # | # | -1 + // 3 | # | # | -3 // 5 | # | # | -5 // 7 | # | # | -7 // 9 | # | # | -9 // 11 | # | # | -11 // 13 | # | # | -13 // 15 | # | # | -15 + // + // Table structure: + // | Block_{0} | <-- pc = total_number_of_muls + // | Block_{1} | <-- pc = total_number_of_muls-(num muls in msm 0) + // | ... | ... + // | Block_{total_number_of_muls-1} | <-- pc = num muls in last msm const size_t num_rows_in_read_counts_table = - static_cast(total_number_of_muls) * (eccvm::POINT_TABLE_SIZE >> 1); + static_cast(total_number_of_muls) * + (eccvm::POINT_TABLE_SIZE >> 1); // `POINT_TABLE_SIZE` is 2ʷ, where in our case w = 4. 
As noted above, with + // respect to *read counts*, we record lookups of the positive and + // negative odd multiples of [P] in two separate columns, each of size 2ʷ⁻¹. std::array, 2> point_table_read_counts; point_table_read_counts[0].reserve(num_rows_in_read_counts_table); point_table_read_counts[1].reserve(num_rows_in_read_counts_table); @@ -126,7 +158,7 @@ class ECCVMMSMMBuilder { std::vector msm_row_counts; msm_row_counts.reserve(msms.size() + 1); msm_row_counts.push_back(1); - // compute the program counter (i.e. the index among all single scalar muls) that each multiscalar + // compute the point counter (i.e. the index among all single scalar muls) that each multiscalar // multiplication will start at. std::vector pc_values; pc_values.reserve(msms.size() + 1); @@ -171,6 +203,7 @@ class ECCVMMSMMBuilder { } } + // update the log-derivative read count for the lookup associated with WNAF skew if (digit_idx == NUM_WNAF_DIGITS_PER_SCALAR - 1) { for (size_t row_idx = 0; row_idx < num_rows_per_digit; ++row_idx) { const size_t num_points_in_row = (row_idx + 1) * ADDITIONS_PER_ROW > msm_size @@ -182,7 +215,11 @@ class ECCVMMSMMBuilder { bool add = num_points_in_row > relative_point_idx; const size_t point_idx = offset + relative_point_idx; if (add) { - // pc starts at total_number_of_muls and decreses non-uniformly to 0 + // `pc` starts at total_number_of_muls and decreases non-uniformly to 0. + // -15 maps to the 1st point in the lookup table (array element 0) + // -1 maps to the point in the lookup table that corresponds to the negation of the + // original input point (i.e. the point we need to add into the accumulator if wnaf_skew + // is positive) int slice = msm[point_idx].wnaf_skew ? -1 : -15; update_read_count((total_number_of_muls - pc) + point_idx, slice); } @@ -195,15 +232,25 @@ class ECCVMMSMMBuilder { // The execution trace data for the MSM columns requires knowledge of intermediate values from *affine* point // addition.
The naive solution to compute this data requires 2 field inversions per in-circuit group addition // evaluation. This is bad! To avoid this, we split the witness computation algorithm into 3 steps. - // Step 1: compute the execution trace group operations in *projective* coordinates - // Step 2: use batch inversion trick to convert all points into affine coordinates - // Step 3: populate the full execution trace, including the intermediate values from affine group operations + // Step 1: compute the execution trace group operations in *projective* coordinates. (these will be stored in + // `p1_trace`, `p2_trace`, and `p3_trace`) + // Step 2: use batch inversion trick to convert all points into affine coordinates + // Step 3: populate the full execution trace, including the intermediate values from affine group + // operations // This section sets up the data structures we need to store all intermediate ECC operations in projective form - const size_t num_point_adds_and_doubles = (num_msm_rows - 2) * 4; - const size_t num_accumulators = num_msm_rows - 1; - // In what fallows, either p1 + p2 = p3, or p1.dbl() = p3 + + const size_t num_point_adds_and_doubles = + (num_msm_rows - 2) * 4; // `num_msm_rows - 2` is the actual number of rows in the table required to compute + // the MSM; the msm table itself has a dummy row at the beginning and an extra row + // with the `x` and `y` coordinates of the accumulator at the end. (In general, the + // output of the accumulator from the computation at row `i` is present on row + // `i+1`.) We multiply by 4 because each "row" of the VM processes 4 point-additions + // (and the fact that w = 4 means we must interleave with 4 doublings). This + // "corresponds" to the fact that `MSMRow.add_state` has 4 entries. + const size_t num_accumulators = num_msm_rows - 1; // for every row after the first row, we have an accumulator.
+ // In what follows, either p1 + p2 = p3, or p1.dbl() = p3 // We create 1 vector to store the entire point trace. We split into multiple containers using std::span - // (we want 1 vector object to more efficiently batch normalize points) + // (we want 1 vector object to more efficiently batch-normalize points) static constexpr size_t NUM_POINTS_IN_ADDITION_RELATION = 3; const size_t num_points_to_normalize = (num_point_adds_and_doubles * NUM_POINTS_IN_ADDITION_RELATION) + num_accumulators; @@ -211,8 +258,10 @@ class ECCVMMSMMBuilder { std::span p1_trace(&points_to_normalize[0], num_point_adds_and_doubles); std::span p2_trace(&points_to_normalize[num_point_adds_and_doubles], num_point_adds_and_doubles); std::span p3_trace(&points_to_normalize[num_point_adds_and_doubles * 2], num_point_adds_and_doubles); - // operation_trace records whether an entry in the p1/p2/p3 trace represents a point addition or doubling - std::vector operation_trace(num_point_adds_and_doubles); + // `is_double_or_add` records whether an entry in the p1/p2/p3 trace represents a point addition or + // doubling. if it is `true`, then we are doubling (i.e., the condition is that `p3 = p1.dbl()`), else we are + // adding (i.e., the condition is that `p3 = p1 + p2`). 
+ std::vector is_double_or_add(num_point_adds_and_doubles); // accumulator_trace tracks the value of the ECCVM accumulator for each row std::span accumulator_trace(&points_to_normalize[num_point_adds_and_doubles * 3], num_accumulators); @@ -224,25 +273,35 @@ class ECCVMMSMMBuilder { // populate point trace, and the components of the MSM execution trace that do not relate to affine point // operations for (size_t msm_idx = 0; msm_idx < msms.size(); msm_idx++) { - Element accumulator = offset_generator; - const auto& msm = msms[msm_idx]; - size_t msm_row_index = msm_row_counts[msm_idx]; + Element accumulator = offset_generator; // for every MSM, we start with the same `offset_generator` + const auto& msm = msms[msm_idx]; // which MSM we are processing. This is of type `std::vector`. + size_t msm_row_index = msm_row_counts[msm_idx]; // the row where the given MSM starts const size_t msm_size = msm.size(); const size_t num_rows_per_digit = - (msm_size / ADDITIONS_PER_ROW) + ((msm_size % ADDITIONS_PER_ROW != 0) ? 1 : 0); - size_t trace_index = (msm_row_counts[msm_idx] - 1) * 4; - + (msm_size / ADDITIONS_PER_ROW) + + (msm_size % ADDITIONS_PER_ROW != + 0); // the Straus algorithm proceeds by incrementing through the digit-slots and doing + // computations *across* the `ScalarMul`s that make up our MSM. Each digit-slot therefore + // contributes the *ceiling* of `msm_size`/`ADDITIONS_PER_ROW`. + size_t trace_index = + (msm_row_counts[msm_idx] - 1) * 4; // tracks the index in the traces of `p1`, `p2`, `p3`, and + // `accumulator_trace` that we are filling out + + // for each digit-slot (`digit_idx`), and then for each row of the VM (which does `ADDITIONS_PER_ROW` point + // additions), we either enter in/process (`ADDITIONS_PER_ROW`) `AddState` objects, and then if necessary + // (i.e., if not at the last wNAF digit), process the four doublings. 
for (size_t digit_idx = 0; digit_idx < NUM_WNAF_DIGITS_PER_SCALAR; ++digit_idx) { - const auto pc = static_cast(pc_values[msm_idx]); + const auto pc = static_cast(pc_values[msm_idx]); // pc that our msm starts at + for (size_t row_idx = 0; row_idx < num_rows_per_digit; ++row_idx) { const size_t num_points_in_row = (row_idx + 1) * ADDITIONS_PER_ROW > msm_size ? (msm_size % ADDITIONS_PER_ROW) : ADDITIONS_PER_ROW; - auto& row = msm_rows[msm_row_index]; + auto& row = msm_rows[msm_row_index]; // actual `MSMRow` we will fill out in the body of this loop const size_t offset = row_idx * ADDITIONS_PER_ROW; row.msm_transition = (digit_idx == 0) && (row_idx == 0); + // each iteration of this loop process/enters in one of the `AddState` objects in `row.add_state`. for (size_t point_idx = 0; point_idx < ADDITIONS_PER_ROW; ++point_idx) { - auto& add_state = row.add_state[point_idx]; add_state.add = num_points_in_row > point_idx; int slice = add_state.add ? msm[offset + point_idx].wnaf_digits[digit_idx] : 0; @@ -250,9 +309,9 @@ class ECCVMMSMMBuilder { // if `row.add_state[point_idx].add = 1`, this indicates that we want to add the // `point_idx`'th point in the MSM columns into the MSM accumulator `add_state.slice` = A // 4-bit WNAF slice of the scalar multiplier associated with the point we are adding (the - // specific slice chosen depends on the value of msm_round) (WNAF = - // windowed-non-adjacent-form. Value range is `-15, -13, - // ..., 15`) If `add_state.add = 1`, we want `add_state.slice` to be the *compressed* + // specific slice chosen depends on the value of msm_round) (WNAF = our version of + // windowed-non-adjacent-form. Value range is `-15, -13,..., 15`) + // If `add_state.add = 1`, we want `add_state.slice` to be the *compressed* // form of the WNAF slice value. (compressed = no gaps in the value range. i.e. -15, // -13, ..., 15 maps to 0, ... , 15) add_state.slice = add_state.add ? 
(slice + 15) / 2 : 0; @@ -267,9 +326,10 @@ class ECCVMMSMMBuilder { p1_trace[trace_index] = p1; p2_trace[trace_index] = p2; p3_trace[trace_index] = accumulator; - operation_trace[trace_index] = false; + is_double_or_add[trace_index] = false; trace_index++; } + // Now, `row.add_state` has been fully processed and we fill in the rest of the members of `row`. accumulator_trace[msm_row_index] = accumulator; row.q_add = true; row.q_double = false; @@ -280,7 +340,11 @@ class ECCVMMSMMBuilder { row.pc = pc; msm_row_index++; } - // doubling + // after processing each digit-slot, we now take care of doubling (as long as we are not at the last + // digit). We add an `MSMRow`, `row`, whose four `AddState` objects in `row.add_state` + // are null, but we also populate `p1_trace`, `p2_trace`, `p3_trace`, and `is_double_or_add` for four + // indices, corresponding to the w=4 doubling operations we need to perform. This embodies the numerical + // "coincidence" that `ADDITIONS_PER_ROW == NUM_WNAF_DIGIT_BITS` if (digit_idx < NUM_WNAF_DIGITS_PER_SCALAR - 1) { auto& row = msm_rows[msm_row_index]; row.msm_transition = false; @@ -298,15 +362,16 @@ class ECCVMMSMMBuilder { add_state.collision_inverse = 0; p1_trace[trace_index] = accumulator; - p2_trace[trace_index] = accumulator; + p2_trace[trace_index] = accumulator; // dummy accumulator = accumulator.dbl(); p3_trace[trace_index] = accumulator; - operation_trace[trace_index] = true; + is_double_or_add[trace_index] = true; trace_index++; } accumulator_trace[msm_row_index] = accumulator; msm_row_index++; - } else { + } else // process `wnaf_skew`, i.e., the skew digit. + { for (size_t row_idx = 0; row_idx < num_rows_per_digit; ++row_idx) { auto& row = msm_rows[msm_row_index]; @@ -324,14 +389,17 @@ class ECCVMMSMMBuilder { add_state.point = add_state.add ? msm[offset + point_idx].precomputed_table[static_cast(add_state.slice)] - : AffineElement{ 0, 0 }; + : AffineElement{ + 0, 0 + }; // if the skew_bit is on, `slice == 7`. 
Then `precomputed_table[7] == -[P]`, as + // required for the skew logic. bool add_predicate = add_state.add ? msm[offset + point_idx].wnaf_skew : false; auto p1 = accumulator; accumulator = add_predicate ? accumulator + add_state.point : accumulator; p1_trace[trace_index] = p1; p2_trace[trace_index] = add_state.point; p3_trace[trace_index] = accumulator; - operation_trace[trace_index] = false; + is_double_or_add[trace_index] = false; trace_index++; } row.q_add = false; @@ -357,7 +425,7 @@ class ECCVMMSMMBuilder { std::vector inverse_trace(num_point_adds_and_doubles); parallel_for_range(num_point_adds_and_doubles, [&](size_t start, size_t end) { for (size_t operation_idx = start; operation_idx < end; ++operation_idx) { - if (operation_trace[operation_idx]) { + if (is_double_or_add[operation_idx]) { inverse_trace[operation_idx] = (p1_trace[operation_idx].y + p1_trace[operation_idx].y); } else { inverse_trace[operation_idx] = (p2_trace[operation_idx].x - p1_trace[operation_idx].x); @@ -373,8 +441,7 @@ class ECCVMMSMMBuilder { const auto& msm = msms[msm_idx]; size_t trace_index = ((msm_row_counts[msm_idx] - 1) * ADDITIONS_PER_ROW); size_t msm_row_index = msm_row_counts[msm_idx]; - // 1st MSM row will have accumulator equal to the previous MSM output - // (or point at infinity for 1st MSM) + // 1st MSM row will have accumulator equal to the previous MSM output (or point at infinity for first MSM) size_t accumulator_index = msm_row_counts[msm_idx] - 1; const size_t msm_size = msm.size(); const size_t num_rows_per_digit = @@ -383,12 +450,16 @@ class ECCVMMSMMBuilder { for (size_t digit_idx = 0; digit_idx < NUM_WNAF_DIGITS_PER_SCALAR; ++digit_idx) { for (size_t row_idx = 0; row_idx < num_rows_per_digit; ++row_idx) { auto& row = msm_rows[msm_row_index]; + // note that we do not store the "intermediate accumulators" that are implicit *within* a row (i.e., + // within a given `add_state` object). 
This is the reason why accumulator_index only increments once + // per `row_idx`. const Element& normalized_accumulator = accumulator_trace[accumulator_index]; BB_ASSERT_EQ(normalized_accumulator.is_point_at_infinity(), 0); row.accumulator_x = normalized_accumulator.x; row.accumulator_y = normalized_accumulator.y; for (size_t point_idx = 0; point_idx < ADDITIONS_PER_ROW; ++point_idx) { auto& add_state = row.add_state[point_idx]; + const auto& inverse = inverse_trace[trace_index]; const auto& p1 = p1_trace[trace_index]; const auto& p2 = p2_trace[trace_index]; @@ -400,6 +471,8 @@ class ECCVMMSMMBuilder { msm_row_index++; } + // if digit_idx < NUM_WNAF_DIGITS_PER_SCALAR - 1 we have to fill out our doubling row (which in fact + // amounts to 4 doublings) if (digit_idx < NUM_WNAF_DIGITS_PER_SCALAR - 1) { MSMRow& row = msm_rows[msm_row_index]; const Element& normalized_accumulator = accumulator_trace[accumulator_index]; @@ -409,15 +482,19 @@ class ECCVMMSMMBuilder { row.accumulator_y = acc_y; for (size_t point_idx = 0; point_idx < ADDITIONS_PER_ROW; ++point_idx) { auto& add_state = row.add_state[point_idx]; - add_state.collision_inverse = 0; + add_state.collision_inverse = 0; // no notion of "different x values" for a point doubling const FF& dx = p1_trace[trace_index].x; - const FF& inverse = inverse_trace[trace_index]; + const FF& inverse = inverse_trace[trace_index]; // here, 2y add_state.lambda = ((dx + dx + dx) * dx) * inverse; trace_index++; } accumulator_index++; msm_row_index++; - } else { + } else // this row corresponds to performing point additions to handle WNAF skew + // i.e. iterate over all the points in the MSM - if for a given point, `wnaf_skew == 1`, + // subtract the original point from the accumulator. if `digit_idx == NUM_WNAF_DIGITS_PER_SCALAR + // - 1` we have finished executing our double-and-add algorithm. 
+ { for (size_t row_idx = 0; row_idx < num_rows_per_digit; ++row_idx) { MSMRow& row = msm_rows[msm_row_index]; const Element& normalized_accumulator = accumulator_trace[accumulator_index]; @@ -444,8 +521,8 @@ class ECCVMMSMMBuilder { } // populate the final row in the MSM execution trace. - // we always require 1 extra row at the end of the trace, because the accumulator x/y coordinates for row `i` - // are present at row `i+1` + // we always require 1 extra row at the end of the trace, because the x and y coordinates of the accumulator for + // row `i` are present at row `i+1` Element final_accumulator(accumulator_trace.back()); MSMRow& final_row = msm_rows.back(); final_row.pc = static_cast(pc_values.back()); diff --git a/barretenberg/cpp/src/barretenberg/eccvm/precomputed_tables_builder.hpp b/barretenberg/cpp/src/barretenberg/eccvm/precomputed_tables_builder.hpp index cf1cf7239f9f..a400462376c1 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/precomputed_tables_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/precomputed_tables_builder.hpp @@ -21,8 +21,16 @@ class ECCVMPointTablePrecomputationBuilder { static constexpr size_t NUM_WNAF_DIGITS_PER_SCALAR = bb::eccvm::NUM_WNAF_DIGITS_PER_SCALAR; static constexpr size_t WNAF_DIGITS_PER_ROW = bb::eccvm::WNAF_DIGITS_PER_ROW; static constexpr size_t NUM_WNAF_DIGIT_BITS = bb::eccvm::NUM_WNAF_DIGIT_BITS; - - struct PointTablePrecoputationRow { + // Note that our implementation takes advantage of a numerical coincidence: + // `NUM_WNAF_DIGITS_PER_SCALAR`/`WNAF_DIGITS_PER_ROW`, the number of rows per scalar multiplication, is the same as + // |{P, 3P, ..., (2ʷ-1)P}| = 2ʷ⁻¹ == 8, which is basically the number of multiples of P we need to precompute. (To + // be precise, we also compute 2P, but this occurs on every row.) + struct PointTablePrecomputationRow { + // s1, ..., s8 are each 2 bits, so they jointly encode 16 bits of information, which corresponds precisely to + // the data of 4 wNAF digits. 
they are ordered from "highest order" to "lowest order". this means that s1s2 + // encodes the first (highest order) wNAF digit in consideration, and so on. the explicit encoding is: the + // concatenation, s_{2i}s_{2i+1}, is naturally a number in {0, 1, ..., 15}; to obtain the corresponding wNAF + // digit, multiply by 2 and subtract 15. int s1 = 0; int s2 = 0; int s3 = 0; @@ -36,19 +44,21 @@ class ECCVMPointTablePrecomputationBuilder { uint32_t pc = 0; uint32_t round = 0; uint256_t scalar_sum = 0; - AffineElement precompute_accumulator{ 0, 0 }; + AffineElement precompute_accumulator{ + 0, 0 + }; // contains a precomputed element, i.e., something in {P, 3P, ..., 15P}. AffineElement precompute_double{ 0, 0 }; }; - static std::vector compute_rows( + static std::vector compute_rows( const std::vector>& ecc_muls) { static constexpr size_t num_rows_per_scalar = NUM_WNAF_DIGITS_PER_SCALAR / WNAF_DIGITS_PER_ROW; const size_t num_precompute_rows = num_rows_per_scalar * ecc_muls.size() + 1; - std::vector precompute_state(num_precompute_rows); + std::vector precompute_state(num_precompute_rows); // start with empty row (shiftable polynomials must have 0 as first coefficient) - precompute_state[0] = PointTablePrecoputationRow{}; + precompute_state[0] = PointTablePrecomputationRow{}; // current impl doesn't work if not 4 static_assert(WNAF_DIGITS_PER_ROW == 4); @@ -60,12 +70,13 @@ class ECCVMPointTablePrecomputationBuilder { uint256_t scalar_sum = 0; for (size_t i = 0; i < num_rows_per_scalar; ++i) { - PointTablePrecoputationRow row; + PointTablePrecomputationRow row; const int slice0 = slices[i * WNAF_DIGITS_PER_ROW]; const int slice1 = slices[i * WNAF_DIGITS_PER_ROW + 1]; const int slice2 = slices[i * WNAF_DIGITS_PER_ROW + 2]; const int slice3 = slices[i * WNAF_DIGITS_PER_ROW + 3]; + // {-15, -13. 
..., 13, 15} --> {0, 1, ..., 15} const int slice0base2 = (slice0 + 15) / 2; const int slice1base2 = (slice1 + 15) / 2; const int slice2base2 = (slice2 + 15) / 2; @@ -105,9 +116,12 @@ class ECCVMPointTablePrecomputationBuilder { if (last_row) { ASSERT(scalar_sum - entry.wnaf_skew, entry.scalar); } - + // the last element of the `precomputed_table` field of a `ScalarMul` is the double of the point. row.precompute_double = entry.precomputed_table[bb::eccvm::POINT_TABLE_SIZE]; // fill accumulator in reverse order i.e. first row = 15[P], then 13[P], ..., 1[P] + // note that this reflects a coincidence: the number of rows (per scalar multiplication) is + // the number of multiples that we need to precompute. Indeed, the latter is 2ʷ⁻¹, while the former + // depends both on w and on `NUM_SCALAR_BITS`. row.precompute_accumulator = entry.precomputed_table[bb::eccvm::POINT_TABLE_SIZE - 1 - i]; precompute_state[j * num_rows_per_scalar + i + 1] = (row); } diff --git a/barretenberg/cpp/src/barretenberg/eccvm/transcript_builder.hpp b/barretenberg/cpp/src/barretenberg/eccvm/transcript_builder.hpp index 1be7f4d02b41..6228396538fd 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/transcript_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/transcript_builder.hpp @@ -21,40 +21,63 @@ class ECCVMTranscriptBuilder { using Accumulator = typename std::vector; struct TranscriptRow { - + ///////////////////////////////////// // These fields are populated in the first loop - bool transcript_msm_infinity = false; - bool accumulator_not_empty = false; + ///////////////////////////////////// + + bool transcript_msm_infinity = false; // are we at the end of an MSM *and* is the output the point at infinity? + bool accumulator_not_empty = false; // not(is the accumulator either empty or point-at-infinity?) 
bool q_add = false; bool q_mul = false; bool q_eq = false; bool q_reset_accumulator = false; bool msm_transition = false; uint32_t pc = 0; - uint32_t msm_count = 0; - bool msm_count_zero_at_transition = false; - FF base_x = 0; - FF base_y = 0; - bool base_infinity = false; - uint256_t z1 = 0; - uint256_t z2 = 0; - bool z1_zero = false; - bool z2_zero = false; - uint32_t opcode = 0; - - // These fields are populated after converting Jacobian to affine coordinates - FF accumulator_x = 0; - FF accumulator_y = 0; - FF msm_output_x = 0; - FF msm_output_y = 0; - FF transcript_msm_intermediate_x = 0; - FF transcript_msm_intermediate_y = 0; - + uint32_t msm_count = 0; // number of multiplications in the MSM *up until and not including* this step. + bool msm_count_zero_at_transition = + false; // is the number of scalar muls we have completed at the end of our "MSM block" zero? + FF base_x = 0; // [P] = (base_x, base_y) + FF base_y = 0; // [P] = (base_x, base_y) + bool base_infinity = false; // is [P] == neutral element? + uint256_t z1 = 0; // `scalar` = z1 - \lambda * z2 = z1 + \zeta * z2, where $\zeta$ is a primitive sixth root of + // unity and \lambda is -\zeta. + uint256_t z2 = 0; // `scalar` = z1 - \lambda * z2 = z1 + \zeta * z2, where $\zeta$ is a primitive sixth root of + // unity and \lambda is -\zeta. + bool z1_zero = false; // `z1 == 0` + bool z2_zero = false; // `z2 == 0` + uint32_t opcode = 0; // opcode value, in {0, .., 15}, given by 8 * q_add + 4 * q_mul + 2 * q_eq + q_reset. + + ///////////////////////////////////// + // These fields are populated after converting projective to affine coordinates + ///////////////////////////////////// + + // [A] is the current accumulated point, in affine coordinates, for all EC operations. + // this is affected by an `add` op-code, or the end of an MSM, or a reset. 
+ FF accumulator_x = 0; // [A] = (`accumulator_x`, `accumulator_y`) + FF accumulator_y = 0; // [A] = (`accumulator_x`, `accumulator_y`) + + // the following two are accumulators for the MSM. + // the difference between (`msm_output_x`, `msm_output_y`) and (`transcript_msm_intermediate_x`, + // `transcript_msm_intermediate_y`) is the OFFSET. + FF msm_output_x = 0; // if we are at the end of an MSM, output of MSM + OFFSET = (`msm_output_x`, + // `msm_output_y`), else, 0. + FF msm_output_y = 0; // if we are at the end of an MSM, output of MSM + OFFSET = (`msm_output_x`, + // `msm_output_y`), else 0. + FF transcript_msm_intermediate_x = + 0; // if we are at the end of an MSM, output of MSM = + // (`transcript_msm_intermediate_x`, `transcript_msm_intermediate_y`), else, 0. + FF transcript_msm_intermediate_y = + 0; // if we are at the end of an MSM, output of MSM = + // (`transcript_msm_intermediate_x`, `transcript_msm_intermediate_y`), else, 0. + + ///////////////////////////////////// // Computed during the lambda numerator and denominator computation + ///////////////////////////////////// bool transcript_add_x_equal = false; bool transcript_add_y_equal = false; - + ///////////////////////////////////// // Computed after the batch inversion + ///////////////////////////////////// FF base_x_inverse = 0; FF base_y_inverse = 0; FF transcript_add_lambda = 0; @@ -83,11 +106,16 @@ class ECCVMTranscriptBuilder { { return AffineElement(Element(other) - offset_generator()); } + + // maintains the state of the VM at any given "time" (i.e., at any given value of pc). struct VMState { - uint32_t pc = 0; - uint32_t count = 0; - Element accumulator = CycleGroup::affine_point_at_infinity; - Element msm_accumulator = offset_generator(); + uint32_t pc = 0; // decreasing point counter that tracks the total number of multiplications that our virtual + // machine has left to compute. + uint32_t count = 0; // Number of muls in the current MSM _excluding the current row_. 
+ Element accumulator = CycleGroup::affine_point_at_infinity; // accumulator for all group operations. + Element msm_accumulator = + offset_generator(); // accumulator for the current MSM with an offset. (we start with offset_generator, + // i.e. random element of the group, to avoid bifurcated logic at each operation step.) bool is_accumulator_empty = true; }; @@ -98,8 +126,8 @@ class ECCVMTranscriptBuilder { * multi-scalar multiplications and point additions, while creating the * transcript of the operations. In the first loop over the rows of ECCOpQueue, it mostly populates the * TranscriptRow with boolean flags indicating the structure of the ops being performed, while performing - * elliptic curve operations in Jacobian coordinates, and then normalizes these points to affine coordinates. Batch - * inversion is used to optimize expensive finite field inversions. + * elliptic curve operations in Jacobian (a.k.a projective) coordinates, and then normalizes these points to affine + * coordinates. Batch inversion is used to optimize expensive finite field inversions. * * @param vm_operations ECCOpQueue * @param total_number_of_muls The total number of multiplications in the series of operations. @@ -112,6 +140,7 @@ class ECCVMTranscriptBuilder { const size_t num_vm_entries = vm_operations.size(); // The transcript contains an extra zero row at the beginning and the accumulated state at the end const size_t transcript_size = num_vm_entries + 2; + // `transcript_state[i+1]` corresponds to `vm_operation[i]`. std::vector transcript_state(transcript_size); // These vectors track quantities that we need to invert. 
@@ -123,9 +152,12 @@ class ECCVMTranscriptBuilder { std::vector add_lambda_numerator(num_vm_entries); std::vector msm_count_at_transition_inverse_trace(num_vm_entries); - Accumulator msm_accumulator_trace(num_vm_entries); - Accumulator accumulator_trace(num_vm_entries); - Accumulator intermediate_accumulator_trace(num_vm_entries); + Accumulator msm_accumulator_trace(num_vm_entries); // ith entry is either neutral element or the value of the + // just-completed MSM (shifted by the OFFSET). + Accumulator accumulator_trace(num_vm_entries); // ith entry is the total accumulated value up-to-now + Accumulator intermediate_accumulator_trace( + num_vm_entries); // ith entry is either the neutral element, or the actual value of the just-completed MSM + // (i.e., there is no OFFSET). VMState state{ .pc = total_number_of_muls, @@ -137,12 +169,12 @@ class ECCVMTranscriptBuilder { VMState updated_state; - // add an empty row. 1st row all zeroes because of our shiftable polynomials + // add an empty row: first row is all zeroes because of our shiftable polynomials. transcript_state[0] = (TranscriptRow{}); - // during the first iteration over the ECCOpQueue, the operations are being performed using Jacobian - // coordinates and the base point coordinates are recorded in the transcript. at the same time, the transcript - // logic is being populated + // during the first iteration over the ECCOpQueue, the operations are being performed using Jacobian (a.k.a. + // projective) coordinates and the base point coordinates are recorded in the transcript. at the same time, the + // transcript logic is being populated for (size_t i = 0; i < num_vm_entries; i++) { TranscriptRow& row = transcript_state[i + 1]; const ECCVMOperation& entry = vm_operations[i]; @@ -154,7 +186,8 @@ class ECCVMTranscriptBuilder { const bool z2_zero = is_mul ? 
entry.z2 == 0 : true; const bool base_point_infinity = entry.base_point.is_point_at_infinity(); - uint32_t num_muls = 0; + uint32_t num_muls = 0; // number of 128-bit multiplications the vm processes in this op_code. + // `num_muls` ∈ {0, 1, 2}. if (is_mul) { num_muls = static_cast(!z1_zero) + static_cast(!z2_zero); if (base_point_infinity) { @@ -162,7 +195,9 @@ class ECCVMTranscriptBuilder { } } updated_state.pc = state.pc - num_muls; - + // if we are at a `reset` or null op, reset the state. + // logically, we should add `updated_state.count = 0`, but this is taken care of later by conditional + // logic and hence is unnecessary here. if (entry.op_code.reset || entry.op_code.value() == 0) { updated_state.is_accumulator_empty = true; updated_state.accumulator = CycleGroup::point_at_infinity; @@ -170,20 +205,23 @@ class ECCVMTranscriptBuilder { } const bool last_row = (i == (num_vm_entries - 1)); - - // msm transition = current row is doing a lookup to validate output = msm output - // i.e. next row is not part of MSM and current row is part of MSM - // or next row is irrelevant and current row is a straight MUL + // next_not_msm == True if either we are at the last row or the next op_code is *not* a mul. const bool next_not_msm = last_row || !vm_operations[i + 1].op_code.mul; - // we reset the count in updated state if we are not accumulating and not doing an msm + // `msm_transition == True` iff we are at the end of an MSM. + // this holds iff: current op_code is `mul`, `next_not_msm == True`, and the total number of muls so far in + // this MSM (including this op_code) is positive. This latter total number + // is `state.count + num_muls`.
const bool msm_transition = is_mul && next_not_msm && (state.count + num_muls > 0); - // determine ongoing msm and update the respective counter + // determine ongoing/continuing msm and update the respective counter const bool current_ongoing_msm = is_mul && !next_not_msm; - + // we reset the count in updated state if we are not accumulating and not doing an msm updated_state.count = current_ongoing_msm ? state.count + num_muls : 0; + // process state based on whether we are at a `mul`, then whether or not this is the last mul in an MSM, + // then finally if this is an add. note that this mutates `updated_state`. (note that the middle option + // depends on the first.) if (is_mul) { process_mul(entry, updated_state, state); } @@ -204,14 +242,15 @@ class ECCVMTranscriptBuilder { msm_count_at_transition_inverse_trace[i] = ((state.count + num_muls) == 0) ? 0 : FF(state.count + num_muls); - // update the accumulators + // update the accumulators. note that `msm_accumulator_trace` and `intermediate_accumulator_trace` are the + // point-at-infinity *unless* we are at the end of an MSM. accumulator_trace[i] = state.accumulator; msm_accumulator_trace[i] = msm_transition ? updated_state.msm_accumulator : Element::infinity(); intermediate_accumulator_trace[i] = msm_transition ? (updated_state.msm_accumulator - offset_generator()) : Element::infinity(); state = updated_state; - + // if we are the last `mul` in an MSM, set the next state's `msm_accumulator` to the offset. if (is_mul && next_not_msm) { state.msm_accumulator = offset_generator(); } @@ -461,7 +500,7 @@ class ECCVMTranscriptBuilder { * the intermediate accumulator and the point in the current accumulator. * * In the case of point addition, we compute the difference between the coordinates of the current row in - * ECCVMOperations and the point in the accumulator. + * ECCVMOperations and the point in the current accumulator.
* */ diff --git a/barretenberg/cpp/src/barretenberg/env/hardware_concurrency.cpp b/barretenberg/cpp/src/barretenberg/env/hardware_concurrency.cpp index 5fe706d527f2..d9d507a2a038 100644 --- a/barretenberg/cpp/src/barretenberg/env/hardware_concurrency.cpp +++ b/barretenberg/cpp/src/barretenberg/env/hardware_concurrency.cpp @@ -11,25 +11,12 @@ extern "C" { -#ifdef NO_MULTITHREADING uint32_t env_hardware_concurrency() { +#ifdef NO_MULTITHREADING return 1; -} #else -uint32_t env_hardware_concurrency() -{ -#ifndef __wasm__ - try { -#endif - static auto val = std::getenv("HARDWARE_CONCURRENCY"); - static const uint32_t cores = val ? (uint32_t)std::stoul(val) : std::thread::hardware_concurrency(); - return cores; -#ifndef __wasm__ - } catch (std::exception const&) { - throw std::runtime_error("HARDWARE_CONCURRENCY invalid."); - } + return std::thread::hardware_concurrency(); #endif } -#endif -} \ No newline at end of file +} diff --git a/barretenberg/cpp/src/barretenberg/env/logstr.cpp b/barretenberg/cpp/src/barretenberg/env/logstr.cpp index e5657b245074..ab58a5ab76b2 100644 --- a/barretenberg/cpp/src/barretenberg/env/logstr.cpp +++ b/barretenberg/cpp/src/barretenberg/env/logstr.cpp @@ -8,6 +8,9 @@ #include #include #include +#ifndef NO_MULTITHREADING +#include +#endif #if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) #include @@ -33,13 +36,13 @@ std::size_t peak_rss_bytes() return static_cast(pmc.PeakWorkingSetSize); #elif defined(__APPLE__) || defined(__FreeBSD__) - struct rusage usage {}; + struct rusage usage{}; if (getrusage(RUSAGE_SELF, &usage) == 0) // ru_maxrss is already bytes on macOS / BSD return static_cast(usage.ru_maxrss); #elif defined(__linux__) - struct rusage usage {}; + struct rusage usage{}; if (getrusage(RUSAGE_SELF, &usage) == 0) // ru_maxrss is kilobytes on Linux → convert to bytes return static_cast(usage.ru_maxrss) * 1024ULL; @@ -62,6 +65,17 @@ std::size_t peak_rss_bytes() 
//--------------------------------------------------------------------- extern "C" void logstr(char const* msg) { +#ifndef NO_MULTITHREADING + static std::mutex log_mutex; + std::lock_guard lock(log_mutex); +#endif + + static bool disable_mem_usage = std::getenv("BB_DISABLE_MEM_USAGE") != nullptr; + if (disable_mem_usage) { + std::cerr << msg << '\n'; + return; + } + const std::size_t bytes = peak_rss_bytes(); std::cerr << msg; diff --git a/barretenberg/cpp/src/barretenberg/env/throw_or_abort_impl.cpp b/barretenberg/cpp/src/barretenberg/env/throw_or_abort_impl.cpp index 0411edd25aa1..2bdc126efc91 100644 --- a/barretenberg/cpp/src/barretenberg/env/throw_or_abort_impl.cpp +++ b/barretenberg/cpp/src/barretenberg/env/throw_or_abort_impl.cpp @@ -1,5 +1,8 @@ #include "barretenberg/common/log.hpp" #include +#ifdef STACKTRACES +#include +#endif inline void abort_with_message [[noreturn]] (std::string const& err) { @@ -10,6 +13,13 @@ inline void abort_with_message [[noreturn]] (std::string const& err) // Native implementation of throw_or_abort extern "C" void throw_or_abort_impl [[noreturn]] (const char* err) { + +#ifdef STACKTRACES + // Use backward library to print stack trace + backward::StackTrace trace; + trace.load_here(32); + backward::Printer{}.print(trace); +#endif #ifndef BB_NO_EXCEPTIONS throw std::runtime_error(err); #else diff --git a/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp index 378895a2bb37..df65c58e7cad 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp @@ -99,6 +99,14 @@ namespace bb { +/** + * @brief Enum to control verification key metadata serialization + */ +enum class VKSerializationMode : std::uint8_t { + FULL, // Serialize all metadata (log_circuit_size, num_public_inputs, pub_inputs_offset) + NO_METADATA // Serialize only commitments, no metadata +}; + // Specifies the regions of the execution trace containing 
non-trivial wire values struct ActiveRegionData { void add_range(const size_t start, const size_t end) @@ -148,7 +156,9 @@ template struct Precomput * * @tparam PrecomputedEntities An instance of PrecomputedEntities_ with affine_element data type and handle type. */ -template +template class NativeVerificationKey_ : public PrecomputedCommitments { public: using Commitment = typename PrecomputedCommitments::DataType; @@ -165,6 +175,25 @@ class NativeVerificationKey_ : public PrecomputedCommitments { this->num_public_inputs = num_public_inputs; }; + /** + * @brief Calculate the number of field elements needed for serialization + * @return size_t Number of field elements + */ + static size_t calc_num_data_types() + { + using namespace bb::field_conversion; + // Create a temporary instance to get the number of precomputed entities + size_t commitments_size = + PrecomputedCommitments::size() * Transcript::template calc_num_data_types(); + size_t metadata_size = 0; + if constexpr (SerializeMetadata == VKSerializationMode::FULL) { + // 3 metadata fields + commitments + metadata_size = 3 * Transcript::template calc_num_data_types(); + } + // else NO_METADATA: metadata_size remains 0 + return metadata_size + commitments_size; + } + /** * @brief Serialize verification key to field elements * @@ -181,17 +210,50 @@ class NativeVerificationKey_ : public PrecomputedCommitments { std::vector elements; - serialize(this->log_circuit_size, elements); - serialize(this->num_public_inputs, elements); - serialize(this->pub_inputs_offset, elements); + if constexpr (SerializeMetadata == VKSerializationMode::FULL) { + serialize(this->log_circuit_size, elements); + serialize(this->num_public_inputs, elements); + serialize(this->pub_inputs_offset, elements); + } + // else NO_METADATA: skip metadata serialization for (const Commitment& commitment : this->get_all()) { serialize(commitment, elements); } + NativeVerificationKey_ key; + key.from_field_elements(elements); return elements; }; + /** 
+ * @brief Populate verification key from field elements + * @param elements Field elements to deserialize from + */ + size_t from_field_elements(const std::span& elements) + { + using namespace bb::field_conversion; + + size_t idx = 0; + auto deserialize = [&idx, &elements](T& target) { + size_t size = Transcript::template calc_num_data_types(); + target = Transcript::template deserialize(elements.subspan(idx, size)); + idx += size; + }; + + if constexpr (SerializeMetadata == VKSerializationMode::FULL) { + deserialize(this->log_circuit_size); + deserialize(this->num_public_inputs); + deserialize(this->pub_inputs_offset); + } + // else NO_METADATA: skip metadata deserialization + + for (Commitment& commitment : this->get_all()) { + deserialize(commitment); + } + return idx; + } + /** * @brief A model function to show how to compute the VK hash(without the Transcript abstracting things away) * @details Currently only used in testing. @@ -205,7 +267,7 @@ class NativeVerificationKey_ : public PrecomputedCommitments { } /** - * @brief Adds the verification key hash to the transcript and returns the hash. + * @brief Hashes the vk using the transcript's independent buffer and returns the hash. * @details Needed to make sure the Origin Tag system works. We need to set the origin tags of the VK witnesses in * the transcript. If we instead did the hashing outside of the transcript and submitted just the hash, only the * origin tag of the hash would be set properly. 
We want to avoid backpropagating origin tags to the actual VK @@ -216,7 +278,8 @@ class NativeVerificationKey_ : public PrecomputedCommitments { * @param transcript * @returns The hash of the verification key */ - virtual fr add_hash_to_transcript(const std::string& domain_separator, Transcript& transcript) const + virtual typename Transcript::DataType hash_through_transcript(const std::string& domain_separator, + Transcript& transcript) const { transcript.add_to_independent_hash_buffer(domain_separator + "vk_log_circuit_size", this->log_circuit_size); transcript.add_to_independent_hash_buffer(domain_separator + "vk_num_public_inputs", this->num_public_inputs); @@ -226,10 +289,8 @@ class NativeVerificationKey_ : public PrecomputedCommitments { transcript.add_to_independent_hash_buffer(domain_separator + "vk_commitment", commitment); } - fr vk_hash = transcript.hash_independent_buffer(); - transcript.add_to_hash_buffer(domain_separator + "vk_hash", vk_hash); - return vk_hash; - }; + return transcript.hash_independent_buffer(); + } }; /** @@ -238,8 +299,11 @@ class NativeVerificationKey_ : public PrecomputedCommitments { * @tparam Builder * @tparam FF * @tparam PrecomputedCommitments + * @tparam SerializeMetadata Controls how metadata is serialized (FULL, NO_METADATA) */ -template +template class StdlibVerificationKey_ : public PrecomputedCommitments { public: using Builder = Builder_; @@ -292,14 +356,14 @@ class StdlibVerificationKey_ : public PrecomputedCommitments { * @param builder * @return FF */ - FF hash(Builder& builder) + FF hash() { - FF vk_hash = stdlib::poseidon2::hash(builder, to_field_elements()); + FF vk_hash = stdlib::poseidon2::hash(to_field_elements()); return vk_hash; } /** - * @brief Adds the verification key hash to the transcript and returns the hash. + * @brief Hashes the vk using the transcript's independent buffer and returns the hash. * @details Needed to make sure the Origin Tag system works. 
We need to set the origin tags of the VK witnesses in * the transcript. If we instead did the hashing outside of the transcript and submitted just the hash, only the * origin tag of the hash would be set properly. We want to avoid backpropagating origin tags to the actual VK @@ -310,7 +374,7 @@ class StdlibVerificationKey_ : public PrecomputedCommitments { * @param transcript * @returns The hash of the verification key */ - virtual FF add_hash_to_transcript(const std::string& domain_separator, Transcript& transcript) const + virtual FF hash_through_transcript(const std::string& domain_separator, Transcript& transcript) const { transcript.add_to_independent_hash_buffer(domain_separator + "vk_log_circuit_size", this->log_circuit_size); transcript.add_to_independent_hash_buffer(domain_separator + "vk_num_public_inputs", this->num_public_inputs); @@ -318,10 +382,8 @@ class StdlibVerificationKey_ : public PrecomputedCommitments { for (const Commitment& commitment : this->get_all()) { transcript.add_to_independent_hash_buffer(domain_separator + "vk_commitment", commitment); } - FF vk_hash = transcript.hash_independent_buffer(); - transcript.add_to_hash_buffer(domain_separator + "vk_hash", vk_hash); - return vk_hash; - }; + return transcript.hash_independent_buffer(); + } }; template class VKAndHash_ { @@ -397,21 +459,21 @@ template constexpr size_t compute_number_of_subrelations() * tuple of univariates whose size is equal to the number of subrelations of the relation. The length of a * univariate in an inner tuple is determined by the corresponding subrelation length and the number of keys to be * folded. 
- * @tparam optimised Enable optimised version with skipping some of the computation + * @tparam optimized Enable optimized version with skipping some of the computation */ -template +template constexpr auto create_protogalaxy_tuple_of_tuples_of_univariates() { constexpr auto seq = std::make_index_sequence>(); return [](std::index_sequence) { - if constexpr (optimised) { + if constexpr (optimized) { return flat_tuple::make_tuple( typename std::tuple_element_t::template ProtogalaxyTupleOfUnivariatesOverSubrelations< - NUM_KEYS>{}...); + NUM_INSTANCES>{}...); } else { return flat_tuple::make_tuple( typename std::tuple_element_t:: - template ProtogalaxyTupleOfUnivariatesOverSubrelationsNoOptimisticSkipping{}...); + template ProtogalaxyTupleOfUnivariatesOverSubrelationsNoOptimisticSkipping{}...); } }(seq); } @@ -472,6 +534,44 @@ template class UltraRollupRecursiveFlavor_; template class MegaRecursiveFlavor_; template class MegaZKRecursiveFlavor_; +// Serialization methods for NativeVerificationKey_. +// These should cover all base classes that do not need additional members, as long as the appropriate SerializeMetadata +// is set in the template parameters. 
+template +inline void read(uint8_t const*& it, NativeVerificationKey_& vk) +{ + using serialize::read; + + // Get the size directly from the static method + size_t num_frs = + NativeVerificationKey_::calc_num_data_types(); + + // Read exactly num_frs field elements from the buffer + std::vector field_elements(num_frs); + for (auto& element : field_elements) { + read(it, element); + } + // Then use from_field_elements to populate the verification key + vk.from_field_elements(field_elements); +} + +template +inline void write(std::vector& buf, + NativeVerificationKey_ const& vk) +{ + using serialize::write; + size_t before = buf.size(); + // Convert to field elements and write them directly without length prefix + auto field_elements = vk.to_field_elements(); + for (const auto& element : field_elements) { + write(buf, element); + } + size_t after = buf.size(); + size_t num_frs = + NativeVerificationKey_::calc_num_data_types(); + BB_ASSERT_EQ(after - before, num_frs * sizeof(bb::fr), "VK serialization mismatch"); +} + namespace avm2 { class AvmRecursiveFlavor; } diff --git a/barretenberg/cpp/src/barretenberg/flavor/flavor.test.cpp b/barretenberg/cpp/src/barretenberg/flavor/flavor.test.cpp index 4783a05d66aa..c27d04e659b9 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/flavor.test.cpp +++ b/barretenberg/cpp/src/barretenberg/flavor/flavor.test.cpp @@ -71,9 +71,8 @@ TEST(Flavor, GetRow) using Flavor = UltraFlavor; using FF = typename Flavor::FF; std::array, Flavor::NUM_ALL_ENTITIES> data; - std::generate(data.begin(), data.end(), []() { - return std::vector({ FF::random_element(), FF::random_element() }); - }); + std::generate( + data.begin(), data.end(), []() { return std::vector({ FF::random_element(), FF::random_element() }); }); Flavor::ProverPolynomials prover_polynomials; for (auto [poly, entry] : zip_view(prover_polynomials.get_all(), data)) { poly = Flavor::Polynomial(entry); diff --git a/barretenberg/cpp/src/barretenberg/flavor/flavor_macros.hpp 
b/barretenberg/cpp/src/barretenberg/flavor/flavor_macros.hpp index d3e2884d682b..6c3dd8f44326 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/flavor_macros.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/flavor_macros.hpp @@ -47,7 +47,6 @@ template auto _static_concatenate_base_class_get_labels( // Needed to force expansion of __VA_ARGS__ before converting to string. #define VARARGS_TO_STRING(...) #__VA_ARGS__ -// We use std::remove_reference to support a flavor that has references as members. This is an AVM use case. #define DEFINE_REF_VIEW(...) \ [[nodiscard]] auto get_all() \ { \ diff --git a/barretenberg/cpp/src/barretenberg/flavor/flavor_serialization.test.cpp b/barretenberg/cpp/src/barretenberg/flavor/flavor_serialization.test.cpp index 29650de082f7..b27c4f8ddf19 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/flavor_serialization.test.cpp +++ b/barretenberg/cpp/src/barretenberg/flavor/flavor_serialization.test.cpp @@ -20,7 +20,7 @@ using namespace bb; template class FlavorSerializationTests : public ::testing::Test { public: using Builder = typename Flavor::CircuitBuilder; - using DeciderProvingKey = DeciderProvingKey_; + using ProverInstance = ProverInstance_; using VerificationKey = typename Flavor::VerificationKey; protected: @@ -38,7 +38,7 @@ TYPED_TEST_SUITE(FlavorSerializationTests, FlavorTypes); TYPED_TEST(FlavorSerializationTests, VerificationKeySerialization) { using Builder = typename TestFixture::Builder; - using DeciderProvingKey = typename TestFixture::DeciderProvingKey; + using ProverInstance = typename TestFixture::ProverInstance; using VerificationKey = typename TestFixture::VerificationKey; Builder builder; @@ -47,8 +47,8 @@ TYPED_TEST(FlavorSerializationTests, VerificationKeySerialization) MockCircuits::add_arithmetic_gates_with_public_inputs(builder, /*num_gates=*/100); stdlib::recursion::PairingPoints::add_default_to_public_inputs(builder); - auto proving_key = std::make_shared(builder); - VerificationKey original_vkey{ 
proving_key->get_precomputed() }; + auto prover_instance = std::make_shared(builder); + VerificationKey original_vkey{ prover_instance->get_precomputed() }; // Serialize and deserialize the verification key std::vector vkey_buffer = to_buffer(original_vkey); diff --git a/barretenberg/cpp/src/barretenberg/flavor/grand_product_library.test.cpp b/barretenberg/cpp/src/barretenberg/flavor/grand_product_library.test.cpp index d7b860df4030..71a49d27f03f 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/grand_product_library.test.cpp +++ b/barretenberg/cpp/src/barretenberg/flavor/grand_product_library.test.cpp @@ -55,7 +55,6 @@ template class GrandProductTests : public testing::Test { .beta = beta, .gamma = gamma, .public_input_delta = 1, - .lookup_grand_product_delta = 1, }; compute_grand_product>(prover_polynomials, params); diff --git a/barretenberg/cpp/src/barretenberg/flavor/mega_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/mega_flavor.hpp index 00f2dd0614c8..b6abac34640d 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/mega_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/mega_flavor.hpp @@ -44,6 +44,9 @@ class MegaFlavor { using TraceBlocks = MegaExecutionTraceBlocks; using Transcript = NativeTranscript; + // An upper bound on the size of the Mega-circuits. `CONST_PG_LOG_N` bounds the log circuit sizes in the CIVC + // context. `MEGA_AVM_LOG_N` is determined by the size of the AVMRecursiveVerifier. + static constexpr size_t VIRTUAL_LOG_N = std::max(CONST_PG_LOG_N, MEGA_AVM_LOG_N); // indicates when evaluating sumcheck, edges can be left as degree-1 monomials static constexpr bool USE_SHORT_MONOMIALS = true; // Indicates that this flavor runs with non-ZK Sumcheck. @@ -100,7 +103,7 @@ class MegaFlavor { static constexpr size_t OINK_PROOF_LENGTH_WITHOUT_PUB_INPUTS = /* 1. 
NUM_WITNESS_ENTITIES commitments */ (NUM_WITNESS_ENTITIES * num_frs_comm); - static constexpr size_t DECIDER_PROOF_LENGTH(size_t virtual_log_n = CONST_PROOF_SIZE_LOG_N) + static constexpr size_t DECIDER_PROOF_LENGTH(size_t virtual_log_n = VIRTUAL_LOG_N) { return /* 2. virtual_log_n sumcheck univariates */ (virtual_log_n * BATCHED_RELATION_PARTIAL_LENGTH * num_frs_fr) + @@ -111,7 +114,7 @@ class MegaFlavor { /* 7. KZG W commitment */ (num_frs_comm); } - static constexpr size_t PROOF_LENGTH_WITHOUT_PUB_INPUTS(size_t virtual_log_n = CONST_PROOF_SIZE_LOG_N) + static constexpr size_t PROOF_LENGTH_WITHOUT_PUB_INPUTS(size_t virtual_log_n = VIRTUAL_LOG_N) { return OINK_PROOF_LENGTH_WITHOUT_PUB_INPUTS + DECIDER_PROOF_LENGTH(virtual_log_n); } @@ -122,15 +125,15 @@ class MegaFlavor { static constexpr size_t NUM_SUBRELATIONS = compute_number_of_subrelations(); using SubrelationSeparators = std::array; - template + template using ProtogalaxyTupleOfTuplesOfUnivariatesNoOptimisticSkipping = - decltype(create_protogalaxy_tuple_of_tuples_of_univariates()); + decltype(create_protogalaxy_tuple_of_tuples_of_univariates()); - template + template using ProtogalaxyTupleOfTuplesOfUnivariates = decltype(create_protogalaxy_tuple_of_tuples_of_univariates()); + NUM_INSTANCES, + /*optimized=*/true>()); // Whether or not the first row of the execution trace is reserved for 0s to enable shifts static constexpr bool has_zero_row = true; @@ -176,8 +179,6 @@ class MegaFlavor { databus_id // column 30 // id polynomial, i.e. 
id_i = i ) - static constexpr CircuitType CIRCUIT_TYPE = CircuitBuilder::CIRCUIT_TYPE; - auto get_non_gate_selectors() { return RefArray{ q_m, q_c, q_l, q_r, q_o, q_4 }; }; auto get_gate_selectors() { @@ -362,7 +363,7 @@ class MegaFlavor { // fully-formed constructor ProverPolynomials(size_t circuit_size) { - PROFILE_THIS_NAME("ProverPolynomials(size_t)"); + BB_BENCH_NAME("ProverPolynomials(size_t)"); for (auto& poly : get_to_be_shifted()) { poly = Polynomial{ /*memory size*/ circuit_size - 1, @@ -386,7 +387,6 @@ class MegaFlavor { [[nodiscard]] size_t get_polynomial_size() const { return q_c.size(); } [[nodiscard]] AllValues get_row(size_t row_idx) const { - PROFILE_THIS_NAME("MegaFlavor::get_row"); AllValues result; for (auto [result_field, polynomial] : zip_view(result.get_all(), this->get_all())) { result_field = polynomial[row_idx]; @@ -437,11 +437,6 @@ class MegaFlavor { */ class VerificationKey : public NativeVerificationKey_, Transcript> { public: - // Serialized Verification Key length in fields - static constexpr size_t VERIFICATION_KEY_LENGTH = - /* 1. Metadata (log_circuit_size, num_public_inputs, pub_inputs_offset) */ (3 * num_frs_fr) + - /* 2. NUM_PRECOMPUTED_ENTITIES commitments */ (NUM_PRECOMPUTED_ENTITIES * num_frs_comm); - VerificationKey() = default; VerificationKey(const size_t circuit_size, const size_t num_public_inputs) : NativeVerificationKey_(circuit_size, num_public_inputs) @@ -465,44 +460,8 @@ class MegaFlavor { commitment = commitment_key.commit(polynomial); } } - - // Don't statically check for object completeness. 
- using MSGPACK_NO_STATIC_CHECK = std::true_type; - MSGPACK_FIELDS(log_circuit_size, - num_public_inputs, - pub_inputs_offset, - q_m, - q_c, - q_l, - q_r, - q_o, - q_4, - q_busread, - q_lookup, - q_arith, - q_delta_range, - q_elliptic, - q_memory, - q_nnf, - q_poseidon2_external, - q_poseidon2_internal, - sigma_1, - sigma_2, - sigma_3, - sigma_4, - id_1, - id_2, - id_3, - id_4, - table_1, - table_2, - table_3, - table_4, - lagrange_first, - lagrange_last, - lagrange_ecc_op, - databus_id); }; + /** * @brief A container for storing the partially evaluated multivariates produced by sumcheck. */ diff --git a/barretenberg/cpp/src/barretenberg/flavor/mega_recursive_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/mega_recursive_flavor.hpp index f192426abf25..d91b7bb5c3af 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/mega_recursive_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/mega_recursive_flavor.hpp @@ -47,7 +47,7 @@ template class MegaRecursiveFlavor_ { using NativeFlavor = MegaFlavor; using Transcript = bb::BaseTranscript>; - + static constexpr size_t VIRTUAL_LOG_N = MegaFlavor::VIRTUAL_LOG_N; // indicates when evaluating sumcheck, edges can be left as degree-1 monomials static constexpr bool USE_SHORT_MONOMIALS = MegaFlavor::USE_SHORT_MONOMIALS; // Note(luke): Eventually this may not be needed at all @@ -140,18 +140,18 @@ template class MegaRecursiveFlavor_ { * @param builder * @param elements */ - VerificationKey(CircuitBuilder& builder, std::span elements) + VerificationKey(std::span elements) { using namespace bb::stdlib::field_conversion; size_t num_frs_read = 0; - this->log_circuit_size = deserialize_from_frs(builder, elements, num_frs_read); - this->num_public_inputs = deserialize_from_frs(builder, elements, num_frs_read); - this->pub_inputs_offset = deserialize_from_frs(builder, elements, num_frs_read); + this->log_circuit_size = deserialize_from_frs(elements, num_frs_read); + this->num_public_inputs = deserialize_from_frs(elements, 
num_frs_read); + this->pub_inputs_offset = deserialize_from_frs(elements, num_frs_read); for (Commitment& commitment : this->get_all()) { - commitment = deserialize_from_frs(builder, elements, num_frs_read); + commitment = deserialize_from_frs(elements, num_frs_read); } if (num_frs_read != elements.size()) { @@ -174,7 +174,7 @@ template class MegaRecursiveFlavor_ { for (const auto& idx : witness_indices) { vk_fields.emplace_back(FF::from_witness_index(&builder, idx)); } - return VerificationKey(builder, vk_fields); + return VerificationKey(vk_fields); } /** diff --git a/barretenberg/cpp/src/barretenberg/flavor/mega_zk_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/mega_zk_flavor.hpp index 8c1be1bd4f6d..1df6b94e414b 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/mega_zk_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/mega_zk_flavor.hpp @@ -24,7 +24,7 @@ class MegaZKFlavor : public bb::MegaFlavor { "LIBRA_UNIVARIATES_LENGTH must be equal to MegaZKFlavor::BATCHED_RELATION_PARTIAL_LENGTH"); // Proof length formula - static constexpr size_t PROOF_LENGTH_WITHOUT_PUB_INPUTS(size_t virtual_log_n = CONST_PROOF_SIZE_LOG_N) + static constexpr size_t PROOF_LENGTH_WITHOUT_PUB_INPUTS(size_t virtual_log_n = MegaFlavor::VIRTUAL_LOG_N) { return /* 1. NUM_WITNESS_ENTITIES commitments */ (NUM_WITNESS_ENTITIES * num_frs_comm) + /* 2. 
Libra concatenation commitment*/ (num_frs_comm) + diff --git a/barretenberg/cpp/src/barretenberg/flavor/native_verification_key.test.cpp b/barretenberg/cpp/src/barretenberg/flavor/native_verification_key.test.cpp index 68908c1be13d..5ecd62da9a6e 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/native_verification_key.test.cpp +++ b/barretenberg/cpp/src/barretenberg/flavor/native_verification_key.test.cpp @@ -6,7 +6,7 @@ #include "barretenberg/stdlib/primitives/pairing_points.hpp" #include "barretenberg/stdlib_circuit_builders/mock_circuits.hpp" #include "barretenberg/translator_vm/translator_flavor.hpp" -#include "barretenberg/ultra_honk/decider_proving_key.hpp" +#include "barretenberg/ultra_honk/prover_instance.hpp" #include @@ -35,7 +35,8 @@ template class NativeVerificationKeyTests : public ::testing:: stdlib::recursion::PairingPoints::add_default_to_public_inputs(builder); if constexpr (HasIPAAccumulator) { auto [stdlib_opening_claim, ipa_proof] = - IPA>::create_fake_ipa_claim_and_proof(builder); + IPA>::create_random_valid_ipa_claim_and_proof( + builder); stdlib_opening_claim.set_public(); builder.ipa_proof = ipa_proof; } @@ -44,11 +45,11 @@ template class NativeVerificationKeyTests : public ::testing:: VerificationKey create_vk() { if constexpr (IsUltraOrMegaHonk) { - using DeciderProvingKey = DeciderProvingKey_; + using ProverInstance = ProverInstance_; Builder builder; set_default_pairing_points_and_ipa_claim_and_proof(builder); - auto proving_key = std::make_shared(builder); - return VerificationKey{ proving_key->get_precomputed() }; + auto prover_instance = std::make_shared(builder); + return VerificationKey{ prover_instance->get_precomputed() }; } else { return VerificationKey(); } @@ -61,7 +62,7 @@ TYPED_TEST_SUITE(NativeVerificationKeyTests, FlavorTypes); /** * @brief Checks that the hash produced from calling to_field_elements and then add_to_independent_hash_buffer is the - * same as the hash() call and also the same as the 
add_hash_to_transcript. + * same as the hash() call and also the same as the hash_through_transcript. * */ TYPED_TEST(NativeVerificationKeyTests, VKHashingConsistency) @@ -79,15 +80,15 @@ TYPED_TEST(NativeVerificationKeyTests, VKHashingConsistency) for (const auto& field_element : vk_field_elements) { transcript.add_to_independent_hash_buffer("vk_element", field_element); } - fr vkey_hash_1 = transcript.hash_independent_buffer(); + fr vk_hash_1 = transcript.hash_independent_buffer(); // Second method of hashing: using hash(). - fr vkey_hash_2 = vk.hash(); - EXPECT_EQ(vkey_hash_1, vkey_hash_2); + fr vk_hash_2 = vk.hash(); + EXPECT_EQ(vk_hash_1, vk_hash_2); if constexpr (!IsAnyOf) { - // Third method of hashing: using add_hash_to_transcript. + // Third method of hashing: using hash_through_transcript. typename Flavor::Transcript transcript_2; - fr vkey_hash_3 = vk.add_hash_to_transcript("", transcript_2); - EXPECT_EQ(vkey_hash_2, vkey_hash_3); + fr vk_hash_3 = vk.hash_through_transcript("", transcript_2); + EXPECT_EQ(vk_hash_2, vk_hash_3); } } @@ -105,5 +106,5 @@ TYPED_TEST(NativeVerificationKeyTests, VKSizeCheck) using VerificationKey = typename Flavor::VerificationKey; VerificationKey vk(TestFixture::create_vk()); - EXPECT_EQ(vk.to_field_elements().size(), VerificationKey::VERIFICATION_KEY_LENGTH); + EXPECT_EQ(vk.to_field_elements().size(), VerificationKey::calc_num_data_types()); } diff --git a/barretenberg/cpp/src/barretenberg/flavor/permutation_lib.test.cpp b/barretenberg/cpp/src/barretenberg/flavor/permutation_lib.test.cpp index fa98a919dc1e..0f949037f4c4 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/permutation_lib.test.cpp +++ b/barretenberg/cpp/src/barretenberg/flavor/permutation_lib.test.cpp @@ -53,7 +53,7 @@ class PermutationHelperTests : public ::testing::Test { */ - // construct_selector_polynomials(circuit_constructor, proving_key.get()); + // construct_selector_polynomials(circuit_constructor, prover_instance.get()); } }; diff --git 
a/barretenberg/cpp/src/barretenberg/flavor/stdlib_verification_key.test.cpp b/barretenberg/cpp/src/barretenberg/flavor/stdlib_verification_key.test.cpp index 40a9dd799583..ff8e42196bcd 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/stdlib_verification_key.test.cpp +++ b/barretenberg/cpp/src/barretenberg/flavor/stdlib_verification_key.test.cpp @@ -7,7 +7,7 @@ #include "barretenberg/stdlib/primitives/pairing_points.hpp" #include "barretenberg/stdlib/translator_vm_verifier/translator_recursive_flavor.hpp" #include "barretenberg/stdlib_circuit_builders/mock_circuits.hpp" -#include "barretenberg/ultra_honk/decider_proving_key.hpp" +#include "barretenberg/ultra_honk/prover_instance.hpp" #include @@ -21,7 +21,8 @@ template class StdlibVerificationKeyTests : public ::testing:: stdlib::recursion::PairingPoints::add_default_to_public_inputs(builder); if constexpr (HasIPAAccumulator) { auto [stdlib_opening_claim, ipa_proof] = - IPA>::create_fake_ipa_claim_and_proof(builder); + IPA>::create_random_valid_ipa_claim_and_proof( + builder); stdlib_opening_claim.set_public(); builder.ipa_proof = ipa_proof; } @@ -41,7 +42,7 @@ TYPED_TEST_SUITE(StdlibVerificationKeyTests, FlavorTypes); /** * @brief Checks that the hash produced from calling to_field_elements and then add_to_independent_hash_buffer is the - * same as the hash() call and also the same as the add_hash_to_transcript. + * same as the hash() call and also the same as the hash_through_transcript. 
* */ TYPED_TEST(StdlibVerificationKeyTests, VKHashingConsistency) @@ -59,13 +60,13 @@ TYPED_TEST(StdlibVerificationKeyTests, VKHashingConsistency) if constexpr (IsAnyOf) { native_vk = std::make_shared(); } else { - using DeciderProvingKey = DeciderProvingKey_; + using ProverInstance = ProverInstance_; using InnerBuilder = typename NativeFlavor::CircuitBuilder; InnerBuilder builder; TestFixture::set_default_pairing_points_and_ipa_claim_and_proof(builder); - auto proving_key = std::make_shared(builder); - native_vk = std::make_shared(proving_key->get_precomputed()); + auto prover_instance = std::make_shared(builder); + native_vk = std::make_shared(prover_instance->get_precomputed()); } OuterBuilder outer_builder; @@ -77,14 +78,14 @@ TYPED_TEST(StdlibVerificationKeyTests, VKHashingConsistency) for (const auto& field_element : vk_field_elements) { transcript.add_to_independent_hash_buffer("vk_element", field_element); } - FF vkey_hash_1 = transcript.hash_independent_buffer(); + FF vk_hash_1 = transcript.hash_independent_buffer(); // Second method of hashing: using hash(). - FF vkey_hash_2 = vk.hash(outer_builder); - EXPECT_EQ(vkey_hash_1.get_value(), vkey_hash_2.get_value()); - // Third method of hashing: using add_hash_to_transcript. + FF vk_hash_2 = vk.hash(); + EXPECT_EQ(vk_hash_1.get_value(), vk_hash_2.get_value()); + // Third method of hashing: using hash_through_transcript. 
if constexpr (!IsAnyOf) { StdlibTranscript transcript_2; - FF vkey_hash_3 = vk.add_hash_to_transcript("", transcript_2); - EXPECT_EQ(vkey_hash_2.get_value(), vkey_hash_3.get_value()); + FF vk_hash_3 = vk.hash_through_transcript("", transcript_2); + EXPECT_EQ(vk_hash_2.get_value(), vk_hash_3.get_value()); } } diff --git a/barretenberg/cpp/src/barretenberg/flavor/ultra_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/ultra_flavor.hpp index 77bca8aa7de7..fff3e8b07efa 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/ultra_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/ultra_flavor.hpp @@ -44,6 +44,7 @@ class UltraFlavor { using CommitmentKey = bb::CommitmentKey; using VerifierCommitmentKey = bb::VerifierCommitmentKey; + static constexpr size_t VIRTUAL_LOG_N = CONST_PROOF_SIZE_LOG_N; // indicates when evaluating sumcheck, edges can be left as degree-1 monomials static constexpr bool USE_SHORT_MONOMIALS = true; @@ -113,7 +114,7 @@ class UltraFlavor { static constexpr size_t OINK_PROOF_LENGTH_WITHOUT_PUB_INPUTS = /* 1. NUM_WITNESS_ENTITIES commitments */ (NUM_WITNESS_ENTITIES * num_frs_comm); - static constexpr size_t DECIDER_PROOF_LENGTH(size_t virtual_log_n = CONST_PROOF_SIZE_LOG_N) + static constexpr size_t DECIDER_PROOF_LENGTH(size_t virtual_log_n = VIRTUAL_LOG_N) { return /* 2. virtual_log_n sumcheck univariates */ (virtual_log_n * BATCHED_RELATION_PARTIAL_LENGTH * num_frs_fr) + @@ -124,19 +125,19 @@ class UltraFlavor { /* 7. 
KZG W commitment */ (num_frs_comm); } - static constexpr size_t PROOF_LENGTH_WITHOUT_PUB_INPUTS(size_t virtual_log_n = CONST_PROOF_SIZE_LOG_N) + static constexpr size_t PROOF_LENGTH_WITHOUT_PUB_INPUTS(size_t virtual_log_n = VIRTUAL_LOG_N) { return OINK_PROOF_LENGTH_WITHOUT_PUB_INPUTS + DECIDER_PROOF_LENGTH(virtual_log_n); } - template + template using ProtogalaxyTupleOfTuplesOfUnivariatesNoOptimisticSkipping = - decltype(create_protogalaxy_tuple_of_tuples_of_univariates()); - template + decltype(create_protogalaxy_tuple_of_tuples_of_univariates()); + template using ProtogalaxyTupleOfTuplesOfUnivariates = decltype(create_protogalaxy_tuple_of_tuples_of_univariates()); + NUM_INSTANCES, + /*optimized=*/true>()); // Whether or not the first row of the execution trace is reserved for 0s to enable shifts static constexpr bool has_zero_row = true; @@ -181,8 +182,6 @@ class UltraFlavor { lagrange_first, // column 26 lagrange_last) // column 27 - static constexpr CircuitType CIRCUIT_TYPE = CircuitBuilder::CIRCUIT_TYPE; - auto get_non_gate_selectors() { return RefArray{ q_m, q_c, q_l, q_r, q_o, q_4 }; } auto get_gate_selectors() { @@ -278,7 +277,7 @@ class UltraFlavor { ProverPolynomials(size_t circuit_size) { - PROFILE_THIS_NAME("creating empty prover polys"); + BB_BENCH_NAME("creating empty prover polys"); for (auto& poly : get_to_be_shifted()) { poly = Polynomial{ /*memory size*/ circuit_size - 1, @@ -301,7 +300,6 @@ class UltraFlavor { [[nodiscard]] size_t get_polynomial_size() const { return q_c.size(); } [[nodiscard]] AllValues get_row(const size_t row_idx) const { - PROFILE_THIS_NAME("UltraFlavor::get_row"); AllValues result; for (auto [result_field, polynomial] : zip_view(result.get_all(), get_all())) { result_field = polynomial[row_idx]; @@ -386,7 +384,7 @@ class UltraFlavor { * proof. 
* */ - void deserialize_full_transcript(size_t public_input_size, size_t virtual_log_n = CONST_PROOF_SIZE_LOG_N) + void deserialize_full_transcript(size_t public_input_size, size_t virtual_log_n = VIRTUAL_LOG_N) { // take current proof and put them into the struct auto& proof_data = this->proof_data; @@ -427,34 +425,34 @@ class UltraFlavor { * modified. * */ - void serialize_full_transcript(size_t virtual_log_n = CONST_PROOF_SIZE_LOG_N) + void serialize_full_transcript(size_t virtual_log_n = VIRTUAL_LOG_N) { auto& proof_data = this->proof_data; size_t old_proof_length = proof_data.size(); proof_data.clear(); // clear proof_data so the rest of the function can replace it for (const auto& public_input : public_inputs) { - Base::template serialize_to_buffer(public_input, proof_data); + Base::serialize_to_buffer(public_input, proof_data); } - Base::template serialize_to_buffer(w_l_comm, proof_data); - Base::template serialize_to_buffer(w_r_comm, proof_data); - Base::template serialize_to_buffer(w_o_comm, proof_data); - Base::template serialize_to_buffer(lookup_read_counts_comm, proof_data); - Base::template serialize_to_buffer(lookup_read_tags_comm, proof_data); - Base::template serialize_to_buffer(w_4_comm, proof_data); - Base::template serialize_to_buffer(lookup_inverses_comm, proof_data); - Base::template serialize_to_buffer(z_perm_comm, proof_data); + Base::serialize_to_buffer(w_l_comm, proof_data); + Base::serialize_to_buffer(w_r_comm, proof_data); + Base::serialize_to_buffer(w_o_comm, proof_data); + Base::serialize_to_buffer(lookup_read_counts_comm, proof_data); + Base::serialize_to_buffer(lookup_read_tags_comm, proof_data); + Base::serialize_to_buffer(w_4_comm, proof_data); + Base::serialize_to_buffer(lookup_inverses_comm, proof_data); + Base::serialize_to_buffer(z_perm_comm, proof_data); for (size_t i = 0; i < virtual_log_n; ++i) { - Base::template serialize_to_buffer(sumcheck_univariates[i], proof_data); + Base::serialize_to_buffer(sumcheck_univariates[i], 
proof_data); } - Base::template serialize_to_buffer(sumcheck_evaluations, proof_data); + Base::serialize_to_buffer(sumcheck_evaluations, proof_data); for (size_t i = 0; i < virtual_log_n - 1; ++i) { - Base::template serialize_to_buffer(gemini_fold_comms[i], proof_data); + Base::serialize_to_buffer(gemini_fold_comms[i], proof_data); } for (size_t i = 0; i < virtual_log_n; ++i) { - Base::template serialize_to_buffer(gemini_fold_evals[i], proof_data); + Base::serialize_to_buffer(gemini_fold_evals[i], proof_data); } - Base::template serialize_to_buffer(shplonk_q_comm, proof_data); - Base::template serialize_to_buffer(kzg_w_comm, proof_data); + Base::serialize_to_buffer(shplonk_q_comm, proof_data); + Base::serialize_to_buffer(kzg_w_comm, proof_data); // sanity check to make sure we generate the same length of proof as before. BB_ASSERT_EQ(proof_data.size(), old_proof_length); @@ -473,11 +471,6 @@ class UltraFlavor { */ class VerificationKey : public NativeVerificationKey_, Transcript> { public: - // Serialized Verification Key length in fields - static constexpr size_t VERIFICATION_KEY_LENGTH = - /* 1. Metadata (log_circuit_size, num_public_inputs, pub_inputs_offset) */ (3 * num_frs_fr) + - /* 2. NUM_PRECOMPUTED_ENTITIES commitments */ (NUM_PRECOMPUTED_ENTITIES * num_frs_comm); - bool operator==(const VerificationKey&) const = default; VerificationKey() = default; VerificationKey(const size_t circuit_size, const size_t num_public_inputs) @@ -495,42 +488,6 @@ class UltraFlavor { commitment = commitment_key.commit(polynomial); } } - - // Don't statically check for object completeness. 
- using MSGPACK_NO_STATIC_CHECK = std::true_type; - - // For serialising and deserialising data - MSGPACK_FIELDS(log_circuit_size, - num_public_inputs, - pub_inputs_offset, - q_m, - q_c, - q_l, - q_r, - q_o, - q_4, - q_lookup, - q_arith, - q_delta_range, - q_elliptic, - q_memory, - q_nnf, - q_poseidon2_external, - q_poseidon2_internal, - sigma_1, - sigma_2, - sigma_3, - sigma_4, - id_1, - id_2, - id_3, - id_4, - table_1, - table_2, - table_3, - table_4, - lagrange_first, - lagrange_last); }; /** @@ -541,7 +498,7 @@ class UltraFlavor { PartiallyEvaluatedMultivariates() = default; PartiallyEvaluatedMultivariates(const size_t circuit_size) { - PROFILE_THIS_NAME("PartiallyEvaluatedMultivariates constructor"); + BB_BENCH_NAME("PartiallyEvaluatedMultivariates constructor"); // Storage is only needed after the first partial evaluation, hence polynomials of // size (n / 2) @@ -551,7 +508,7 @@ class UltraFlavor { } PartiallyEvaluatedMultivariates(const ProverPolynomials& full_polynomials, size_t circuit_size) { - PROFILE_THIS_NAME("PartiallyEvaluatedMultivariates constructor"); + BB_BENCH_NAME("PartiallyEvaluatedMultivariates constructor"); for (auto [poly, full_poly] : zip_view(get_all(), full_polynomials.get_all())) { // After the initial sumcheck round, the new size is CEIL(size/2). size_t desired_size = full_poly.end_index() / 2 + full_poly.end_index() % 2; diff --git a/barretenberg/cpp/src/barretenberg/flavor/ultra_keccak_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/ultra_keccak_flavor.hpp index ea92d4d192d7..38ccefbeaf5a 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/ultra_keccak_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/ultra_keccak_flavor.hpp @@ -44,7 +44,7 @@ class UltraKeccakFlavor : public bb::UltraFlavor { static constexpr size_t OINK_PROOF_LENGTH_WITHOUT_PUB_INPUTS = /* 1. 
NUM_WITNESS_ENTITIES commitments */ (NUM_WITNESS_ENTITIES * num_elements_comm); - static constexpr size_t DECIDER_PROOF_LENGTH(size_t virtual_log_n = CONST_PROOF_SIZE_LOG_N) + static constexpr size_t DECIDER_PROOF_LENGTH(size_t virtual_log_n = VIRTUAL_LOG_N) { return /* 2. virtual_log_n sumcheck univariates */ (virtual_log_n * BATCHED_RELATION_PARTIAL_LENGTH * num_elements_fr) + @@ -55,7 +55,7 @@ class UltraKeccakFlavor : public bb::UltraFlavor { /* 7. KZG W commitment */ (num_elements_comm); } - static constexpr size_t PROOF_LENGTH_WITHOUT_PUB_INPUTS(size_t virtual_log_n = CONST_PROOF_SIZE_LOG_N) + static constexpr size_t PROOF_LENGTH_WITHOUT_PUB_INPUTS(size_t virtual_log_n = VIRTUAL_LOG_N) { return OINK_PROOF_LENGTH_WITHOUT_PUB_INPUTS + DECIDER_PROOF_LENGTH(virtual_log_n); } @@ -75,7 +75,6 @@ class UltraKeccakFlavor : public bb::UltraFlavor { static constexpr size_t VERIFICATION_KEY_LENGTH = /* 1. Metadata (log_circuit_size, num_public_inputs, pub_inputs_offset) */ (3 * num_elements_fr) + /* 2. NUM_PRECOMPUTED_ENTITIES commitments */ (NUM_PRECOMPUTED_ENTITIES * num_elements_comm); - VerificationKey() = default; VerificationKey(const size_t circuit_size, const size_t num_public_inputs) : NativeVerificationKey_(circuit_size, num_public_inputs) @@ -92,61 +91,6 @@ class UltraKeccakFlavor : public bb::UltraFlavor { commitment = commitment_key.commit(polynomial); } } - - /** - * @brief Adds the verification key witnesses directly to the transcript. - * @details Needed to make sure the Origin Tag system works. See the base class function for - * more details. 
- * - * @param domain_separator - * @param transcript - * - * @returns The hash of the verification key - */ - fr add_hash_to_transcript(const std::string& domain_separator, Transcript& transcript) const override - { - // This hash contains a hash of the entire vk - including all of the elements - const fr hash = this->hash(); - - transcript.add_to_hash_buffer(domain_separator + "vk_hash", hash); - return hash; - } - - // Don't statically check for object completeness. - using MSGPACK_NO_STATIC_CHECK = std::true_type; - - // For serialising and deserialising data - MSGPACK_FIELDS(log_circuit_size, - num_public_inputs, - pub_inputs_offset, - q_m, - q_c, - q_l, - q_r, - q_o, - q_4, - q_lookup, - q_arith, - q_delta_range, - q_elliptic, - q_memory, - q_nnf, - q_poseidon2_external, - q_poseidon2_internal, - sigma_1, - sigma_2, - sigma_3, - sigma_4, - id_1, - id_2, - id_3, - id_4, - table_1, - table_2, - table_3, - table_4, - lagrange_first, - lagrange_last); }; // Specialize for Ultra (general case used in UltraRecursive). diff --git a/barretenberg/cpp/src/barretenberg/flavor/ultra_keccak_zk_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/ultra_keccak_zk_flavor.hpp index cf84f3282c8e..365d120fee9b 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/ultra_keccak_zk_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/ultra_keccak_zk_flavor.hpp @@ -22,7 +22,7 @@ class UltraKeccakZKFlavor : public UltraKeccakFlavor { "LIBRA_UNIVARIATES_LENGTH must be equal to UltraKeccakZKFlavor::BATCHED_RELATION_PARTIAL_LENGTH"); // Proof length formula method - static constexpr size_t PROOF_LENGTH_WITHOUT_PUB_INPUTS(size_t virtual_log_n = CONST_PROOF_SIZE_LOG_N) + static constexpr size_t PROOF_LENGTH_WITHOUT_PUB_INPUTS(size_t virtual_log_n = VIRTUAL_LOG_N) { return /* 1. NUM_WITNESS_ENTITIES commitments */ (NUM_WITNESS_ENTITIES * num_elements_comm) + /* 2. 
Libra concatenation commitment*/ (num_elements_comm) + @@ -86,7 +86,7 @@ class UltraKeccakZKFlavor : public UltraKeccakFlavor { * proof. * */ - void deserialize_full_transcript(size_t public_input_size, size_t virtual_log_n = CONST_PROOF_SIZE_LOG_N) + void deserialize_full_transcript(size_t public_input_size, size_t virtual_log_n = VIRTUAL_LOG_N) { // take current proof and put them into the struct size_t num_frs_read = 0; @@ -141,47 +141,47 @@ class UltraKeccakZKFlavor : public UltraKeccakFlavor { * modified. * */ - void serialize_full_transcript(size_t virtual_log_n = CONST_PROOF_SIZE_LOG_N) + void serialize_full_transcript(size_t virtual_log_n = VIRTUAL_LOG_N) { auto& proof_data = this->proof_data; size_t old_proof_length = proof_data.size(); proof_data.clear(); // clear proof_data so the rest of the function can replace it for (const auto& public_input : this->public_inputs) { - Base::template serialize_to_buffer(public_input, proof_data); + Base::serialize_to_buffer(public_input, proof_data); } - Base::template serialize_to_buffer(this->w_l_comm, proof_data); - Base::template serialize_to_buffer(this->w_r_comm, proof_data); - Base::template serialize_to_buffer(this->w_o_comm, proof_data); - Base::template serialize_to_buffer(this->lookup_read_counts_comm, proof_data); - Base::template serialize_to_buffer(this->lookup_read_tags_comm, proof_data); - Base::template serialize_to_buffer(this->w_4_comm, proof_data); - Base::template serialize_to_buffer(this->lookup_inverses_comm, proof_data); - Base::template serialize_to_buffer(this->z_perm_comm, proof_data); - Base::template serialize_to_buffer(libra_concatenation_commitment, proof_data); - Base::template serialize_to_buffer(libra_sum, proof_data); + Base::serialize_to_buffer(this->w_l_comm, proof_data); + Base::serialize_to_buffer(this->w_r_comm, proof_data); + Base::serialize_to_buffer(this->w_o_comm, proof_data); + Base::serialize_to_buffer(this->lookup_read_counts_comm, proof_data); + 
Base::serialize_to_buffer(this->lookup_read_tags_comm, proof_data); + Base::serialize_to_buffer(this->w_4_comm, proof_data); + Base::serialize_to_buffer(this->lookup_inverses_comm, proof_data); + Base::serialize_to_buffer(this->z_perm_comm, proof_data); + Base::serialize_to_buffer(libra_concatenation_commitment, proof_data); + Base::serialize_to_buffer(libra_sum, proof_data); for (size_t i = 0; i < virtual_log_n; ++i) { - Base::template serialize_to_buffer(zk_sumcheck_univariates[i], proof_data); + Base::serialize_to_buffer(zk_sumcheck_univariates[i], proof_data); } - Base::template serialize_to_buffer(libra_claimed_evaluation, proof_data); + Base::serialize_to_buffer(libra_claimed_evaluation, proof_data); - Base::template serialize_to_buffer(this->sumcheck_evaluations, proof_data); - Base::template serialize_to_buffer(libra_grand_sum_commitment, proof_data); - Base::template serialize_to_buffer(libra_quotient_commitment, proof_data); - Base::template serialize_to_buffer(hiding_polynomial_commitment, proof_data); - Base::template serialize_to_buffer(hiding_polynomial_eval, proof_data); + Base::serialize_to_buffer(this->sumcheck_evaluations, proof_data); + Base::serialize_to_buffer(libra_grand_sum_commitment, proof_data); + Base::serialize_to_buffer(libra_quotient_commitment, proof_data); + Base::serialize_to_buffer(hiding_polynomial_commitment, proof_data); + Base::serialize_to_buffer(hiding_polynomial_eval, proof_data); for (size_t i = 0; i < virtual_log_n - 1; ++i) { - Base::template serialize_to_buffer(this->gemini_fold_comms[i], proof_data); + Base::serialize_to_buffer(this->gemini_fold_comms[i], proof_data); } for (size_t i = 0; i < virtual_log_n; ++i) { - Base::template serialize_to_buffer(this->gemini_fold_evals[i], proof_data); + Base::serialize_to_buffer(this->gemini_fold_evals[i], proof_data); } - Base::template serialize_to_buffer(libra_concatenation_eval, proof_data); - Base::template serialize_to_buffer(libra_shifted_grand_sum_eval, proof_data); - 
Base::template serialize_to_buffer(libra_grand_sum_eval, proof_data); - Base::template serialize_to_buffer(libra_quotient_eval, proof_data); - Base::template serialize_to_buffer(this->shplonk_q_comm, proof_data); - Base::template serialize_to_buffer(this->kzg_w_comm, proof_data); + Base::serialize_to_buffer(libra_concatenation_eval, proof_data); + Base::serialize_to_buffer(libra_shifted_grand_sum_eval, proof_data); + Base::serialize_to_buffer(libra_grand_sum_eval, proof_data); + Base::serialize_to_buffer(libra_quotient_eval, proof_data); + Base::serialize_to_buffer(this->shplonk_q_comm, proof_data); + Base::serialize_to_buffer(this->kzg_w_comm, proof_data); BB_ASSERT_EQ(proof_data.size(), old_proof_length); } diff --git a/barretenberg/cpp/src/barretenberg/flavor/ultra_recursive_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/ultra_recursive_flavor.hpp index 497b3e4b5c25..7bce1ab1429f 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/ultra_recursive_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/ultra_recursive_flavor.hpp @@ -60,6 +60,7 @@ template class UltraRecursiveFlavor_ { using NativeVerificationKey = NativeFlavor::VerificationKey; using Transcript = bb::BaseTranscript>; + static constexpr size_t VIRTUAL_LOG_N = UltraFlavor::VIRTUAL_LOG_N; // indicates when evaluating sumcheck, edges can be left as degree-1 monomials static constexpr bool USE_SHORT_MONOMIALS = UltraFlavor::USE_SHORT_MONOMIALS; @@ -139,18 +140,18 @@ template class UltraRecursiveFlavor_ { * @param builder * @param elements */ - VerificationKey(CircuitBuilder& builder, std::span elements) + VerificationKey(std::span elements) { using namespace bb::stdlib::field_conversion; size_t num_frs_read = 0; - this->log_circuit_size = deserialize_from_frs(builder, elements, num_frs_read); - this->num_public_inputs = deserialize_from_frs(builder, elements, num_frs_read); - this->pub_inputs_offset = deserialize_from_frs(builder, elements, num_frs_read); + this->log_circuit_size = 
deserialize_from_frs(elements, num_frs_read); + this->num_public_inputs = deserialize_from_frs(elements, num_frs_read); + this->pub_inputs_offset = deserialize_from_frs(elements, num_frs_read); for (Commitment& commitment : this->get_all()) { - commitment = deserialize_from_frs(builder, elements, num_frs_read); + commitment = deserialize_from_frs(elements, num_frs_read); } } @@ -169,7 +170,7 @@ template class UltraRecursiveFlavor_ { for (const auto& idx : witness_indices) { vk_fields.emplace_back(FF::from_witness_index(&builder, idx)); } - return VerificationKey(builder, vk_fields); + return VerificationKey(vk_fields); } }; diff --git a/barretenberg/cpp/src/barretenberg/flavor/ultra_rollup_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/ultra_rollup_flavor.hpp index fa5a4fe57c14..f28bf66e37f3 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/ultra_rollup_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/ultra_rollup_flavor.hpp @@ -15,7 +15,7 @@ class UltraRollupFlavor : public bb::UltraFlavor { public: static constexpr size_t num_frs_comm = bb::field_conversion::calc_num_bn254_frs(); static constexpr size_t num_frs_fr = bb::field_conversion::calc_num_bn254_frs(); - static constexpr size_t PROOF_LENGTH_WITHOUT_PUB_INPUTS(size_t virtual_log_n = CONST_PROOF_SIZE_LOG_N) + static constexpr size_t PROOF_LENGTH_WITHOUT_PUB_INPUTS(size_t virtual_log_n = VIRTUAL_LOG_N) { return UltraFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS(virtual_log_n) + IPA_PROOF_LENGTH; } @@ -33,8 +33,6 @@ class UltraRollupFlavor : public bb::UltraFlavor { */ class VerificationKey : public NativeVerificationKey_, Transcript> { public: - static constexpr size_t VERIFICATION_KEY_LENGTH = UltraFlavor::VerificationKey::VERIFICATION_KEY_LENGTH; - virtual ~VerificationKey() = default; bool operator==(const VerificationKey&) const = default; @@ -54,42 +52,6 @@ class UltraRollupFlavor : public bb::UltraFlavor { commitment = commitment_key.commit(polynomial); } } - - // Don't statically check 
for object completeness. - using MSGPACK_NO_STATIC_CHECK = std::true_type; - - // For serialising and deserialising data - MSGPACK_FIELDS(log_circuit_size, - num_public_inputs, - pub_inputs_offset, - q_m, - q_c, - q_l, - q_r, - q_o, - q_4, - q_lookup, - q_arith, - q_delta_range, - q_elliptic, - q_memory, - q_nnf, - q_poseidon2_external, - q_poseidon2_internal, - sigma_1, - sigma_2, - sigma_3, - sigma_4, - id_1, - id_2, - id_3, - id_4, - table_1, - table_2, - table_3, - table_4, - lagrange_first, - lagrange_last); }; using VerifierCommitments = VerifierCommitments_; diff --git a/barretenberg/cpp/src/barretenberg/flavor/ultra_rollup_recursive_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/ultra_rollup_recursive_flavor.hpp index ae438fb4871f..1d9181f9b356 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/ultra_rollup_recursive_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/ultra_rollup_recursive_flavor.hpp @@ -86,18 +86,18 @@ template class UltraRollupRecursiveFlavor_ : public Ultra * @param builder * @param elements */ - VerificationKey(CircuitBuilder& builder, std::span elements) + VerificationKey(std::span elements) { using namespace bb::stdlib::field_conversion; size_t num_frs_read = 0; - this->log_circuit_size = deserialize_from_frs(builder, elements, num_frs_read); - this->num_public_inputs = deserialize_from_frs(builder, elements, num_frs_read); - this->pub_inputs_offset = deserialize_from_frs(builder, elements, num_frs_read); + this->log_circuit_size = deserialize_from_frs(elements, num_frs_read); + this->num_public_inputs = deserialize_from_frs(elements, num_frs_read); + this->pub_inputs_offset = deserialize_from_frs(elements, num_frs_read); for (Commitment& commitment : this->get_all()) { - commitment = deserialize_from_frs(builder, elements, num_frs_read); + commitment = deserialize_from_frs(elements, num_frs_read); } } @@ -116,7 +116,7 @@ template class UltraRollupRecursiveFlavor_ : public Ultra for (const auto& idx : witness_indices) 
{ vk_fields.emplace_back(FF::from_witness_index(&builder, idx)); } - return VerificationKey(builder, vk_fields); + return VerificationKey(vk_fields); } }; diff --git a/barretenberg/cpp/src/barretenberg/flavor/ultra_zk_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/ultra_zk_flavor.hpp index 7f8ef7b5aef9..831bd6f053a7 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/ultra_zk_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/ultra_zk_flavor.hpp @@ -158,41 +158,41 @@ class UltraZKFlavor : public UltraFlavor { size_t old_proof_length = proof_data.size(); proof_data.clear(); // clear proof_data so the rest of the function can replace it for (const auto& input : this->public_inputs) { - Base::template serialize_to_buffer(input, proof_data); + Base::serialize_to_buffer(input, proof_data); } - Base::template serialize_to_buffer(this->w_l_comm, proof_data); - Base::template serialize_to_buffer(this->w_r_comm, proof_data); - Base::template serialize_to_buffer(this->w_o_comm, proof_data); - Base::template serialize_to_buffer(this->lookup_read_counts_comm, proof_data); - Base::template serialize_to_buffer(this->lookup_read_tags_comm, proof_data); - Base::template serialize_to_buffer(this->w_4_comm, proof_data); - Base::template serialize_to_buffer(this->lookup_inverses_comm, proof_data); - Base::template serialize_to_buffer(this->z_perm_comm, proof_data); - Base::template serialize_to_buffer(libra_concatenation_commitment, proof_data); - Base::template serialize_to_buffer(libra_sum, proof_data); + Base::serialize_to_buffer(this->w_l_comm, proof_data); + Base::serialize_to_buffer(this->w_r_comm, proof_data); + Base::serialize_to_buffer(this->w_o_comm, proof_data); + Base::serialize_to_buffer(this->lookup_read_counts_comm, proof_data); + Base::serialize_to_buffer(this->lookup_read_tags_comm, proof_data); + Base::serialize_to_buffer(this->w_4_comm, proof_data); + Base::serialize_to_buffer(this->lookup_inverses_comm, proof_data); + 
Base::serialize_to_buffer(this->z_perm_comm, proof_data); + Base::serialize_to_buffer(libra_concatenation_commitment, proof_data); + Base::serialize_to_buffer(libra_sum, proof_data); for (size_t i = 0; i < virtual_log_n; ++i) { - Base::template serialize_to_buffer(zk_sumcheck_univariates[i], proof_data); + Base::serialize_to_buffer(zk_sumcheck_univariates[i], proof_data); } - Base::template serialize_to_buffer(libra_claimed_evaluation, proof_data); + Base::serialize_to_buffer(libra_claimed_evaluation, proof_data); - Base::template serialize_to_buffer(this->sumcheck_evaluations, proof_data); - Base::template serialize_to_buffer(libra_grand_sum_commitment, proof_data); - Base::template serialize_to_buffer(libra_quotient_commitment, proof_data); - Base::template serialize_to_buffer(hiding_polynomial_commitment, proof_data); - Base::template serialize_to_buffer(hiding_polynomial_eval, proof_data); + Base::serialize_to_buffer(this->sumcheck_evaluations, proof_data); + Base::serialize_to_buffer(libra_grand_sum_commitment, proof_data); + Base::serialize_to_buffer(libra_quotient_commitment, proof_data); + Base::serialize_to_buffer(hiding_polynomial_commitment, proof_data); + Base::serialize_to_buffer(hiding_polynomial_eval, proof_data); for (size_t i = 0; i < virtual_log_n - 1; ++i) { - Base::template serialize_to_buffer(this->gemini_fold_comms[i], proof_data); + Base::serialize_to_buffer(this->gemini_fold_comms[i], proof_data); } for (size_t i = 0; i < virtual_log_n; ++i) { - Base::template serialize_to_buffer(this->gemini_fold_evals[i], proof_data); + Base::serialize_to_buffer(this->gemini_fold_evals[i], proof_data); } - Base::template serialize_to_buffer(libra_concatenation_eval, proof_data); - Base::template serialize_to_buffer(libra_shifted_grand_sum_eval, proof_data); - Base::template serialize_to_buffer(libra_grand_sum_eval, proof_data); - Base::template serialize_to_buffer(libra_quotient_eval, proof_data); - Base::template serialize_to_buffer(this->shplonk_q_comm, 
proof_data); - Base::template serialize_to_buffer(this->kzg_w_comm, proof_data); + Base::serialize_to_buffer(libra_concatenation_eval, proof_data); + Base::serialize_to_buffer(libra_shifted_grand_sum_eval, proof_data); + Base::serialize_to_buffer(libra_grand_sum_eval, proof_data); + Base::serialize_to_buffer(libra_quotient_eval, proof_data); + Base::serialize_to_buffer(this->shplonk_q_comm, proof_data); + Base::serialize_to_buffer(this->kzg_w_comm, proof_data); BB_ASSERT_EQ(proof_data.size(), old_proof_length); } diff --git a/barretenberg/cpp/src/barretenberg/goblin/goblin.cpp b/barretenberg/cpp/src/barretenberg/goblin/goblin.cpp index 01b3d713ec9d..6a2f0acc3298 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/goblin.cpp +++ b/barretenberg/cpp/src/barretenberg/goblin/goblin.cpp @@ -7,6 +7,7 @@ #include "goblin.hpp" #include "barretenberg/common/assert.hpp" +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/eccvm/eccvm_verifier.hpp" #include "barretenberg/translator_vm/translator_prover.hpp" #include "barretenberg/translator_vm/translator_proving_key.hpp" @@ -21,15 +22,16 @@ Goblin::Goblin(CommitmentKey bn254_commitment_key, const std::shar , transcript(transcript) {} -void Goblin::prove_merge(const std::shared_ptr& transcript) +void Goblin::prove_merge(const std::shared_ptr& transcript, const MergeSettings merge_settings) { - PROFILE_THIS_NAME("Goblin::merge"); - MergeProver merge_prover{ op_queue, MergeSettings::PREPEND, commitment_key, transcript }; + BB_BENCH_NAME("Goblin::prove_merge"); + MergeProver merge_prover{ op_queue, merge_settings, commitment_key, transcript }; merge_verification_queue.push_back(merge_prover.construct_proof()); } void Goblin::prove_eccvm() { + BB_BENCH_NAME("Goblin::prove_eccvm"); ECCVMBuilder eccvm_builder(op_queue); ECCVMProver eccvm_prover(eccvm_builder, transcript); goblin_proof.eccvm_proof = eccvm_prover.construct_proof(); @@ -40,18 +42,18 @@ void Goblin::prove_eccvm() void Goblin::prove_translator() { - 
PROFILE_THIS_NAME("Create TranslatorBuilder and TranslatorProver"); - TranslatorBuilder translator_builder(translation_batching_challenge_v, evaluation_challenge_x, op_queue); + BB_BENCH_NAME("Goblin::prove_translator"); + TranslatorBuilder translator_builder(translation_batching_challenge_v, evaluation_challenge_x, op_queue, avm_mode); auto translator_key = std::make_shared(translator_builder, commitment_key); TranslatorProver translator_prover(translator_key, transcript); goblin_proof.translator_proof = translator_prover.construct_proof(); } -GoblinProof Goblin::prove() +GoblinProof Goblin::prove(const MergeSettings merge_settings) { - PROFILE_THIS_NAME("Goblin::prove"); + BB_BENCH_NAME("Goblin::prove"); - prove_merge(transcript); // Use shared transcript for merge proving + prove_merge(transcript, merge_settings); // Use shared transcript for merge proving info("Constructing a Goblin proof with num ultra ops = ", op_queue->get_ultra_ops_table_num_rows()); BB_ASSERT_EQ(merge_verification_queue.size(), @@ -59,32 +61,27 @@ GoblinProof Goblin::prove() "Goblin::prove: merge_verification_queue should contain only a single proof at this stage."); goblin_proof.merge_proof = merge_verification_queue.back(); - { - PROFILE_THIS_NAME("prove_eccvm"); - vinfo("prove eccvm..."); - prove_eccvm(); - vinfo("finished eccvm proving."); - } - { - PROFILE_THIS_NAME("prove_translator"); - vinfo("prove translator..."); - prove_translator(); - vinfo("finished translator proving."); - } + vinfo("prove eccvm..."); + prove_eccvm(); + vinfo("finished eccvm proving."); + vinfo("prove translator..."); + prove_translator(); + vinfo("finished translator proving."); return goblin_proof; } std::pair Goblin::recursively_verify_merge( MegaBuilder& builder, const RecursiveMergeCommitments& merge_commitments, - const std::shared_ptr& transcript) + const std::shared_ptr& transcript, + const MergeSettings merge_settings) { ASSERT(!merge_verification_queue.empty()); // Recursively verify the next merge 
proof in the verification queue in a FIFO manner const MergeProof& merge_proof = merge_verification_queue.front(); const stdlib::Proof stdlib_merge_proof(builder, merge_proof); - MergeRecursiveVerifier merge_verifier{ &builder, MergeSettings::PREPEND, transcript }; + MergeRecursiveVerifier merge_verifier{ &builder, merge_settings, transcript }; auto [pairing_points, merged_table_commitments] = merge_verifier.verify_proof(stdlib_merge_proof, merge_commitments); @@ -95,9 +92,10 @@ std::pair Goblin::recu bool Goblin::verify(const GoblinProof& proof, const MergeCommitments& merge_commitments, - const std::shared_ptr& transcript) + const std::shared_ptr& transcript, + const MergeSettings merge_settings) { - MergeVerifier merge_verifier(MergeSettings::PREPEND, transcript); + MergeVerifier merge_verifier(merge_settings, transcript); auto [merge_verified, merged_table_commitments] = merge_verifier.verify_proof(proof.merge_proof, merge_commitments); ECCVMVerifier eccvm_verifier(transcript); @@ -126,4 +124,13 @@ bool Goblin::verify(const GoblinProof& proof, op_queue_consistency_verified; } +void Goblin::ensure_well_formed_op_queue_for_avm(MegaBuilder& builder) const +{ + BB_ASSERT_EQ(avm_mode, true, "ensure_well_formed_op_queue should only be called for avm"); + builder.queue_ecc_no_op(); + builder.queue_ecc_random_op(); + builder.queue_ecc_random_op(); + builder.queue_ecc_random_op(); +} + } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/goblin/goblin.hpp b/barretenberg/cpp/src/barretenberg/goblin/goblin.hpp index 46cbf6fdf18f..d4f63a921c91 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/goblin.hpp +++ b/barretenberg/cpp/src/barretenberg/goblin/goblin.hpp @@ -15,9 +15,9 @@ #include "barretenberg/stdlib/proof/proof.hpp" #include "barretenberg/translator_vm/translator_circuit_builder.hpp" #include "barretenberg/translator_vm/translator_flavor.hpp" -#include "barretenberg/ultra_honk/decider_proving_key.hpp" #include 
"barretenberg/ultra_honk/merge_prover.hpp" #include "barretenberg/ultra_honk/merge_verifier.hpp" +#include "barretenberg/ultra_honk/prover_instance.hpp" namespace bb { @@ -56,6 +56,11 @@ class Goblin { std::deque merge_verification_queue; // queue of merge proofs to be verified + // In AVM we only use Goblin for a single circuit (it's recursive verifier) whose proof is not required to be + // zero-knowledge. While Translator will still expect to find random ops at the beginning to ensure the accumulation + // result remains at a fixed row we opt for not adding random ops at the end of the op queue. + bool avm_mode = false; + struct VerificationKey { std::shared_ptr eccvm_verification_key = std::make_shared(); std::shared_ptr translator_verification_key = @@ -71,7 +76,8 @@ class Goblin { * * @param transcript */ - void prove_merge(const std::shared_ptr& transcript = std::make_shared()); + void prove_merge(const std::shared_ptr& transcript = std::make_shared(), + const MergeSettings merge_settings = MergeSettings::PREPEND); /** * @brief Construct an ECCVM proof and the translation polynomial evaluations @@ -89,7 +95,7 @@ class Goblin { * * @return Proof */ - GoblinProof prove(); + GoblinProof prove(const MergeSettings merge_settings = MergeSettings::PREPEND); /** * @brief Recursively verify the next merge proof in the merge verification queue. @@ -98,12 +104,14 @@ class Goblin { * @param builder The circuit in which the recursive verification will be performed. * @param inputs_commitments The commitment used by the Merge verifier * @param transcript The transcript to be passed to the MergeRecursiveVerifier. 
+ * @param merge_settings How the most recent ecc op subtable is going to be merged into the table of ecc ops * @return Pair of PairingPoints and commitments to the merged tables as read from the proof by the Merge verifier */ std::pair recursively_verify_merge( MegaBuilder& builder, const RecursiveMergeCommitments& merge_commitments, - const std::shared_ptr& transcript); + const std::shared_ptr& transcript, + const MergeSettings merge_settings = MergeSettings::PREPEND); /** * @brief Verify a full Goblin proof (ECCVM, Translator, merge) @@ -112,13 +120,25 @@ class Goblin { * @param inputs_commitments The commitments used by the Merge verifier * @param merged_table_commitment The commitment to the merged table as read from the proof * @param transcript - * + * @param merge_settings How the most recent ecc op subtable is going to be merged into the table of ecc ops * @return Pair of verification result and commitments to the merged tables as read from the proof by the Merge * verifier */ static bool verify(const GoblinProof& proof, const MergeCommitments& merge_commitments, - const std::shared_ptr& transcript); + const std::shared_ptr& transcript, + const MergeSettings merge_settings = MergeSettings::PREPEND); + + /** + * @brief Translator requires the op queue to start with a no-op to ensure op queue polynomials are shiftable and + * then expects three random ops. This is due to the ZK requirement in ClientIVC. We need to also ensure these ops + * are present when Goblin is used for AVM, although we only ever have a single table of ecc ops and no ZK + * requiements. 
+ * + * @todo (https://github.com/AztecProtocol/barretenberg/issues/1537) Asses whether two Translator variants (one with + * Zk and one without) would be a better option + */ + void ensure_well_formed_op_queue_for_avm(MegaBuilder& builder) const; }; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp b/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp index 294dd9c57708..bc12d406a356 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp +++ b/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp @@ -7,11 +7,12 @@ #pragma once #include "barretenberg/commitment_schemes/commitment_key.hpp" -#include "barretenberg/common/op_count.hpp" +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/crypto/ecdsa/ecdsa.hpp" #include "barretenberg/crypto/merkle_tree/memory_store.hpp" #include "barretenberg/crypto/merkle_tree/merkle_tree.hpp" #include "barretenberg/flavor/mega_flavor.hpp" +#include "barretenberg/goblin/goblin.hpp" #include "barretenberg/srs/global_crs.hpp" #include "barretenberg/stdlib/encryption/ecdsa/ecdsa.hpp" #include "barretenberg/stdlib/hash/keccak/keccak.hpp" @@ -28,7 +29,7 @@ template void generate_sha256_test_circuit(Builder& builder, { std::string in; in.resize(32); - stdlib::packed_byte_array input(&builder, in); + stdlib::byte_array input(&builder, in); for (size_t i = 0; i < num_iterations; i++) { input = stdlib::SHA256::hash(input); } @@ -63,11 +64,10 @@ class GoblinMockCircuits { using Flavor = bb::MegaFlavor; using RecursiveFlavor = bb::MegaRecursiveFlavor_; using RecursiveVerifier = bb::stdlib::recursion::honk::UltraRecursiveVerifier_; - using DeciderVerificationKey = bb::DeciderVerificationKey_; - using RecursiveDeciderVerificationKey = - ::bb::stdlib::recursion::honk::RecursiveDeciderVerificationKey_; - using RecursiveVKAndHash = RecursiveDeciderVerificationKey::VKAndHash; - using RecursiveVerifierAccumulator = std::shared_ptr; + using VerifierInstance = 
bb::VerifierInstance_; + using RecursiveVerifierInstance = ::bb::stdlib::recursion::honk::RecursiveVerifierInstance_; + using RecursiveVKAndHash = RecursiveVerifierInstance::VKAndHash; + using RecursiveVerifierAccumulator = std::shared_ptr; using VerificationKey = Flavor::VerificationKey; static constexpr size_t NUM_WIRES = Flavor::NUM_WIRES; @@ -83,7 +83,7 @@ class GoblinMockCircuits { */ static void construct_mock_app_circuit(MegaBuilder& builder, bool large = false) { - PROFILE_THIS(); + BB_BENCH(); if (large) { // Results in circuit size 2^19 generate_sha256_test_circuit(builder, 9); @@ -95,7 +95,7 @@ class GoblinMockCircuits { // TODO(https://github.com/AztecProtocol/barretenberg/issues/911): We require goblin ops to be added to the // function circuit because we cannot support zero commtiments. While the builder handles this at - // DeciderProvingKey creation stage via the add_gates_to_ensure_all_polys_are_non_zero function for other + // ProverInstance creation stage via the add_gates_to_ensure_all_polys_are_non_zero function for other // MegaHonk circuits (where we don't explicitly need to add goblin ops), in IVC merge proving happens prior to // folding where the absense of goblin ecc ops will result in zero commitments. MockCircuits::construct_goblin_ecc_op_circuit(builder); @@ -109,7 +109,7 @@ class GoblinMockCircuits { */ static void add_some_ecc_op_gates(MegaBuilder& builder) { - PROFILE_THIS(); + BB_BENCH(); // Add some arbitrary ecc op gates for (size_t i = 0; i < 3; ++i) { @@ -125,10 +125,12 @@ class GoblinMockCircuits { /** * @brief Add some randomness into the op queue. 
*/ - static void randomise_op_queue(MegaBuilder& builder) + static void randomise_op_queue(MegaBuilder& builder, size_t num_ops) { - builder.queue_ecc_random_op(); - builder.queue_ecc_random_op(); + + for (size_t i = 0; i < num_ops; ++i) { + builder.queue_ecc_random_op(); + } } /** @@ -136,19 +138,34 @@ class GoblinMockCircuits { * * @param builder */ - static void construct_simple_circuit(MegaBuilder& builder, bool last_circuit = false) + static void construct_simple_circuit(MegaBuilder& builder) { - PROFILE_THIS(); - // The last circuit to be accumulated must contain a no-op - if (last_circuit) { - builder.queue_ecc_no_op(); - } + BB_BENCH(); add_some_ecc_op_gates(builder); MockCircuits::construct_arithmetic_circuit(builder); bb::stdlib::recursion::honk::DefaultIO::add_default(builder); } + static void construct_and_merge_mock_circuits(Goblin& goblin, const size_t num_circuits = 3) + { + for (size_t idx = 0; idx < num_circuits - 1; ++idx) { + MegaCircuitBuilder builder{ goblin.op_queue }; + if (idx == num_circuits - 2) { + // Last circuit appended needs to begin with a no-op for translator to be shiftable + builder.queue_ecc_no_op(); + randomise_op_queue(builder, TranslatorCircuitBuilder::NUM_RANDOM_OPS_START); + } + construct_simple_circuit(builder); + goblin.prove_merge(); + // Pop the merge proof from the queue, Goblin will be verified at the end + goblin.merge_verification_queue.pop_front(); + } + MegaCircuitBuilder builder{ goblin.op_queue }; + GoblinMockCircuits::construct_simple_circuit(builder); + randomise_op_queue(builder, TranslatorCircuitBuilder::NUM_RANDOM_OPS_END); + } + /** * @brief Construct a mock kernel circuit * @details Construct an arbitrary circuit meant to represent the aztec private function execution kernel. Recursive @@ -160,7 +177,7 @@ class GoblinMockCircuits { */ static void construct_mock_folding_kernel(MegaBuilder& builder) { - PROFILE_THIS(); + BB_BENCH(); // Add operations representing general kernel logic e.g. state updates. 
Note: these are structured to make // the kernel "full" within the dyadic size 2^17 diff --git a/barretenberg/cpp/src/barretenberg/goblin/mock_circuits_pinning.test.cpp b/barretenberg/cpp/src/barretenberg/goblin/mock_circuits_pinning.test.cpp index 72036b8a5d0f..dd0a5e873728 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/mock_circuits_pinning.test.cpp +++ b/barretenberg/cpp/src/barretenberg/goblin/mock_circuits_pinning.test.cpp @@ -13,7 +13,7 @@ using namespace bb; */ class MegaMockCircuitsPinning : public ::testing::Test { protected: - using DeciderProvingKey = DeciderProvingKey_; + using ProverInstance = ProverInstance_; static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } }; @@ -24,11 +24,11 @@ TEST_F(MegaMockCircuitsPinning, AppCircuitSizes) MegaCircuitBuilder app_circuit{ goblin.op_queue }; GoblinMockCircuits::construct_mock_app_circuit(app_circuit, large); TraceSettings trace_settings{ AZTEC_TRACE_STRUCTURE }; - auto proving_key = std::make_shared(app_circuit, trace_settings); + auto prover_instance = std::make_shared(app_circuit, trace_settings); if (large) { - EXPECT_EQ(proving_key->log_dyadic_size(), 19); + EXPECT_EQ(prover_instance->log_dyadic_size(), 19); } else { - EXPECT_EQ(proving_key->log_dyadic_size(), 19); + EXPECT_EQ(prover_instance->log_dyadic_size(), 19); }; }; run_test(true); @@ -44,8 +44,8 @@ TEST_F(MegaMockCircuitsPinning, SmallTestStructuredCircuitSize) MegaCircuitBuilder app_circuit{ goblin.op_queue }; stdlib::recursion::honk::DefaultIO::add_default(app_circuit); TraceSettings trace_settings{ SMALL_TEST_STRUCTURE }; - auto proving_key = std::make_shared(app_circuit, trace_settings); - EXPECT_EQ(proving_key->log_dyadic_size(), 18); + auto prover_instance = std::make_shared(app_circuit, trace_settings); + EXPECT_EQ(prover_instance->log_dyadic_size(), 18); } TEST_F(MegaMockCircuitsPinning, AztecStructuredCircuitSize) @@ -54,6 +54,6 @@ TEST_F(MegaMockCircuitsPinning, AztecStructuredCircuitSize) 
MegaCircuitBuilder app_circuit{ goblin.op_queue }; stdlib::recursion::honk::DefaultIO::add_default(app_circuit); TraceSettings trace_settings{ AZTEC_TRACE_STRUCTURE }; - auto proving_key = std::make_shared(app_circuit, trace_settings); - EXPECT_EQ(proving_key->log_dyadic_size(), 18); + auto prover_instance = std::make_shared(app_circuit, trace_settings); + EXPECT_EQ(prover_instance->log_dyadic_size(), 18); } diff --git a/barretenberg/cpp/src/barretenberg/grumpkin_srs_gen/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/grumpkin_srs_gen/CMakeLists.txt index bc7540c72583..624b8ce7e672 100644 --- a/barretenberg/cpp/src/barretenberg/grumpkin_srs_gen/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/grumpkin_srs_gen/CMakeLists.txt @@ -11,4 +11,16 @@ if (NOT(FUZZING)) ecc crypto_sha256 ) -endif() \ No newline at end of file + if(ENABLE_STACKTRACES) + target_link_libraries( + grumpkin_srs_gen + PUBLIC + Backward::Interface + ) + target_link_options( + grumpkin_srs_gen + PRIVATE + -ldw -lelf + ) + endif() +endif() diff --git a/barretenberg/cpp/src/barretenberg/honk/composer/permutation_lib.hpp b/barretenberg/cpp/src/barretenberg/honk/composer/permutation_lib.hpp index edc9b9533bfc..7882bbf17ad3 100644 --- a/barretenberg/cpp/src/barretenberg/honk/composer/permutation_lib.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/composer/permutation_lib.hpp @@ -90,7 +90,7 @@ template struct PermutationMapping { */ PermutationMapping(size_t circuit_size) { - PROFILE_THIS_NAME("PermutationMapping constructor"); + BB_BENCH_NAME("PermutationMapping constructor"); for (size_t wire_idx = 0; wire_idx < NUM_WIRES; ++wire_idx) { sigmas[wire_idx] = Mapping(circuit_size); @@ -218,20 +218,23 @@ PermutationMapping compute_permutation_mapping( * * @param permutation_polynomials sigma or ID poly * @param permutation_mappings - * @param dyadic_size dyadic size of the execution trace * @param active_region_data specifies regions of execution trace with non-trivial values */ template void 
compute_honk_style_permutation_lagrange_polynomials_from_mapping( const RefSpan& permutation_polynomials, const std::array& permutation_mappings, - const size_t dyadic_size, ActiveRegionData& active_region_data) { using FF = typename Flavor::FF; size_t domain_size = active_region_data.size(); + // SEPARATOR ensures that the evaluations of `id_i` (`sigma_i`) and `id_j`(`sigma_j`) polynomials on the boolean + // hypercube do not intersect for i != j. + const size_t SEPARATOR = PERMUTATION_ARGUMENT_VALUE_SEPARATOR; + BB_ASSERT_LT(permutation_polynomials[0].size(), SEPARATOR); + const MultithreadData thread_data = calculate_thread_data(domain_size); size_t wire_idx = 0; @@ -249,21 +252,21 @@ void compute_honk_style_permutation_lagrange_polynomials_from_mapping( if (current_is_public_input) { // We intentionally want to break the cycles of the public input variables. // During the witness generation, the left and right wire polynomials at idx i contain the i-th - // public input. The CyclicPermutation created for these variables always start with (i) -> (n+i), - // followed by the indices of the variables in the "real" gates. We make i point to - // -(i+1), so that the only way of repairing the cycle is add the mapping + // public input. Let n = SEPARATOR. The CyclicPermutation created for these variables + // always start with (i) -> (n+i), followed by the indices of the variables in the "real" gates. We + // make i point to -(i+1), so that the only way of repairing the cycle is add the mapping // -(i+1) -> (n+i) // These indices are chosen so they can easily be computed by the verifier. 
They can expect // the running product to be equal to the "public input delta" that is computed // in - current_permutation_poly.at(poly_idx) = -FF(current_row_idx + 1 + dyadic_size * current_col_idx); + current_permutation_poly.at(poly_idx) = -FF(current_row_idx + 1 + SEPARATOR * current_col_idx); } else if (current_is_tag) { // Set evaluations to (arbitrary) values disjoint from non-tag values - current_permutation_poly.at(poly_idx) = dyadic_size * Flavor::NUM_WIRES + current_row_idx; + current_permutation_poly.at(poly_idx) = SEPARATOR * Flavor::NUM_WIRES + current_row_idx; } else { // For the regular permutation we simply point to the next location by setting the // evaluation to its idx - current_permutation_poly.at(poly_idx) = FF(current_row_idx + dyadic_size * current_col_idx); + current_permutation_poly.at(poly_idx) = FF(current_row_idx + SEPARATOR * current_col_idx); } } }); @@ -273,12 +276,8 @@ void compute_honk_style_permutation_lagrange_polynomials_from_mapping( } // namespace /** - * @brief Compute Honk style generalized permutation sigmas and ids and add to proving_key - * - * @param circuit - * @param proving_key - * @param copy_cycles pre-computed sets of wire addresses whose values should be copy constrained - * + * @brief Compute Honk style generalized permutation sigmas and ids and add to prover_instance, where the + * copy_cycles are pre-computed sets of wire addresses whose values should be copy constrained. 
*/ template void compute_permutation_argument_polynomials(const typename Flavor::CircuitBuilder& circuit, @@ -287,23 +286,23 @@ void compute_permutation_argument_polynomials(const typename Flavor::CircuitBuil ActiveRegionData& active_region_data) { constexpr bool generalized = IsUltraOrMegaHonk; - const size_t dyadic_size = polynomials.get_polynomial_size(); - auto mapping = compute_permutation_mapping(circuit, dyadic_size, copy_cycles); + const size_t polynomial_size = polynomials.get_polynomial_size(); + auto mapping = compute_permutation_mapping(circuit, polynomial_size, copy_cycles); // Compute Honk-style sigma and ID polynomials from the corresponding mappings { - PROFILE_THIS_NAME("compute_honk_style_permutation_lagrange_polynomials_from_mapping"); + BB_BENCH_NAME("compute_honk_style_permutation_lagrange_polynomials_from_mapping"); compute_honk_style_permutation_lagrange_polynomials_from_mapping( - polynomials.get_sigmas(), mapping.sigmas, dyadic_size, active_region_data); + polynomials.get_sigmas(), mapping.sigmas, active_region_data); } { - PROFILE_THIS_NAME("compute_honk_style_permutation_lagrange_polynomials_from_mapping"); + BB_BENCH_NAME("compute_honk_style_permutation_lagrange_polynomials_from_mapping"); compute_honk_style_permutation_lagrange_polynomials_from_mapping( - polynomials.get_ids(), mapping.ids, dyadic_size, active_region_data); + polynomials.get_ids(), mapping.ids, active_region_data); } } diff --git a/barretenberg/cpp/src/barretenberg/honk/execution_trace/execution_trace_block.hpp b/barretenberg/cpp/src/barretenberg/honk/execution_trace/execution_trace_block.hpp index 715b9ab808f6..351d53ca3287 100644 --- a/barretenberg/cpp/src/barretenberg/honk/execution_trace/execution_trace_block.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/execution_trace/execution_trace_block.hpp @@ -153,13 +153,13 @@ template class ZeroSelector : public Selector { void set(size_t idx, int value) override { - BB_ASSERT_LT(idx, size_); + ASSERT_DEBUG(idx < size_); 
BB_ASSERT_EQ(value, 0, "Calling ZeroSelector::set with a non zero value."); } void set(size_t idx, const FF& value) override { - BB_ASSERT_LT(idx, size_); + ASSERT_DEBUG(idx < size_); ASSERT(value.is_zero()); size_++; } @@ -170,7 +170,7 @@ template class ZeroSelector : public Selector { const FF& operator[](size_t index) const override { - BB_ASSERT_LT(index, size_); + ASSERT_DEBUG(index < size_); return zero; } diff --git a/barretenberg/cpp/src/barretenberg/honk/execution_trace/execution_trace_usage_tracker.hpp b/barretenberg/cpp/src/barretenberg/honk/execution_trace/execution_trace_usage_tracker.hpp index 719e8430ee0d..c427b7e0c08a 100644 --- a/barretenberg/cpp/src/barretenberg/honk/execution_trace/execution_trace_usage_tracker.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/execution_trace/execution_trace_usage_tracker.hpp @@ -83,7 +83,7 @@ struct ExecutionTraceUsageTracker { max_gates_size = std::max(max_gates_size, circuit.num_gates); - // update the max sixe of the databus and lookup tables + // update the max size of the databus and lookup tables max_databus_size = std::max({ max_databus_size, circuit.get_calldata().size(), circuit.get_secondary_calldata().size(), @@ -99,19 +99,18 @@ struct ExecutionTraceUsageTracker { active_ranges.push_back(Range{ start_idx, end_idx }); } - // The active ranges must also include the rows where the actual databus and lookup table data are stored. - // (Note: lookup tables are constructed from the beginning of the lookup block ; databus data is constructed at - // the start of the trace). - - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1152): should be able to use simply Range{ 0, - // max_databus_size } but this breaks for certain choices of num_threads. It should also be possible to have the - // lookup table data be Range{lookup_start, max_tables_size} but that also breaks. 
- size_t databus_end = - std::max(max_databus_size, static_cast(fixed_sizes.busread.trace_offset() + max_sizes.busread)); - active_ranges.push_back(Range{ 0, databus_end }); - size_t lookups_start = fixed_sizes.lookup.trace_offset(); - size_t lookups_end = lookups_start + std::max(max_tables_size, static_cast(max_sizes.lookup)); - active_ranges.emplace_back(Range{ lookups_start, lookups_end }); + // The active range for lookup-style blocks consists of two components: (1) rows containing the lookup/read + // gates and (2) rows containing the table data itself. The Mega arithmetization contains two such blocks: + // conventional lookups (lookup block) and the databus (busread block). Here we add the ranges corresponding + // to the "table" data for these two blocks. The corresponding gate ranges were added above. + size_t databus_data_start = 0; // Databus column data starts at idx 0 + size_t databus_data_end = databus_data_start + max_databus_size; + active_ranges.push_back(Range{ databus_data_start, databus_data_end }); // region where databus contains data + + // Note: start of table data is aligned with start of the lookup gates block + size_t tables_start = fixed_sizes.lookup.trace_offset(); + size_t tables_end = tables_start + max_tables_size; + active_ranges.emplace_back(Range{ tables_start, tables_end }); // region where table data is stored } void print() diff --git a/barretenberg/cpp/src/barretenberg/honk/execution_trace/mega_execution_trace.hpp b/barretenberg/cpp/src/barretenberg/honk/execution_trace/mega_execution_trace.hpp index 02a32227106b..b085291ca5af 100644 --- a/barretenberg/cpp/src/barretenberg/honk/execution_trace/mega_execution_trace.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/execution_trace/mega_execution_trace.hpp @@ -136,6 +136,11 @@ class MegaTraceBlock : public ExecutionTraceBlock { */ void resize_additional(size_t new_size) { q_busread().resize(new_size); }; + /** + * @brief Default implementation does nothing + */ + virtual void 
set_gate_selector([[maybe_unused]] const fr& value) {} + private: std::array, 9> zero_selectors; }; @@ -146,6 +151,19 @@ class MegaTraceBusReadBlock : public MegaTraceBlock { public: SelectorType& q_busread() override { return gate_selector; } + void set_gate_selector(const fr& value) override + { + gate_selector.emplace_back(value); + q_lookup_type().emplace_back(0); + q_arith().emplace_back(0); + q_delta_range().emplace_back(0); + q_elliptic().emplace_back(0); + q_memory().emplace_back(0); + q_nnf().emplace_back(0); + q_poseidon2_external().emplace_back(0); + q_poseidon2_internal().emplace_back(0); + } + private: SlabVectorSelector gate_selector; }; @@ -154,6 +172,19 @@ class MegaTraceLookupBlock : public MegaTraceBlock { public: SelectorType& q_lookup_type() override { return gate_selector; } + void set_gate_selector(const fr& value) override + { + q_busread().emplace_back(0); + gate_selector.emplace_back(value); + q_arith().emplace_back(0); + q_delta_range().emplace_back(0); + q_elliptic().emplace_back(0); + q_memory().emplace_back(0); + q_nnf().emplace_back(0); + q_poseidon2_external().emplace_back(0); + q_poseidon2_internal().emplace_back(0); + } + private: SlabVectorSelector gate_selector; }; @@ -162,6 +193,19 @@ class MegaTraceArithmeticBlock : public MegaTraceBlock { public: SelectorType& q_arith() override { return gate_selector; } + void set_gate_selector(const fr& value) override + { + q_busread().emplace_back(0); + q_lookup_type().emplace_back(0); + gate_selector.emplace_back(value); + q_delta_range().emplace_back(0); + q_elliptic().emplace_back(0); + q_memory().emplace_back(0); + q_nnf().emplace_back(0); + q_poseidon2_external().emplace_back(0); + q_poseidon2_internal().emplace_back(0); + } + private: SlabVectorSelector gate_selector; }; @@ -170,6 +214,19 @@ class MegaTraceDeltaRangeBlock : public MegaTraceBlock { public: SelectorType& q_delta_range() override { return gate_selector; } + void set_gate_selector(const fr& value) override + { + 
q_busread().emplace_back(0); + q_lookup_type().emplace_back(0); + q_arith().emplace_back(0); + gate_selector.emplace_back(value); + q_elliptic().emplace_back(0); + q_memory().emplace_back(0); + q_nnf().emplace_back(0); + q_poseidon2_external().emplace_back(0); + q_poseidon2_internal().emplace_back(0); + } + private: SlabVectorSelector gate_selector; }; @@ -178,6 +235,19 @@ class MegaTraceEllipticBlock : public MegaTraceBlock { public: SelectorType& q_elliptic() override { return gate_selector; } + void set_gate_selector(const fr& value) override + { + q_busread().emplace_back(0); + q_lookup_type().emplace_back(0); + q_arith().emplace_back(0); + q_delta_range().emplace_back(0); + gate_selector.emplace_back(value); + q_memory().emplace_back(0); + q_nnf().emplace_back(0); + q_poseidon2_external().emplace_back(0); + q_poseidon2_internal().emplace_back(0); + } + private: SlabVectorSelector gate_selector; }; @@ -186,6 +256,19 @@ class MegaTraceMemoryBlock : public MegaTraceBlock { public: SelectorType& q_memory() override { return gate_selector; } + void set_gate_selector(const fr& value) override + { + q_busread().emplace_back(0); + q_lookup_type().emplace_back(0); + q_arith().emplace_back(0); + q_delta_range().emplace_back(0); + q_elliptic().emplace_back(0); + gate_selector.emplace_back(value); + q_nnf().emplace_back(0); + q_poseidon2_external().emplace_back(0); + q_poseidon2_internal().emplace_back(0); + } + private: SlabVectorSelector gate_selector; }; @@ -194,6 +277,19 @@ class MegaTraceNonNativeFieldBlock : public MegaTraceBlock { public: SelectorType& q_nnf() override { return gate_selector; } + void set_gate_selector(const fr& value) override + { + q_busread().emplace_back(0); + q_lookup_type().emplace_back(0); + q_arith().emplace_back(0); + q_delta_range().emplace_back(0); + q_elliptic().emplace_back(0); + q_memory().emplace_back(0); + gate_selector.emplace_back(value); + q_poseidon2_external().emplace_back(0); + q_poseidon2_internal().emplace_back(0); + } + 
private: SlabVectorSelector gate_selector; }; @@ -202,6 +298,19 @@ class MegaTracePoseidon2ExternalBlock : public MegaTraceBlock { public: SelectorType& q_poseidon2_external() override { return gate_selector; } + void set_gate_selector(const fr& value) override + { + q_busread().emplace_back(0); + q_lookup_type().emplace_back(0); + q_arith().emplace_back(0); + q_delta_range().emplace_back(0); + q_elliptic().emplace_back(0); + q_memory().emplace_back(0); + q_nnf().emplace_back(0); + gate_selector.emplace_back(value); + q_poseidon2_internal().emplace_back(0); + } + private: SlabVectorSelector gate_selector; }; @@ -210,6 +319,19 @@ class MegaTracePoseidon2InternalBlock : public MegaTraceBlock { public: SelectorType& q_poseidon2_internal() override { return gate_selector; } + void set_gate_selector(const fr& value) override + { + q_busread().emplace_back(0); + q_lookup_type().emplace_back(0); + q_arith().emplace_back(0); + q_delta_range().emplace_back(0); + q_elliptic().emplace_back(0); + q_memory().emplace_back(0); + q_nnf().emplace_back(0); + q_poseidon2_external().emplace_back(0); + gate_selector.emplace_back(value); + } + private: SlabVectorSelector gate_selector; }; @@ -483,6 +605,4 @@ static constexpr TraceStructure AZTEC_TRACE_STRUCTURE{ .ecc_op = 1000, .poseidon2_internal = 96500, .overflow = 0 }; -template -concept HasAdditionalSelectors = IsAnyOf; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/honk/execution_trace/ultra_execution_trace.hpp b/barretenberg/cpp/src/barretenberg/honk/execution_trace/ultra_execution_trace.hpp index c2ea600fc6b0..50eccde40d0c 100644 --- a/barretenberg/cpp/src/barretenberg/honk/execution_trace/ultra_execution_trace.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/execution_trace/ultra_execution_trace.hpp @@ -43,6 +43,11 @@ class UltraTraceBlock : public ExecutionTraceBlock { q_poseidon2_internal() }; } + /** + * @brief Default implementation does nothing + */ + virtual void set_gate_selector([[maybe_unused]] const 
fr& value) {} + private: std::array, 8> zero_selectors; }; @@ -53,6 +58,18 @@ class UltraTraceLookupBlock : public UltraTraceBlock { public: SelectorType& q_lookup_type() override { return gate_selector; } + void set_gate_selector(const fr& value) override + { + gate_selector.emplace_back(value); + q_arith().emplace_back(0); + q_delta_range().emplace_back(0); + q_elliptic().emplace_back(0); + q_memory().emplace_back(0); + q_nnf().emplace_back(0); + q_poseidon2_external().emplace_back(0); + q_poseidon2_internal().emplace_back(0); + } + private: SlabVectorSelector gate_selector; }; @@ -61,6 +78,18 @@ class UltraTraceArithmeticBlock : public UltraTraceBlock { public: SelectorType& q_arith() override { return gate_selector; } + void set_gate_selector(const fr& value) override + { + q_lookup_type().emplace_back(0); + gate_selector.emplace_back(value); + q_delta_range().emplace_back(0); + q_elliptic().emplace_back(0); + q_memory().emplace_back(0); + q_nnf().emplace_back(0); + q_poseidon2_external().emplace_back(0); + q_poseidon2_internal().emplace_back(0); + } + private: SlabVectorSelector gate_selector; }; @@ -69,6 +98,18 @@ class UltraTraceDeltaRangeBlock : public UltraTraceBlock { public: SelectorType& q_delta_range() override { return gate_selector; } + void set_gate_selector(const fr& value) override + { + q_lookup_type().emplace_back(0); + q_arith().emplace_back(0); + gate_selector.emplace_back(value); + q_elliptic().emplace_back(0); + q_memory().emplace_back(0); + q_nnf().emplace_back(0); + q_poseidon2_external().emplace_back(0); + q_poseidon2_internal().emplace_back(0); + } + private: SlabVectorSelector gate_selector; }; @@ -77,6 +118,18 @@ class UltraTraceEllipticBlock : public UltraTraceBlock { public: SelectorType& q_elliptic() override { return gate_selector; } + void set_gate_selector(const fr& value) override + { + q_lookup_type().emplace_back(0); + q_arith().emplace_back(0); + q_delta_range().emplace_back(0); + gate_selector.emplace_back(value); + 
q_memory().emplace_back(0); + q_nnf().emplace_back(0); + q_poseidon2_external().emplace_back(0); + q_poseidon2_internal().emplace_back(0); + } + private: SlabVectorSelector gate_selector; }; @@ -85,6 +138,18 @@ class UltraTraceMemoryBlock : public UltraTraceBlock { public: SelectorType& q_memory() override { return gate_selector; } + void set_gate_selector(const fr& value) override + { + q_lookup_type().emplace_back(0); + q_arith().emplace_back(0); + q_delta_range().emplace_back(0); + q_elliptic().emplace_back(0); + gate_selector.emplace_back(value); + q_nnf().emplace_back(0); + q_poseidon2_external().emplace_back(0); + q_poseidon2_internal().emplace_back(0); + } + private: SlabVectorSelector gate_selector; }; @@ -93,6 +158,18 @@ class UltraTraceNonNativeFieldBlock : public UltraTraceBlock { public: SelectorType& q_nnf() override { return gate_selector; } + void set_gate_selector(const fr& value) override + { + q_lookup_type().emplace_back(0); + q_arith().emplace_back(0); + q_delta_range().emplace_back(0); + q_elliptic().emplace_back(0); + q_memory().emplace_back(0); + gate_selector.emplace_back(value); + q_poseidon2_external().emplace_back(0); + q_poseidon2_internal().emplace_back(0); + } + private: SlabVectorSelector gate_selector; }; @@ -101,6 +178,18 @@ class UltraTracePoseidon2ExternalBlock : public UltraTraceBlock { public: SelectorType& q_poseidon2_external() override { return gate_selector; } + void set_gate_selector(const fr& value) override + { + q_lookup_type().emplace_back(0); + q_arith().emplace_back(0); + q_delta_range().emplace_back(0); + q_elliptic().emplace_back(0); + q_memory().emplace_back(0); + q_nnf().emplace_back(0); + gate_selector.emplace_back(value); + q_poseidon2_internal().emplace_back(0); + } + private: SlabVectorSelector gate_selector; }; @@ -109,6 +198,18 @@ class UltraTracePoseidon2InternalBlock : public UltraTraceBlock { public: SelectorType& q_poseidon2_internal() override { return gate_selector; } + void set_gate_selector(const fr& 
value) override + { + q_lookup_type().emplace_back(0); + q_arith().emplace_back(0); + q_delta_range().emplace_back(0); + q_elliptic().emplace_back(0); + q_memory().emplace_back(0); + q_nnf().emplace_back(0); + q_poseidon2_external().emplace_back(0); + gate_selector.emplace_back(value); + } + private: SlabVectorSelector gate_selector; }; diff --git a/barretenberg/cpp/src/barretenberg/honk/library/grand_product_delta.hpp b/barretenberg/cpp/src/barretenberg/honk/library/grand_product_delta.hpp index 20ee235765c4..2d2de3a56361 100644 --- a/barretenberg/cpp/src/barretenberg/honk/library/grand_product_delta.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/library/grand_product_delta.hpp @@ -6,6 +6,7 @@ #pragma once +#include "barretenberg/constants.hpp" #include namespace bb { @@ -25,7 +26,6 @@ template typename Flavor::FF compute_public_input_delta(std::span public_inputs, const typename Flavor::FF& beta, const typename Flavor::FF& gamma, - const typename Flavor::FF& log_domain_size, const typename Flavor::FF& offset = 0) { using Field = typename Flavor::FF; @@ -55,10 +55,10 @@ typename Flavor::FF compute_public_input_delta(std::span; using View = typename Accumulator::View; - auto lookup_inverses = View(Relation::template get_inverse_polynomial(in)); + auto lookup_inverses = View(Relation::get_inverse_polynomial(in)); constexpr size_t NUM_TOTAL_TERMS = READ_TERMS + WRITE_TERMS; std::array lookup_terms; @@ -227,7 +225,7 @@ void accumulate_logderivative_permutation_subrelation_contributions(ContainerOve using Accumulator = typename std::tuple_element_t<0, ContainerOverSubrelations>; using View = typename Accumulator::View; - auto permutation_inverses = View(Relation::template get_inverse_polynomial(in)); + auto permutation_inverses = View(Relation::get_inverse_polynomial(in)); constexpr size_t NUM_TOTAL_TERMS = 2; std::array permutation_terms; diff --git a/barretenberg/cpp/src/barretenberg/honk/prover_instance_inspector.cpp 
b/barretenberg/cpp/src/barretenberg/honk/prover_instance_inspector.cpp new file mode 100644 index 000000000000..d5cc20da6070 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/honk/prover_instance_inspector.cpp @@ -0,0 +1,9 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } +// ===================== + +#include "prover_instance_inspector.hpp" + +// Hack to make the module compile. diff --git a/barretenberg/cpp/src/barretenberg/honk/prover_instance_inspector.hpp b/barretenberg/cpp/src/barretenberg/honk/prover_instance_inspector.hpp new file mode 100644 index 000000000000..8da959376c33 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/honk/prover_instance_inspector.hpp @@ -0,0 +1,95 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } +// ===================== + +#pragma once + +#include "barretenberg/common/assert.hpp" +#include "barretenberg/common/log.hpp" +#include "barretenberg/flavor/flavor.hpp" +#include "barretenberg/honk/execution_trace/mega_execution_trace.hpp" +#include "barretenberg/ultra_honk/prover_instance.hpp" + +namespace bb::prover_instance_inspector { + +// Helper for extracting a native Flavor from either a native or recursive flavor. +template > struct NativeFlavorHelper { + using type = Flavor; +}; +template struct NativeFlavorHelper { + using type = typename Flavor::NativeFlavor; +}; + +/** + * @brief Compute the hash of the verification key that results from constructing a proving key from the given circuit. 
+ * @details This is useful for identifying the point of divergence for two circuits that are expected to be identical, + * for example, the circuit constructed from a given acir program with or without a genuine witness. + * + * @tparam Flavor Determines the type of PK and VK to be constructed. + * @tparam Builder The builder for the circuit in question. + */ +template +uint256_t compute_vk_hash(const Builder& circuit_in, + const TraceSettings& trace_settings = TraceSettings{ AZTEC_TRACE_STRUCTURE }) + requires(IsMegaFlavor && IsMegaBuilder) +{ + using NativeFlavor = typename NativeFlavorHelper::type; + using ProverInstance = typename bb::ProverInstance_; + using VerificationKey = NativeFlavor::VerificationKey; + + Builder circuit = circuit_in; // Copy the circuit to avoid modifying the original + + ProverInstance prover_instance{ circuit, trace_settings }; + VerificationKey verification_key{ prover_instance.get_precomputed() }; + + return verification_key.hash(); +} + +// A catch-all for Flavor/Builder combinations where the VK hash is not implemented. 
+template +uint256_t compute_vk_hash(const Builder&, const TraceSettings& = TraceSettings{ AZTEC_TRACE_STRUCTURE }) + requires(!IsMegaFlavor || !IsMegaBuilder) +{ + info("compute_vk_hash: Not implemented for this Flavor/Builder, returning 0."); + return 0; +} + +// Determine whether a polynomial has at least one non-zero coefficient +bool is_non_zero(auto& polynomial) +{ + for (auto& coeff : polynomial) { + if (!coeff.is_zero()) { + return true; + } + } + return false; +} + +/** + * @brief Utility for indicating which polynomials in a decider proving key are identically zero + * + * @param prover_instance + */ +void inspect_prover_instance(auto& prover_instance) +{ + auto& prover_polys = prover_instance->prover_polynomials; + std::vector zero_polys; + for (auto [label, poly] : zip_view(prover_polys.get_labels(), prover_polys.get_all())) { + if (!is_non_zero(poly)) { + zero_polys.emplace_back(label); + } + } + if (zero_polys.empty()) { + info("\nProving Key Inspector: All prover polynomials are non-zero."); + } else { + info("\nProving Key Inspector: The following prover polynomials are identically zero: "); + for (const std::string& label : zero_polys) { + info("\t", label); + } + } + info(); +} + +} // namespace bb::prover_instance_inspector diff --git a/barretenberg/cpp/src/barretenberg/honk/proving_key_inspector.cpp b/barretenberg/cpp/src/barretenberg/honk/proving_key_inspector.cpp deleted file mode 100644 index c22ac2943902..000000000000 --- a/barretenberg/cpp/src/barretenberg/honk/proving_key_inspector.cpp +++ /dev/null @@ -1,9 +0,0 @@ -// === AUDIT STATUS === -// internal: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } -// ===================== - -#include "proving_key_inspector.hpp" - -// Hack to make the module compile. 
\ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/honk/proving_key_inspector.hpp b/barretenberg/cpp/src/barretenberg/honk/proving_key_inspector.hpp deleted file mode 100644 index 003929e23c75..000000000000 --- a/barretenberg/cpp/src/barretenberg/honk/proving_key_inspector.hpp +++ /dev/null @@ -1,95 +0,0 @@ -// === AUDIT STATUS === -// internal: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } -// ===================== - -#pragma once - -#include "barretenberg/common/assert.hpp" -#include "barretenberg/common/log.hpp" -#include "barretenberg/flavor/flavor.hpp" -#include "barretenberg/honk/execution_trace/mega_execution_trace.hpp" -#include "barretenberg/ultra_honk/decider_proving_key.hpp" - -namespace bb::proving_key_inspector { - -// Helper for extracting a native Flavor from either a native or recursive flavor. -template > struct NativeFlavorHelper { - using type = Flavor; -}; -template struct NativeFlavorHelper { - using type = typename Flavor::NativeFlavor; -}; - -/** - * @brief Compute the hash of the verification key that results from constructing a proving key from the given circuit. - * @details This is useful for identifying the point of divergence for two circuits that are expected to be identical, - * for example, the circuit constructed from a given acir program with or without a genuine witness. - * - * @tparam Flavor Determines the type of PK and VK to be constructed. - * @tparam Builder The builder for the circuit in question. 
- */ -template -uint256_t compute_vk_hash(const Builder& circuit_in, - const TraceSettings& trace_settings = TraceSettings{ AZTEC_TRACE_STRUCTURE }) - requires(IsMegaFlavor && IsMegaBuilder) -{ - using NativeFlavor = typename NativeFlavorHelper::type; - using DeciderProvingKey = typename bb::DeciderProvingKey_; - using VerificationKey = NativeFlavor::VerificationKey; - - Builder circuit = circuit_in; // Copy the circuit to avoid modifying the original - - DeciderProvingKey proving_key{ circuit, trace_settings }; - VerificationKey verification_key{ proving_key.get_precomputed() }; - - return verification_key.hash(); -} - -// A catch-all for Flavor/Builder combinations where the VK hash is not implemented. -template -uint256_t compute_vk_hash(const Builder&, const TraceSettings& = TraceSettings{ AZTEC_TRACE_STRUCTURE }) - requires(!IsMegaFlavor || !IsMegaBuilder) -{ - info("compute_vk_hash: Not implemented for this Flavor/Builder, returning 0."); - return 0; -} - -// Determine whether a polynomial has at least one non-zero coefficient -bool is_non_zero(auto& polynomial) -{ - for (auto& coeff : polynomial) { - if (!coeff.is_zero()) { - return true; - } - } - return false; -} - -/** - * @brief Utility for indicating which polynomials in a decider proving key are identically zero - * - * @param decider_proving_key - */ -void inspect_proving_key(auto& decider_proving_key) -{ - auto& prover_polys = decider_proving_key->prover_polynomials; - std::vector zero_polys; - for (auto [label, poly] : zip_view(prover_polys.get_labels(), prover_polys.get_all())) { - if (!is_non_zero(poly)) { - zero_polys.emplace_back(label); - } - } - if (zero_polys.empty()) { - info("\nProving Key Inspector: All prover polynomials are non-zero."); - } else { - info("\nProving Key Inspector: The following prover polynomials are identically zero: "); - for (const std::string& label : zero_polys) { - info("\t", label); - } - } - info(); -} - -} // namespace bb::proving_key_inspector diff --git 
a/barretenberg/cpp/src/barretenberg/honk/types/merkle_hash_type.hpp b/barretenberg/cpp/src/barretenberg/honk/types/merkle_hash_type.hpp deleted file mode 100644 index ebaf9bcec581..000000000000 --- a/barretenberg/cpp/src/barretenberg/honk/types/merkle_hash_type.hpp +++ /dev/null @@ -1,12 +0,0 @@ -// === AUDIT STATUS === -// internal: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } -// ===================== - -#pragma once - -namespace bb::merkle { -// TODO(https://github.com/AztecProtocol/barretenberg/issues/426) -enum HashType { FIXED_BASE_PEDERSEN, LOOKUP_PEDERSEN }; -} // namespace bb::merkle diff --git a/barretenberg/cpp/src/barretenberg/honk/types/public_inputs_type.hpp b/barretenberg/cpp/src/barretenberg/honk/types/public_inputs_type.hpp index 6a136d9f219b..494d511f9986 100644 --- a/barretenberg/cpp/src/barretenberg/honk/types/public_inputs_type.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/types/public_inputs_type.hpp @@ -50,7 +50,8 @@ static constexpr std::size_t KERNEL_PUBLIC_INPUTS_SIZE = /*pairing_inputs*/ PAIRING_POINTS_SIZE + /*kernel_return_data*/ GOBLIN_GROUP_PUBLIC_INPUTS_SIZE + /*app_return_data*/ GOBLIN_GROUP_PUBLIC_INPUTS_SIZE + - /*table_commitments*/ MEGA_EXECUTION_TRACE_NUM_WIRES * GOBLIN_GROUP_PUBLIC_INPUTS_SIZE; + /*table_commitments*/ (MEGA_EXECUTION_TRACE_NUM_WIRES * GOBLIN_GROUP_PUBLIC_INPUTS_SIZE) + + /*output_pg_accum_hash*/ FR_PUBLIC_INPUTS_SIZE; // Number of bb::fr elements used to represent the default public inputs, i.e., the pairing points static constexpr std::size_t DEFAULT_PUBLIC_INPUTS_SIZE = PAIRING_POINTS_SIZE; @@ -61,7 +62,7 @@ static constexpr std::size_t APP_PUBLIC_INPUTS_SIZE = PAIRING_POINTS_SIZE; // Number of bb::fr elements used to represent the public inputs of the HIDING kernel static constexpr std::size_t HIDING_KERNEL_PUBLIC_INPUTS_SIZE = /*pairing_inputs*/ 
PAIRING_POINTS_SIZE + - /*table_commitments*/ MEGA_EXECUTION_TRACE_NUM_WIRES * GOBLIN_GROUP_PUBLIC_INPUTS_SIZE; + /*table_commitments*/ (MEGA_EXECUTION_TRACE_NUM_WIRES * GOBLIN_GROUP_PUBLIC_INPUTS_SIZE); // Number of bb::fr elements used to represent the public inputs of a ROLLUP circuit static constexpr std::size_t ROLLUP_PUBLIC_INPUTS_SIZE = diff --git a/barretenberg/cpp/src/barretenberg/honk/utils/honk_key_gen.hpp b/barretenberg/cpp/src/barretenberg/honk/utils/honk_key_gen.hpp index 939242e77b59..6e63eabde58e 100644 --- a/barretenberg/cpp/src/barretenberg/honk/utils/honk_key_gen.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/utils/honk_key_gen.hpp @@ -111,3 +111,5 @@ inline void output_vk_sol_ultra_honk(std::ostream& os, os << std::flush; } + + diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_environment.test.cpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_environment.test.cpp index 1eb1602cf2cc..f196a20765d3 100644 --- a/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_environment.test.cpp +++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_environment.test.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include "barretenberg/common/serialize.hpp" diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store.test.cpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store.test.cpp index 75c737883ff9..a46e28eee454 100644 --- a/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store.test.cpp +++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store.test.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include "barretenberg/common/serialize.hpp" diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/queries.cpp b/barretenberg/cpp/src/barretenberg/lmdblib/queries.cpp index 8ce5a9a0434b..9bbb54ee8cb4 100644 --- a/barretenberg/cpp/src/barretenberg/lmdblib/queries.cpp +++ b/barretenberg/cpp/src/barretenberg/lmdblib/queries.cpp @@ -109,7 +109,7 @@ bool get_value(Key& key, uint64_t& data, const LMDBDatabase& db, 
const bb::lmdbl if (!call_lmdb_func(mdb_get, tx.underlying(), db.underlying(), &dbKey, &dbVal)) { return false; } - // use the deserialise key method for deserialising the index + // use the deserialise key method for deserializing the index deserialise_key(dbVal.mv_data, data); return true; } diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/lmdb_store/lmdb_store_wrapper.cpp b/barretenberg/cpp/src/barretenberg/nodejs_module/lmdb_store/lmdb_store_wrapper.cpp index ffea7f2b5b12..9417a27036d2 100644 --- a/barretenberg/cpp/src/barretenberg/nodejs_module/lmdb_store/lmdb_store_wrapper.cpp +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/lmdb_store/lmdb_store_wrapper.cpp @@ -4,7 +4,6 @@ #include "barretenberg/nodejs_module/lmdb_store/lmdb_store_message.hpp" #include "napi.h" #include -#include #include #include #include diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/util/message_processor.hpp b/barretenberg/cpp/src/barretenberg/nodejs_module/util/message_processor.hpp index 8ac61021cec0..d213cd19daa4 100644 --- a/barretenberg/cpp/src/barretenberg/nodejs_module/util/message_processor.hpp +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/util/message_processor.hpp @@ -83,7 +83,7 @@ class AsyncMessageProcessor { auto data = std::make_shared>(length); std::copy_n(buffer.Data(), length, data->data()); - auto* op = new bb::nodejs::AsyncOperation(env, deferred, [=](msgpack::sbuffer& buf) { + auto* op = new bb::nodejs::AsyncOperation(env, deferred, [data, this, length](msgpack::sbuffer& buf) { msgpack::object_handle obj_handle = msgpack::unpack(data->data(), length); msgpack::object obj = obj_handle.get(); dispatcher.on_new_data(obj, buf); diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.cpp b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.cpp index 03dda67355f6..9a2de1f4bcdf 100644 --- a/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.cpp +++ 
b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.cpp @@ -234,8 +234,8 @@ WorldStateWrapper::WorldStateWrapper(const Napi::CallbackInfo& info) [this](msgpack::object& obj, msgpack::sbuffer& buffer) { return delete_fork(obj, buffer); }); _dispatcher.register_target( - WorldStateMessageType::FINALISE_BLOCKS, - [this](msgpack::object& obj, msgpack::sbuffer& buffer) { return set_finalised(obj, buffer); }); + WorldStateMessageType::FINALIZE_BLOCKS, + [this](msgpack::object& obj, msgpack::sbuffer& buffer) { return set_finalized(obj, buffer); }); _dispatcher.register_target(WorldStateMessageType::UNWIND_BLOCKS, [this](msgpack::object& obj, msgpack::sbuffer& buffer) { return unwind(obj, buffer); }); @@ -784,14 +784,14 @@ bool WorldStateWrapper::close(msgpack::object& obj, msgpack::sbuffer& buf) return true; } -bool WorldStateWrapper::set_finalised(msgpack::object& obj, msgpack::sbuffer& buf) const +bool WorldStateWrapper::set_finalized(msgpack::object& obj, msgpack::sbuffer& buf) const { TypedMessage request; obj.convert(request); - WorldStateStatusSummary status = _ws->set_finalised_blocks(request.value.toBlockNumber); + WorldStateStatusSummary status = _ws->set_finalized_blocks(request.value.toBlockNumber); MsgHeader header(request.header.messageId); messaging::TypedMessage resp_msg( - WorldStateMessageType::FINALISE_BLOCKS, header, { status }); + WorldStateMessageType::FINALIZE_BLOCKS, header, { status }); msgpack::pack(buf, resp_msg); return true; diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.hpp b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.hpp index 9837071ecbfa..0ca51f46e9bc 100644 --- a/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.hpp +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.hpp @@ -60,7 +60,7 @@ class WorldStateWrapper : public Napi::ObjectWrap { bool close(msgpack::object& obj, msgpack::sbuffer& buffer); - bool 
set_finalised(msgpack::object& obj, msgpack::sbuffer& buffer) const; + bool set_finalized(msgpack::object& obj, msgpack::sbuffer& buffer) const; bool unwind(msgpack::object& obj, msgpack::sbuffer& buffer) const; bool remove_historical(msgpack::object& obj, msgpack::sbuffer& buffer) const; diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state_message.hpp b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state_message.hpp index a3d6ea1a86f3..388cdc13f0bb 100644 --- a/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state_message.hpp +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state_message.hpp @@ -45,7 +45,7 @@ enum WorldStateMessageType { CREATE_FORK, DELETE_FORK, - FINALISE_BLOCKS, + FINALIZE_BLOCKS, UNWIND_BLOCKS, REMOVE_HISTORICAL_BLOCKS, diff --git a/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256.hpp b/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256.hpp index 0056562b7c6f..3e6ab91965d3 100644 --- a/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256.hpp +++ b/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256.hpp @@ -96,14 +96,13 @@ class alignas(32) uint256_t { return { static_cast(a), static_cast(a >> 64), 0, 0 }; } - constexpr explicit operator uint128_t() { return (static_cast(data[1]) << 64) + data[0]; } - constexpr uint256_t& operator=(const uint256_t& other) noexcept = default; constexpr uint256_t& operator=(uint256_t&& other) noexcept = default; constexpr ~uint256_t() noexcept = default; explicit constexpr operator bool() const { return static_cast(data[0]); }; + constexpr explicit operator uint128_t() { return (static_cast(data[1]) << 64) + data[0]; } template explicit constexpr operator T() const { return static_cast(data[0]); }; [[nodiscard]] constexpr bool get_bit(uint64_t bit_index) const; diff --git a/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256_impl.hpp 
b/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256_impl.hpp index 72bb884e6a8f..983323dad327 100644 --- a/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256_impl.hpp @@ -290,6 +290,7 @@ constexpr std::pair uint256_t::mul_extended(const uint256_ */ constexpr uint256_t uint256_t::slice(const uint64_t start, const uint64_t end) const { + assert(start < end); const uint64_t range = end - start; const uint256_t mask = (range == 256) ? -uint256_t(1) : (uint256_t(1) << range) - 1; return ((*this) >> start) & mask; diff --git a/barretenberg/cpp/src/barretenberg/numeric/uintx/uintx_impl.hpp b/barretenberg/cpp/src/barretenberg/numeric/uintx/uintx_impl.hpp index 9f3906c06fa5..78253741a082 100644 --- a/barretenberg/cpp/src/barretenberg/numeric/uintx/uintx_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/numeric/uintx/uintx_impl.hpp @@ -13,7 +13,7 @@ template std::pair, uintx> uintx::divmod_base(const uintx& b) const { - ASSERT(b != 0); + ASSERT_DEBUG(b != 0); if (*this == 0) { return { uintx(0), uintx(0) }; } diff --git a/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.hpp b/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.hpp index aebdd6be7638..03b88839835f 100644 --- a/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.hpp +++ b/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.hpp @@ -48,6 +48,7 @@ class ECCOpQueue { EccvmRowTracker eccvm_row_tracker; public: + static const size_t OP_QUEUE_SIZE = 1 << CONST_OP_QUEUE_LOG_SIZE; /** * @brief Instantiate an initial ECC op subtable. 
*/ @@ -63,10 +64,12 @@ class ECCOpQueue { ultra_ops_table.create_new_subtable(); } - void merge(MergeSettings settings = MergeSettings::PREPEND) + size_t get_current_subtable_size() const { return ultra_ops_table.get_current_subtable_size(); } + + void merge(MergeSettings settings = MergeSettings::PREPEND, std::optional ultra_fixed_offset = std::nullopt) { eccvm_ops_table.merge(settings); - ultra_ops_table.merge(settings); + ultra_ops_table.merge(settings, ultra_fixed_offset); } // Construct polynomials corresponding to the columns of the full aggregate ultra ecc ops table @@ -75,7 +78,8 @@ class ECCOpQueue { return ultra_ops_table.construct_table_columns(); } - // Construct polys corresponding to the columns of the aggregate ultra ops table, excluding the most recent subtable + // Construct polys corresponding to the columns of the aggregate ultra ops table, excluding the most recent + // subtable std::array, ULTRA_TABLE_WIDTH> construct_previous_ultra_ops_table_columns() const { return ultra_ops_table.construct_previous_table_columns(); @@ -97,8 +101,8 @@ class ECCOpQueue { size_t get_current_ultra_ops_subtable_num_rows() const { return ultra_ops_table.current_ultra_subtable_size(); } size_t get_previous_ultra_ops_table_num_rows() const { return ultra_ops_table.previous_ultra_table_size(); } - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1339): Consider making the ultra and eccvm ops getters - // more memory efficient + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1339): Consider making the ultra and eccvm ops + // getters more memory efficient // Get the full table of ECCVM ops in contiguous memory; construct it if it has not been constructed already std::vector& get_eccvm_ops() @@ -225,8 +229,8 @@ class ECCOpQueue { /** * @brief Writes randomness to the ultra ops table but adds no eccvm operations. 
* - * @details This method is used to add randomness to the ultra ops table with the aim of randomising the commitment - * and evaluations of its corresponding columns + * @details This method is used to add randomness to the ultra ops table with the aim of randomising the + * commitment and evaluations of its corresponding columns * @return UltraOp */ UltraOp random_op_ultra_only() @@ -307,7 +311,7 @@ class ECCOpQueue { Fr z_2 = 0; auto converted = scalar.from_montgomery_form(); uint256_t converted_u256(scalar); - if (converted_u256.get_msb() <= 128) { + if (converted_u256.get_msb() < 128) { ultra_op.z_1 = scalar; ultra_op.z_2 = 0; } else { diff --git a/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.test.cpp b/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.test.cpp index a5cfd0e77cde..f6a95676bcda 100644 --- a/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.test.cpp +++ b/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.test.cpp @@ -28,9 +28,9 @@ class ECCOpQueueTest { * */ static void check_table_column_polynomials(const std::shared_ptr& op_queue, - MergeSettings settings = MergeSettings::PREPEND) + MergeSettings settings, + std::optional ultra_fixed_offset = std::nullopt) { - op_queue->merge(settings); // Construct column polynomials corresponding to the full table (T), the previous table (T_prev), and the // current subtable (t_current) auto table_polynomials = op_queue->construct_ultra_ops_table_columns(); @@ -52,32 +52,36 @@ class ECCOpQueueTest { eval_challenge.pow(current_subtable_size); // x^k * T_prev(x) EXPECT_EQ(table_eval, subtable_eval + shifted_previous_table_eval); } else { - // T(x) = T_prev(x) + x^k * t_current(x), where k is the size of the previous table + // APPEND merge performs concatenation directly to end of previous table or at a specified fixed offset const size_t prev_table_size = op_queue->get_previous_ultra_ops_table_num_rows(); // k - const Fr prev_table_eval = prev_table_poly.evaluate(eval_challenge); 
// T_prev(x) + const size_t shift_magnitude = ultra_fixed_offset.has_value() + ? ultra_fixed_offset.value() * bb::UltraEccOpsTable::NUM_ROWS_PER_OP + : prev_table_size; // k + // T(x) = T_prev(x) + x^k * t_current(x), where k is the shift magnitude + const Fr prev_table_eval = prev_table_poly.evaluate(eval_challenge); // T_prev(x) const Fr shifted_subtable_eval = - subtable_poly.evaluate(eval_challenge) * eval_challenge.pow(prev_table_size); // x^k * t_current(x) + subtable_poly.evaluate(eval_challenge) * eval_challenge.pow(shift_magnitude); // x^k * t_current(x) EXPECT_EQ(table_eval, shifted_subtable_eval + prev_table_eval); } } } /** - * @brief Check that the opcode values are consistent between the first column polynomial and the eccvm - ops table + * @brief Check that the opcode values are consistent between the ultra ops table and the eccvm ops table * * @param op_queue */ static void check_opcode_consistency_with_eccvm(const std::shared_ptr& op_queue) { - auto ultra_opcode_values = op_queue->construct_ultra_ops_table_columns()[0]; + auto ultra_table = op_queue->get_ultra_ops(); auto eccvm_table = op_queue->get_eccvm_ops(); - // Every second value in the opcode column polynomial should be 0 - EXPECT_EQ(eccvm_table.size() * 2, ultra_opcode_values.size()); - for (size_t i = 0; i < eccvm_table.size(); ++i) { - EXPECT_EQ(ultra_opcode_values[2 * i], eccvm_table[i].op_code.value()); - EXPECT_EQ(ultra_opcode_values[2 * i + 1], Fr(0)); + size_t j = 0; + for (const auto& ultra_op : ultra_table) { + if (ultra_op.op_code.value() == 0) { + continue; + } + EXPECT_EQ(ultra_op.op_code.value(), eccvm_table[j++].op_code.value()); } }; }; @@ -131,7 +135,9 @@ TEST(ECCOpQueueTest, ColumnPolynomialConstructionPrependOnly) const size_t NUM_SUBTABLES = 5; for (size_t i = 0; i < NUM_SUBTABLES; ++i) { ECCOpQueueTest::populate_an_arbitrary_subtable_of_ops(op_queue); - ECCOpQueueTest::check_table_column_polynomials(op_queue); + MergeSettings settings = MergeSettings::PREPEND; + 
op_queue->merge(settings); + ECCOpQueueTest::check_table_column_polynomials(op_queue, settings); } ECCOpQueueTest::check_opcode_consistency_with_eccvm(op_queue); @@ -147,12 +153,41 @@ TEST(ECCOpQueueTest, ColumnPolynomialConstructionPrependThenAppend) const size_t NUM_SUBTABLES = 2; for (size_t i = 0; i < NUM_SUBTABLES; ++i) { ECCOpQueueTest::populate_an_arbitrary_subtable_of_ops(op_queue); - ECCOpQueueTest::check_table_column_polynomials(op_queue); + MergeSettings settings = MergeSettings::PREPEND; + op_queue->merge(settings); + ECCOpQueueTest::check_table_column_polynomials(op_queue, settings); } // Do a single append operation ECCOpQueueTest::populate_an_arbitrary_subtable_of_ops(op_queue); - ECCOpQueueTest::check_table_column_polynomials(op_queue, MergeSettings::APPEND); + MergeSettings settings = MergeSettings::APPEND; + op_queue->merge(settings); + ECCOpQueueTest::check_table_column_polynomials(op_queue, settings); + + ECCOpQueueTest::check_opcode_consistency_with_eccvm(op_queue); +} + +TEST(ECCOpQueueTest, ColumnPolynomialConstructionPrependThenAppendAtFixedOffset) +{ + + // Instantiate an EccOpQueue and populate it with several subtables of ECC ops + auto op_queue = std::make_shared(); + + // Check that the table polynomials have the correct form after each subtable concatenation + const size_t NUM_SUBTABLES = 2; + for (size_t i = 0; i < NUM_SUBTABLES; ++i) { + ECCOpQueueTest::populate_an_arbitrary_subtable_of_ops(op_queue); + MergeSettings settings = MergeSettings::PREPEND; + op_queue->merge(settings); + ECCOpQueueTest::check_table_column_polynomials(op_queue, settings); + } + + // Do a single append operation at a fixed offset (sufficiently large as to not overlap with the existing table) + const size_t ultra_fixed_offset = op_queue->get_ultra_ops_table_num_rows() + 20; + ECCOpQueueTest::populate_an_arbitrary_subtable_of_ops(op_queue); + MergeSettings settings = MergeSettings::APPEND; + op_queue->merge(settings, ultra_fixed_offset); + 
ECCOpQueueTest::check_table_column_polynomials(op_queue, settings, ultra_fixed_offset); ECCOpQueueTest::check_opcode_consistency_with_eccvm(op_queue); } diff --git a/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.hpp b/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.hpp index ed0288c2eb6c..19cbf6ffe7ae 100644 --- a/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.hpp +++ b/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.hpp @@ -74,6 +74,8 @@ struct UltraOp { Fr z_2; bool return_is_infinity; + bool operator==(const UltraOp& other) const = default; + /** * @brief Get the point in standard form i.e. as two coordinates x and y in the base field or as a point at * infinity whose coordinates are set to (0,0). @@ -130,6 +132,7 @@ template class EccOpsTable { } size_t num_subtables() const { return table.size(); } + size_t get_current_subtable_size() const { return current_subtable.size(); } auto& get() const { return table; } @@ -230,35 +233,106 @@ class UltraEccOpsTable { size_t current_subtable_idx = 0; // index of the current subtable being constructed UltraOpsTable table; + // For fixed-location append functionality (ultra ops only) + std::optional fixed_append_offset; + bool has_fixed_append = false; + public: size_t size() const { return table.size(); } - size_t ultra_table_size() const { return table.size() * NUM_ROWS_PER_OP; } + size_t ultra_table_size() const + { + size_t base_size = table.size() * NUM_ROWS_PER_OP; + if (has_fixed_append && fixed_append_offset.has_value()) { + // Include zeros gap and final subtable at fixed location + size_t last_subtable_size = 0; + if (!table.get().empty()) { + // The last subtable in deque is the fixed-location one + last_subtable_size = table.get().back().size() * NUM_ROWS_PER_OP; + } + return std::max(base_size, (fixed_append_offset.value() * NUM_ROWS_PER_OP) + last_subtable_size); + } + return base_size; + } size_t current_ultra_subtable_size() const { return 
table.get()[current_subtable_idx].size() * NUM_ROWS_PER_OP; } size_t previous_ultra_table_size() const { return (ultra_table_size() - current_ultra_subtable_size()); } void create_new_subtable(size_t size_hint = 0) { table.create_new_subtable(size_hint); } void push(const UltraOp& op) { table.push(op); } - void merge(MergeSettings settings = MergeSettings::PREPEND) + void merge(MergeSettings settings = MergeSettings::PREPEND, std::optional offset = std::nullopt) { - table.merge(settings); - current_subtable_idx = settings == MergeSettings::PREPEND ? 0 : table.num_subtables() - 1; + if (settings == MergeSettings::APPEND) { + // All appends are treated as fixed-location for ultra ops + ASSERT(!has_fixed_append, "Can only perform fixed-location append once"); + // Set fixed location at which to append ultra ops. If nullopt --> append right after prepended tables + fixed_append_offset = offset; + has_fixed_append = true; + table.merge(settings); + current_subtable_idx = table.num_subtables() - 1; + } else { // MergeSettings::PREPEND + table.merge(settings); + current_subtable_idx = 0; + } } - std::vector get_reconstructed() const { return table.get_reconstructed(); } + size_t get_current_subtable_size() const { return table.get_current_subtable_size(); } + + std::vector get_reconstructed() const + { + if (has_fixed_append && fixed_append_offset.has_value()) { + return get_reconstructed_with_fixed_append(); + } + return table.get_reconstructed(); + } + std::vector get_reconstructed_with_fixed_append() const + { + + ASSERT(get_current_subtable_size() == 0, + "current subtable should be merged before reconstructing the full table of operations."); + + std::vector reconstructed_table; + reconstructed_table.reserve(1 << CONST_OP_QUEUE_LOG_SIZE); + + for (size_t subtable_idx = 0; subtable_idx < table.num_subtables() - 1; subtable_idx++) { + const auto& subtable = table.get()[subtable_idx]; + for (const auto& op : subtable) { + reconstructed_table.push_back(op); + } + } + + 
// Add zeros if fixed offset is larger than current size + if (has_fixed_append && fixed_append_offset.has_value()) { + size_t current_size = reconstructed_table.size(); + size_t target_offset = fixed_append_offset.value(); + // Fill gap with no-ops if needed + reconstructed_table.insert(reconstructed_table.end(), target_offset - current_size, UltraOp{ /*no-op*/ }); + } + + // Add the final subtable (appended at fixed location) + const auto& final_subtable = table.get()[table.num_subtables() - 1]; + for (const auto& op : final_subtable) { + reconstructed_table.push_back(op); + } + return reconstructed_table; + } // Construct the columns of the full ultra ecc ops table ColumnPolynomials construct_table_columns() const { const size_t poly_size = ultra_table_size(); + + if (has_fixed_append) { + // Handle fixed-location append: prepended tables first, then appended table at fixed offset + return construct_column_polynomials_with_fixed_append(poly_size); + } + + // Normal case: all subtables in order const size_t subtable_start_idx = 0; // include all subtables const size_t subtable_end_idx = table.num_subtables(); - return construct_column_polynomials_from_subtables(poly_size, subtable_start_idx, subtable_end_idx); } // Construct the columns of the previous full ultra ecc ops table ColumnPolynomials construct_previous_table_columns() const { - const size_t poly_size = previous_ultra_table_size(); const size_t subtable_start_idx = current_subtable_idx == 0 ? 1 : 0; const size_t subtable_end_idx = current_subtable_idx == 0 ? 
table.num_subtables() : table.num_subtables() - 1; @@ -266,8 +340,8 @@ class UltraEccOpsTable { return construct_column_polynomials_from_subtables(poly_size, subtable_start_idx, subtable_end_idx); } - // Construct the columns of the current ultra ecc ops subtable which is either the first or the last one depening on - // whether it has been prepended or appended + // Construct the columns of the current ultra ecc ops subtable which is either the first or the last one + // depening on whether it has been prepended or appended ColumnPolynomials construct_current_ultra_ops_subtable_columns() const { const size_t poly_size = current_ultra_subtable_size(); @@ -278,6 +352,60 @@ class UltraEccOpsTable { } private: + /** + * @brief Write a single UltraOp to the column polynomials at the given position + * @details Each op is written across 2 rows (NUM_ROWS_PER_OP) + * @param column_polynomials The column polynomials to write to + * @param op The operation to write + * @param row_idx The starting row index (will write to row_idx and row_idx+1) + */ + static void write_op_to_polynomials(ColumnPolynomials& column_polynomials, const UltraOp& op, const size_t row_idx) + { + column_polynomials[0].at(row_idx) = !op.op_code.is_random_op ? op.op_code.value() : op.op_code.random_value_1; + column_polynomials[1].at(row_idx) = op.x_lo; + column_polynomials[2].at(row_idx) = op.x_hi; + column_polynomials[3].at(row_idx) = op.y_lo; + column_polynomials[0].at(row_idx + 1) = !op.op_code.is_random_op ? 
0 : op.op_code.random_value_2; + column_polynomials[1].at(row_idx + 1) = op.y_hi; + column_polynomials[2].at(row_idx + 1) = op.z_1; + column_polynomials[3].at(row_idx + 1) = op.z_2; + } + + /** + * @brief Construct polynomials with fixed-location append + * @details Process prepended subtables first, then place the appended subtable at the fixed offset + */ + ColumnPolynomials construct_column_polynomials_with_fixed_append(const size_t poly_size) const + { + ColumnPolynomials column_polynomials; + for (auto& poly : column_polynomials) { + poly = Polynomial(poly_size); // Initialized to zeros + } + + // Process all prepended subtables (all except last) + size_t i = 0; + for (size_t subtable_idx = 0; subtable_idx < table.num_subtables() - 1; subtable_idx++) { + const auto& subtable = table.get()[subtable_idx]; + for (const auto& op : subtable) { + write_op_to_polynomials(column_polynomials, op, i); + i += NUM_ROWS_PER_OP; + } + } + + // Place the appended subtable at the fixed offset + size_t append_position = fixed_append_offset.has_value() ? fixed_append_offset.value() * NUM_ROWS_PER_OP : i; + const auto& appended_subtable = table.get()[table.num_subtables() - 1]; + + size_t j = append_position; + for (const auto& op : appended_subtable) { + write_op_to_polynomials(column_polynomials, op, j); + j += NUM_ROWS_PER_OP; + } + + // Any gap between prepended tables and appended table remains zeros (from initialization) + return column_polynomials; + } + /** * @brief Construct polynomials corresponding to the columns of the reconstructed ultra ops table for the given * range of subtables @@ -297,17 +425,8 @@ class UltraEccOpsTable { for (size_t subtable_idx = subtable_start_idx; subtable_idx < subtable_end_idx; ++subtable_idx) { const auto& subtable = table.get()[subtable_idx]; for (const auto& op : subtable) { - column_polynomials[0].at(i) = !op.op_code.is_random_op ? 
op.op_code.value() : op.op_code.random_value_1; - column_polynomials[1].at(i) = op.x_lo; - column_polynomials[2].at(i) = op.x_hi; - column_polynomials[3].at(i) = op.y_lo; - i++; - column_polynomials[0].at(i) = !op.op_code.is_random_op ? 0 : op.op_code.random_value_2; - // only the first 'op' field is utilized - column_polynomials[1].at(i) = op.y_hi; - column_polynomials[2].at(i) = op.z_1; - column_polynomials[3].at(i) = op.z_2; - i++; + write_op_to_polynomials(column_polynomials, op, i); + i += NUM_ROWS_PER_OP; } } return column_polynomials; diff --git a/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.test.cpp b/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.test.cpp index 79032facfa28..cf4511632361 100644 --- a/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.test.cpp +++ b/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.test.cpp @@ -196,6 +196,173 @@ TEST(EccOpsTableTest, UltraOpsPrependThenAppend) } } +TEST(EccOpsTableTest, UltraOpsFixedLocationAppendNoGap) +{ + using Fr = fr; + using TableGenerator = EccOpsTableTest::UltraOpTableGenerator; + + // Construct sets of ultra ops + const size_t NUM_SUBTABLES = 3; + std::vector subtable_op_counts = { 4, 2, 7 }; + + TableGenerator table_generator; + auto subtables = table_generator.generate_subtables(NUM_SUBTABLES, subtable_op_counts); + + // Construct the concatenated table with fixed-location append (no explicit offset) + UltraEccOpsTable ultra_ops_table; + std::array merge_settings = { MergeSettings::PREPEND, + MergeSettings::PREPEND, + MergeSettings::APPEND }; + + for (size_t i = 0; i < NUM_SUBTABLES; ++i) { + ultra_ops_table.create_new_subtable(); + for (const auto& op : subtables[i]) { + ultra_ops_table.push(op); + } + + // For APPEND (last subtable), don't provide an offset (default to right after prepended tables) + ultra_ops_table.merge(merge_settings[i]); + } + + // Expected order: subtable[1], subtable[0], subtable[2] (no gap) + std::vector> ordered_subtables = { 
subtables[1], subtables[0], subtables[2] }; + + // Construct the mock ultra ops table + EccOpsTableTest::MockUltraOpsTable expected_ultra_ops_table(ordered_subtables); + + // Check that the ultra ops table has the correct size + auto expected_num_ops = std::accumulate(subtable_op_counts.begin(), subtable_op_counts.end(), size_t(0)); + EXPECT_EQ(ultra_ops_table.size(), expected_num_ops); + + // Construct polynomials corresponding to the columns of the ultra ops table + std::array, 4> ultra_ops_table_polynomials = ultra_ops_table.construct_table_columns(); + + // Check that the ultra ops table matches the expected table + for (auto [expected_column, poly] : zip_view(expected_ultra_ops_table.columns, ultra_ops_table_polynomials)) { + for (auto [expected_value, value] : zip_view(expected_column, poly.coeffs())) { + EXPECT_EQ(expected_value, value); + } + } +} + +TEST(EccOpsTableTest, UltraOpsFixedLocationAppendWithGap) +{ + using Fr = fr; + using TableGenerator = EccOpsTableTest::UltraOpTableGenerator; + + const size_t ULTRA_ROWS_PER_OP = UltraEccOpsTable::NUM_ROWS_PER_OP; + + // Construct sets of ultra ops + const size_t NUM_SUBTABLES = 3; + std::vector subtable_op_counts = { 4, 2, 7 }; + + TableGenerator table_generator; + auto subtables = table_generator.generate_subtables(NUM_SUBTABLES, subtable_op_counts); + + // Construct the concatenated table with fixed-location append at specific offset + UltraEccOpsTable ultra_ops_table; + std::array merge_settings = { MergeSettings::PREPEND, + MergeSettings::PREPEND, + MergeSettings::APPEND }; + + // Define a fixed offset at which to append the table (must be greater than the total size of the prepended tables) + const size_t fixed_offset = 20; + const size_t fixed_offset_num_rows = fixed_offset * ULTRA_ROWS_PER_OP; + const size_t prepended_size = (subtable_op_counts[0] + subtable_op_counts[1]) * ULTRA_ROWS_PER_OP; + ASSERT(fixed_offset_num_rows > prepended_size); + + // Construct the ultra ops table + for (size_t i = 0; i < 
NUM_SUBTABLES; ++i) { + ultra_ops_table.create_new_subtable(); + for (const auto& op : subtables[i]) { + ultra_ops_table.push(op); + } + + // For APPEND (last subtable), provide a fixed offset + if (merge_settings[i] == MergeSettings::APPEND) { + ultra_ops_table.merge(merge_settings[i], fixed_offset); + } else { + ultra_ops_table.merge(merge_settings[i]); + } + } + + // Check that the ultra ops table has the correct total size (gap is not present in raw ops table) + auto expected_num_ops = std::accumulate(subtable_op_counts.begin(), subtable_op_counts.end(), size_t(0)); + EXPECT_EQ(ultra_ops_table.size(), expected_num_ops); + + // Check that the polynomials have the correct size (including gap) + size_t expected_poly_size = fixed_offset_num_rows + (subtable_op_counts[2] * ULTRA_ROWS_PER_OP); + EXPECT_EQ(ultra_ops_table.ultra_table_size(), expected_poly_size); + + // Construct polynomials corresponding to the columns of the ultra ops table + std::array, 4> ultra_ops_table_polynomials = ultra_ops_table.construct_table_columns(); + + // Verify each polynomial has the expected size + for (const auto& poly : ultra_ops_table_polynomials) { + EXPECT_EQ(poly.size(), expected_poly_size); + } + + // Construct expected table with zeros in the gap + // Order: subtable[1], subtable[0], zeros, subtable[2] + std::vector> ordered_subtables = { subtables[1], subtables[0] }; + EccOpsTableTest::MockUltraOpsTable expected_prepended_table(ordered_subtables); + + // Check prepended subtables are at the beginning + for (auto [ultra_op_poly, expected_poly] : + zip_view(ultra_ops_table_polynomials, expected_prepended_table.columns)) { + for (size_t row = 0; row < prepended_size; ++row) { + EXPECT_EQ(ultra_op_poly.at(row), expected_poly[row]); + } + } + + // Check gap from offset to appended subtable is filled with zeros + for (auto ultra_op_poly : ultra_ops_table_polynomials) { + for (size_t row = prepended_size; row < fixed_offset_num_rows; ++row) { + EXPECT_EQ(ultra_op_poly.at(row), 
Fr::zero()); + } + } + + // Check appended subtable is at the fixed offset + std::vector> appended_subtables = { subtables[2] }; + EccOpsTableTest::MockUltraOpsTable expected_appended_table(appended_subtables); + for (auto [ultra_op_poly, expected_poly] : zip_view(ultra_ops_table_polynomials, expected_appended_table.columns)) { + for (size_t row = 0; row < subtable_op_counts[2] * ULTRA_ROWS_PER_OP; row++) { + EXPECT_EQ(ultra_op_poly.at(fixed_offset_num_rows + row), expected_poly[row]); + } + } + + // Mimic get_reconstructed by unifying all the ops from subtables into a single vector with the appropriate append + // offset + { + std::vector expected_reconstructed; + expected_reconstructed.reserve(expected_num_ops + fixed_offset); + + // Order: subtable[1], subtable[0], no-ops range, subtable[2] + for (const auto& op : subtables[1]) { + expected_reconstructed.push_back(op); + } + for (const auto& op : subtables[0]) { + expected_reconstructed.push_back(op); + } + + // Add the range of noops + UltraOp no_op = {}; + size_t size_before = expected_reconstructed.size(); + for (size_t i = size_before; i < fixed_offset; i++) { + expected_reconstructed.push_back(no_op); + } + + for (const auto& op : subtables[2]) { + expected_reconstructed.push_back(op); + } + + EXPECT_EQ(expected_reconstructed.size(), ultra_ops_table.get_reconstructed().size()); + + // Compare to the op-queue's reconstruction (should include the gap as no-ops) + EXPECT_EQ(expected_reconstructed, ultra_ops_table.get_reconstructed()); + } +} + // Ensure EccvmOpsTable correctly constructs a concatenated table from successively prepended subtables TEST(EccOpsTableTest, EccvmOpsTable) { diff --git a/barretenberg/cpp/src/barretenberg/op_queue/eccvm_row_tracker.hpp b/barretenberg/cpp/src/barretenberg/op_queue/eccvm_row_tracker.hpp index ceffa2600fbd..49945ef6b295 100644 --- a/barretenberg/cpp/src/barretenberg/op_queue/eccvm_row_tracker.hpp +++ b/barretenberg/cpp/src/barretenberg/op_queue/eccvm_row_tracker.hpp @@ 
-38,7 +38,12 @@ class EccvmRowTracker { static uint32_t num_eccvm_msm_rows(const size_t msm_size) { const size_t rows_per_wnaf_digit = - (msm_size / eccvm::ADDITIONS_PER_ROW) + ((msm_size % eccvm::ADDITIONS_PER_ROW != 0) ? 1 : 0); + (msm_size / eccvm::ADDITIONS_PER_ROW) + + ((msm_size % eccvm::ADDITIONS_PER_ROW != 0) + ? 1 + : 0); // the Straus algorithm proceeds by incrementing through the digit-slots and doing + // computations *across* the MSMs. Each digit-slot therefore contributes the *ceiling* of + // `msm_size`/`ADDITIONS_PER_ROW`. const size_t num_rows_for_all_rounds = (eccvm::NUM_WNAF_DIGITS_PER_SCALAR + 1) * rows_per_wnaf_digit; // + 1 round for skew const size_t num_double_rounds = eccvm::NUM_WNAF_DIGITS_PER_SCALAR - 1; diff --git a/barretenberg/cpp/src/barretenberg/polynomials/backing_memory.cpp b/barretenberg/cpp/src/barretenberg/polynomials/backing_memory.cpp index 2f72b2de7a6d..13c9ecd1c4b8 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/backing_memory.cpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/backing_memory.cpp @@ -1,5 +1,78 @@ #include "barretenberg/polynomials/backing_memory.hpp" +#include "barretenberg/common/throw_or_abort.hpp" +#include +#include +#include +#include +#include // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) bool slow_low_memory = std::getenv("BB_SLOW_LOW_MEMORY") == nullptr ? 
false : std::string(std::getenv("BB_SLOW_LOW_MEMORY")) == "1"; + +// Storage budget is disabled for WASM builds +#ifndef __wasm__ + +// Parse storage size string (e.g., "500m", "2g", "1024k") +size_t parse_size_string(const std::string& size_str) +{ + if (size_str.empty()) { + return std::numeric_limits::max(); + } + + try { + std::string str = size_str; + + // Convert to lowercase for case-insensitive comparison + char suffix = static_cast(std::tolower(static_cast(str.back()))); + size_t multiplier = 1; + + // Check for unit suffix + if (suffix == 'k') { + multiplier = 1024ULL; + str.pop_back(); + } else if (suffix == 'm') { + multiplier = 1024ULL * 1024ULL; + str.pop_back(); + } else if (suffix == 'g') { + multiplier = 1024ULL * 1024ULL * 1024ULL; + str.pop_back(); + } else if (std::isdigit(static_cast(suffix)) == 0) { + // Invalid suffix + throw_or_abort("Invalid storage size format: '" + size_str + "'. Use format like '500m', '2g', or '1024k'"); + } + + // Check if remaining string is a valid number + if (str.empty()) { + throw_or_abort("Invalid storage size format: '" + size_str + "'. No numeric value provided"); + } + + size_t value = std::stoull(str); + return value * multiplier; + } catch (const std::invalid_argument&) { + throw_or_abort("Invalid storage size format: '" + size_str + "'. Not a valid number"); + } catch (const std::out_of_range&) { + throw_or_abort("Invalid storage size format: '" + size_str + "'. 
Value out of range"); + } +} + +namespace { +// Parse storage budget from environment variable (supports k/m/g suffixes like Docker) +size_t parse_storage_budget() +{ + const char* env_val = std::getenv("BB_STORAGE_BUDGET"); + if (env_val == nullptr) { + return std::numeric_limits::max(); // No limit by default + } + + return parse_size_string(std::string(env_val)); +} +} // namespace + +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +size_t storage_budget = parse_storage_budget(); + +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +std::atomic current_storage_usage{ 0 }; + +#endif // __wasm__ diff --git a/barretenberg/cpp/src/barretenberg/polynomials/backing_memory.hpp b/barretenberg/cpp/src/barretenberg/polynomials/backing_memory.hpp index 02cc3cdf8b3c..5921d9867c62 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/backing_memory.hpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/backing_memory.hpp @@ -14,135 +14,164 @@ #include #include #include -#ifndef _WASI_EMULATED_PROCESS_CLOCKS +#ifndef __wasm__ #include #endif // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) extern bool slow_low_memory; -template class AlignedMemory; +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +extern size_t storage_budget; -#ifndef _WASI_EMULATED_PROCESS_CLOCKS -template class FileBackedMemory; +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +extern std::atomic current_storage_usage; + +// Parse storage size string (e.g., "500m", "2g", "1024k") +size_t parse_size_string(const std::string& size_str); + +template struct BackingMemory { + // Common raw data pointer used by all storage types + Fr* raw_data = nullptr; + +#ifndef __wasm__ + // File-backed data substruct with cleanup metadata + struct FileBackedData { + size_t file_size; + std::string filename; + int fd; + Fr* raw_data_ptr; + + ~FileBackedData() + { + if (raw_data_ptr != nullptr && file_size > 0) { + 
munmap(raw_data_ptr, file_size); + current_storage_usage.fetch_sub(file_size); + } + if (fd >= 0) { + close(fd); + } + if (!filename.empty()) { + std::filesystem::remove(filename); + } + } + }; + std::shared_ptr file_backed; #endif + // Aligned memory data substruct + std::shared_ptr aligned_memory; -template class BackingMemory { - public: BackingMemory() = default; - BackingMemory(const BackingMemory&) = delete; // delete copy constructor - BackingMemory& operator=(const BackingMemory&) = delete; // delete copy assignment + BackingMemory(const BackingMemory&) = default; + BackingMemory& operator=(const BackingMemory&) = default; - BackingMemory(BackingMemory&& other) = delete; // delete move constructor - BackingMemory& operator=(const BackingMemory&&) = delete; // delete move assignment + BackingMemory(BackingMemory&& other) noexcept + : raw_data(other.raw_data) +#ifndef __wasm__ + , file_backed(std::move(other.file_backed)) +#endif + , aligned_memory(std::move(other.aligned_memory)) + { + other.raw_data = nullptr; + } - virtual Fr* raw_data() = 0; + BackingMemory& operator=(BackingMemory&& other) noexcept + { + if (this != &other) { + raw_data = other.raw_data; +#ifndef __wasm__ + file_backed = std::move(other.file_backed); +#endif + aligned_memory = std::move(other.aligned_memory); + other.raw_data = nullptr; + } + return *this; + } - static std::shared_ptr> allocate(size_t size) + // Allocate memory, preferring file-backed if in low memory mode + static BackingMemory allocate(size_t size) { -#ifndef _WASI_EMULATED_PROCESS_CLOCKS + BackingMemory memory; +#ifndef __wasm__ if (slow_low_memory) { - return std::shared_ptr>(new FileBackedMemory(size)); + if (try_allocate_file_backed(memory, size)) { + return memory; + } } #endif - return std::shared_ptr>(new AlignedMemory(size)); + allocate_aligned(memory, size); + return memory; } - virtual ~BackingMemory() = default; -}; - -template class AlignedMemory : public BackingMemory { - public: - T* raw_data() { return 
data.get(); } + ~BackingMemory() = default; private: - AlignedMemory(size_t size) - : BackingMemory() + static void allocate_aligned(BackingMemory& memory, size_t size) + { // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays) - , data(std::static_pointer_cast(std::move(bb::get_mem_slab(sizeof(T) * size)))) - {} - - // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays) - std::shared_ptr data; - - friend BackingMemory; -}; - -#ifndef _WASI_EMULATED_PROCESS_CLOCKS -template class FileBackedMemory : public BackingMemory { - public: - FileBackedMemory(const FileBackedMemory&) = delete; // delete copy constructor - FileBackedMemory& operator=(const FileBackedMemory&) = delete; // delete copy assignment - - FileBackedMemory(FileBackedMemory&& other) = delete; // delete move constructor - FileBackedMemory& operator=(const FileBackedMemory&&) = delete; // delete move assignment - - T* raw_data() { return memory; } + memory.aligned_memory = std::static_pointer_cast(std::move(bb::get_mem_slab(sizeof(Fr) * size))); + memory.raw_data = memory.aligned_memory.get(); + } - ~FileBackedMemory() +#ifndef __wasm__ + static bool try_allocate_file_backed(BackingMemory& memory, size_t size) { - if (file_size == 0) { - return; - } - if (memory != nullptr && file_size > 0) { - munmap(memory, file_size); - } - if (fd >= 0) { - close(fd); - } - if (!filename.empty()) { - std::filesystem::remove(filename); + if (size == 0) { + return false; } - } - private: - // Create a new file-backed memory region - FileBackedMemory(size_t size) - : BackingMemory() - , file_size(size * sizeof(T)) - { - if (file_size == 0) { - return; + size_t required_bytes = size * sizeof(Fr); + size_t current_usage = current_storage_usage.load(); + + // Check if we're under the storage budget + if (current_usage + required_bytes > storage_budget) { + return false; } + size_t file_size = required_bytes; static std::atomic file_counter{ 0 }; size_t id = file_counter.fetch_add(1); + std::filesystem::path temp_dir; try { 
temp_dir = std::filesystem::temp_directory_path(); } catch (const std::exception&) { - // Fallback to current directory if temp_directory_path() fails temp_dir = std::filesystem::current_path(); } - filename = temp_dir / ("poly-mmap-" + std::to_string(getpid()) + "-" + std::to_string(id)); + std::string filename = temp_dir / ("poly-mmap-" + std::to_string(getpid()) + "-" + std::to_string(id)); - fd = open(filename.c_str(), O_CREAT | O_RDWR | O_TRUNC, 0644); - // Create file + int fd = open(filename.c_str(), O_CREAT | O_RDWR | O_TRUNC, 0644); if (fd < 0) { - throw_or_abort("Failed to create backing file: " + filename); + return false; } - // Set file size if (ftruncate(fd, static_cast(file_size)) != 0) { - throw_or_abort("Failed to set file size"); + close(fd); + std::filesystem::remove(filename); + return false; } - // Memory map the file void* addr = mmap(nullptr, file_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); if (addr == MAP_FAILED) { - throw_or_abort("Failed to mmap file: " + std::string(std::strerror(errno))); + close(fd); + std::filesystem::remove(filename); + return false; } - memory = static_cast(addr); - } + auto file_backed_data = std::make_shared(); + file_backed_data->file_size = file_size; + file_backed_data->filename = filename; + file_backed_data->fd = fd; + file_backed_data->raw_data_ptr = static_cast(addr); + + memory.raw_data = static_cast(addr); + memory.file_backed = std::move(file_backed_data); - size_t file_size; - std::string filename; - int fd; - T* memory; + current_storage_usage.fetch_add(required_bytes); - friend BackingMemory; + return true; + } +#endif }; -#endif // __EMSCRIPTEN___ diff --git a/barretenberg/cpp/src/barretenberg/polynomials/evaluation_domain.hpp b/barretenberg/cpp/src/barretenberg/polynomials/evaluation_domain.hpp index c99a3dcb9578..f1c7bb89e518 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/evaluation_domain.hpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/evaluation_domain.hpp @@ -28,7 +28,7 
@@ template class EvaluationDomain { , generator(FF::zero()) , generator_inverse(FF::zero()) , four_inverse(FF::zero()) - , roots(nullptr){}; + , roots(nullptr) {}; EvaluationDomain(const size_t domain_size, const size_t target_generator_size = 0); EvaluationDomain(const EvaluationDomain& other); diff --git a/barretenberg/cpp/src/barretenberg/polynomials/gate_separator.hpp b/barretenberg/cpp/src/barretenberg/polynomials/gate_separator.hpp index 1de867c7b585..937fc033ab08 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/gate_separator.hpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/gate_separator.hpp @@ -5,9 +5,10 @@ // ===================== #pragma once +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/common/compiler_hints.hpp" -#include "barretenberg/common/op_count.hpp" #include "barretenberg/common/thread.hpp" +#include "barretenberg/numeric/bitop/get_msb.hpp" #include "barretenberg/stdlib/primitives/bool/bool.hpp" #include @@ -68,6 +69,19 @@ template struct GateSeparatorPolynomial { : betas(betas) {} + /** + * @brief Constructs a virtual GateSeparator used by the prover in rounds k > d - 1, and computes its partial + * evaluation at (u_0, ..., u_{d-1}). + * + */ + GateSeparatorPolynomial(const std::vector& betas, const std::vector& challenge) + : betas(betas) + { + for (const auto& u_k : challenge) { + partially_evaluate(u_k); + } + } + /** * @brief Retruns the element in #beta_products at place #idx. * @@ -130,7 +144,7 @@ template struct GateSeparatorPolynomial { BB_PROFILE static std::vector compute_beta_products(const std::vector& betas, const size_t log_num_monomials) { - + BB_BENCH_NAME("GateSeparatorPolynomial::compute_beta_products"); size_t pow_size = 1 << log_num_monomials; std::vector beta_products(pow_size); @@ -144,22 +158,62 @@ template struct GateSeparatorPolynomial { size_t num_threads = std::min(desired_num_threads, max_num_threads); // fewer than max if justified num_threads = num_threads > 0 ? 
num_threads : 1; // ensure num threads is >= 1 size_t iterations_per_thread = pow_size / num_threads; // actual iterations per thread + const size_t num_betas_per_thread = numeric::get_msb(iterations_per_thread); + + // Explanations of the algorithm: + // The product of the betas at index i (beta_products[i]) contains the multiplicative factor betas[j] if and + // only if the jth bit of i is 1 (j starting with 0 for the least significant bit). For instance, i = 13 = 1101 + // in binary, so the product is betas[0] * betas[2] * betas[3]. Observe that if we toggle the kth bit of i (0 to + // 1), i.e., we add 2^k to i, then the product is multiplied by betas[k]: beta_products[i + 2^k] = + // beta_products[i] * betas[k]. If we know the products for the interval of indices [0, 2^k), we can compute all + // the products for the interval of indices [2^k, 2^(k+1)) by multiplying each element by betas[k]. Iteratively, + // starting with beta_products[0] = 1, we can double the number of computed products at each iteration by + // multiplying the previous products by betas[k]. We first multiply beta_products[0] = 1 by betas[0], then we + // multiply beta_products[0] and beta_products[1] by betas[1], etc... + // + // We distribute the computation of the beta_products evenly across threads, i.e., thread number + // thread_idx will handle the interval of indices [thread_idx * iterations_per_thread, (thread_idx + 1) * + // iterations_per_thread). Note that for a given thread, all the processed indices have the same + // prefix in binary. Therefore, each beta_product of the thread is a multiple of this "prefix product". The + // successive products are then populated by the above algorithm whereby we double the interval at each + // iteration and multiply by the new beta to process the suffix bits. The difference is that we initialize the + // first product with this "prefix product" instead of 1. 
+ + // Compute the prefix products for each thread + std::vector thread_prefix_beta_products(num_threads); + thread_prefix_beta_products.at(0) = 1; + + // Same algorithm applies for the prefix products. The difference is that we start at a beta which is not the + // first one (index 0), but the one at index num_betas_per_thread. We process the high bits only. + // Example: If num_betas_per_thread = 3, we compute after the first iteration: + // (1, beta_3) + // 2nd iteration: (1, beta_3, beta_4, beta_3 * beta_4) + // 3nd iteration: (1, beta_3, beta_4, beta_3 * beta_4, beta_5, beta_3 * beta_5, beta_4 * beta_5, beta_3 * beta_4 + // * beta_5) + // etc .... + for (size_t beta_idx = num_betas_per_thread, window_size = 1; beta_idx < log_num_monomials; + beta_idx++, window_size <<= 1) { + const auto& beta = betas.at(beta_idx); + for (size_t j = 0; j < window_size; j++) { + thread_prefix_beta_products.at(window_size + j) = beta * thread_prefix_beta_products.at(j); + } + } - // TODO(https://github.com/AztecProtocol/barretenberg/issues/864): This computation is asymtotically slow as it - // does pow_size * log(pow_size) work. However, in practice, its super efficient because its trivially - // parallelizable and only takes 45ms for the whole 6 iter IVC benchmark. Its also very readable, so we're - // leaving it unoptimized for now. 
parallel_for(num_threads, [&](size_t thread_idx) { size_t start = thread_idx * iterations_per_thread; - size_t end = (thread_idx + 1) * iterations_per_thread; - for (size_t i = start; i < end; i++) { - auto res = FF(1); - for (size_t j = i, beta_idx = 0; j > 0; j >>= 1, beta_idx++) { - if ((j & 1) == 1) { - res *= betas[beta_idx]; - } + beta_products.at(start) = thread_prefix_beta_products.at(thread_idx); + + // Compute the suffix products for each thread + // Example: Assume we start with the prefix product = beta_3 * beta_5 + // After the first iteration, we get: (beta_3 * beta_5, beta_0 * beta_3 * beta_5) + // 2nd iteration: (beta_3 * beta_5, beta_0 * beta_3 * beta_5, beta_1 * beta_3 * beta_5, beta_0 * beta_1 * + // beta_3 * beta_5) + // etc ... + for (size_t beta_idx = 0, window_size = 1; beta_idx < num_betas_per_thread; beta_idx++, window_size <<= 1) { + const auto& beta = betas.at(beta_idx); + for (size_t j = 0; j < window_size; j++) { + beta_products.at(start + window_size + j) = beta * beta_products.at(start + j); } - beta_products[i] = res; } }); diff --git a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp index 7abc30789dff..4741b570fa8f 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp @@ -6,6 +6,7 @@ #include "polynomial.hpp" #include "barretenberg/common/assert.hpp" +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/common/slab_allocator.hpp" #include "barretenberg/common/thread.hpp" #include "barretenberg/numeric/bitop/get_msb.hpp" @@ -33,22 +34,24 @@ SharedShiftedVirtualZeroesArray _clone(const SharedShiftedVirtualZeroesArray size_t left_expansion = 0) { size_t expanded_size = array.size() + right_expansion + left_expansion; - std::shared_ptr> backing_clone = BackingMemory::allocate(expanded_size); + BackingMemory backing_clone = BackingMemory::allocate(expanded_size); 
// zero any left extensions to the array - memset(static_cast(backing_clone->raw_data()), 0, sizeof(Fr) * left_expansion); + memset(static_cast(backing_clone.raw_data), 0, sizeof(Fr) * left_expansion); // copy our cloned array over - memcpy(static_cast(backing_clone->raw_data() + left_expansion), + memcpy(static_cast(backing_clone.raw_data + left_expansion), static_cast(array.data()), sizeof(Fr) * array.size()); // zero any right extensions to the array - memset( - static_cast(backing_clone->raw_data() + left_expansion + array.size()), 0, sizeof(Fr) * right_expansion); - return { array.start_ - left_expansion, array.end_ + right_expansion, array.virtual_size_, backing_clone }; + memset(static_cast(backing_clone.raw_data + left_expansion + array.size()), 0, sizeof(Fr) * right_expansion); + return { + array.start_ - left_expansion, array.end_ + right_expansion, array.virtual_size_, std::move(backing_clone) + }; } template void Polynomial::allocate_backing_memory(size_t size, size_t virtual_size, size_t start_index) { + BB_BENCH_NAME("Polynomial::allocate_backing_memory"); BB_ASSERT_LTE(start_index + size, virtual_size); coefficients_ = SharedShiftedVirtualZeroesArray{ start_index, /* start index, used for shifted polynomials and offset 'islands' of non-zeroes */ @@ -69,8 +72,7 @@ void Polynomial::allocate_backing_memory(size_t size, size_t virtual_size, s */ template Polynomial::Polynomial(size_t size, size_t virtual_size, size_t start_index) { - PROFILE_THIS_NAME("polynomial allocation with zeroing"); - + BB_BENCH_NAME("Polynomial::Polynomial(size_t, size_t, size_t)"); allocate_backing_memory(size, virtual_size, start_index); size_t num_threads = calculate_num_threads(size); @@ -96,7 +98,6 @@ template Polynomial::Polynomial(size_t size, size_t virtual_si template Polynomial::Polynomial(size_t size, size_t virtual_size, size_t start_index, [[maybe_unused]] DontZeroMemory flag) { - PROFILE_THIS_NAME("polynomial allocation without zeroing"); 
allocate_backing_memory(size, virtual_size, start_index); } @@ -282,20 +283,17 @@ template Polynomial& Polynomial::operator-=(PolynomialSpan template Polynomial& Polynomial::operator*=(const Fr scaling_factor) { - const size_t num_threads = calculate_num_threads(size()); - const size_t range_per_thread = size() / num_threads; - const size_t leftovers = size() - (range_per_thread * num_threads); - parallel_for(num_threads, [&](size_t j) { - const size_t offset = j * range_per_thread; - const size_t end = (j == num_threads - 1) ? offset + range_per_thread + leftovers : offset + range_per_thread; - for (size_t i = offset; i < end; ++i) { - data()[i] *= scaling_factor; - } - }); - + parallel_for([scaling_factor, this](const ThreadChunk& chunk) { multiply_chunk(chunk, scaling_factor); }); return *this; } +template void Polynomial::multiply_chunk(const ThreadChunk& chunk, const Fr scaling_factor) +{ + for (size_t i : chunk.range(size())) { + data()[i] *= scaling_factor; + } +} + template Polynomial Polynomial::create_non_parallel_zero_init(size_t size, size_t virtual_size) { Polynomial p(size, virtual_size, Polynomial::DontZeroMemory::FLAG); @@ -338,16 +336,18 @@ template void Polynomial::add_scaled(PolynomialSpan { BB_ASSERT_LTE(start_index(), other.start_index); BB_ASSERT_GTE(end_index(), other.end_index()); - const size_t num_threads = calculate_num_threads(other.size()); - const size_t range_per_thread = other.size() / num_threads; - const size_t leftovers = other.size() - (range_per_thread * num_threads); - parallel_for(num_threads, [&](size_t j) { - const size_t offset = j * range_per_thread + other.start_index; - const size_t end = (j == num_threads - 1) ? 
offset + range_per_thread + leftovers : offset + range_per_thread; - for (size_t i = offset; i < end; ++i) { - at(i) += scaling_factor * other[i]; - } - }); + parallel_for( + [&other, scaling_factor, this](const ThreadChunk& chunk) { add_scaled_chunk(chunk, other, scaling_factor); }); +} + +template +void Polynomial::add_scaled_chunk(const ThreadChunk& chunk, PolynomialSpan other, Fr scaling_factor) & +{ + // Iterate over the chunk of the other polynomial's range + for (size_t offset : chunk.range(other.size())) { + size_t index = other.start_index + offset; + at(index) += scaling_factor * other[index]; + } } template Polynomial Polynomial::shifted() const diff --git a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp index 54e776badb94..8d2d174c7625 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp @@ -6,8 +6,9 @@ #pragma once #include "barretenberg/common/assert.hpp" +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/common/mem.hpp" -#include "barretenberg/common/op_count.hpp" +#include "barretenberg/common/thread.hpp" #include "barretenberg/common/throw_or_abort.hpp" #include "barretenberg/common/zip_view.hpp" #include "barretenberg/constants.hpp" @@ -35,14 +36,14 @@ template struct PolynomialSpan { size_t size() const { return span.size(); } Fr& operator[](size_t index) { - BB_ASSERT_GTE(index, start_index); - BB_ASSERT_LT(index, end_index()); + ASSERT_DEBUG(index >= start_index); + ASSERT_DEBUG(index < end_index()); return span[index - start_index]; } const Fr& operator[](size_t index) const { - BB_ASSERT_GTE(index, start_index); - BB_ASSERT_LT(index, end_index()); + ASSERT_DEBUG(index >= start_index); + ASSERT_DEBUG(index < end_index()); return span[index - start_index]; } PolynomialSpan subspan(size_t offset, size_t length) @@ -79,7 +80,7 @@ template class Polynomial { 
Polynomial(size_t size, size_t virtual_size, size_t start_index = 0); // Intended just for plonk, where size == virtual_size always Polynomial(size_t size) - : Polynomial(size, size){}; + : Polynomial(size, size) {}; // Constructor that does not initialize values, use with caution to save time. Polynomial(size_t size, size_t virtual_size, size_t start_index, DontZeroMemory flag); @@ -101,10 +102,7 @@ template class Polynomial { {} /** - * @brief Utility to efficiently construct a shift from the original polynomial. - * - * @param virtual_size the size of the polynomial to be shifted - * @return Polynomial + * @brief Utility to create a shiftable polynomial of given virtual size. */ static Polynomial shiftable(size_t virtual_size) { @@ -251,6 +249,8 @@ template class Polynomial { */ void add_scaled(PolynomialSpan other, Fr scaling_factor) &; + void add_scaled_chunk(const ThreadChunk& chunk, PolynomialSpan other, Fr scaling_factor) &; + /** * @brief adds the polynomial q(X) 'other'. * @@ -272,6 +272,8 @@ template class Polynomial { */ Polynomial& operator*=(Fr scaling_factor); + void multiply_chunk(const ThreadChunk& chunk, Fr scaling_factor); + /** * @brief Add random values to the coefficients of a polynomial. 
In practice, this is used for ensuring the * commitment and evaluation of a polynomial don't leak information about the coefficients in the context of zero @@ -311,7 +313,7 @@ template class Polynomial { static Polynomial random(size_t size, size_t start_index = 0) { - PROFILE_THIS_NAME("generate random polynomial"); + BB_BENCH_NAME("generate random polynomial"); return random(size - start_index, size, start_index); } diff --git a/barretenberg/cpp/src/barretenberg/polynomials/row_disabling_polynomial.hpp b/barretenberg/cpp/src/barretenberg/polynomials/row_disabling_polynomial.hpp index 57d37bc004b5..a9582b194360 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/row_disabling_polynomial.hpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/row_disabling_polynomial.hpp @@ -5,8 +5,8 @@ // ===================== #pragma once +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/common/compiler_hints.hpp" -#include "barretenberg/common/op_count.hpp" #include "barretenberg/common/thread.hpp" #include "barretenberg/stdlib/primitives/bool/bool.hpp" diff --git a/barretenberg/cpp/src/barretenberg/polynomials/shared_shifted_virtual_zeroes_array.hpp b/barretenberg/cpp/src/barretenberg/polynomials/shared_shifted_virtual_zeroes_array.hpp index 2bbfef87bd50..e707ffc70503 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/shared_shifted_virtual_zeroes_array.hpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/shared_shifted_virtual_zeroes_array.hpp @@ -37,8 +37,8 @@ template struct SharedShiftedVirtualZeroesArray { */ void set(size_t index, const T& value) { - BB_ASSERT_GTE(index, start_); - BB_ASSERT_LT(index, end_); + ASSERT_DEBUG(index >= start_); + ASSERT_DEBUG(index < end_); data()[index - start_] = value; } @@ -56,15 +56,7 @@ template struct SharedShiftedVirtualZeroesArray { const T& get(size_t index, size_t virtual_padding = 0) const { static const T zero{}; - if (index >= virtual_size_ + virtual_padding) { - info("BAD GET(): index = ", - 
index, - ", virtual_size_ = ", - virtual_size_, - ", virtual_padding = ", - virtual_padding); - } - BB_ASSERT_LT(index, virtual_size_ + virtual_padding); + ASSERT_DEBUG(index < virtual_size_ + virtual_padding); if (index >= start_ && index < end_) { return data()[index - start_]; } @@ -77,8 +69,8 @@ template struct SharedShiftedVirtualZeroesArray { * * @return A pointer to the beginning of the memory-backed range. */ - T* data() { return backing_memory_ ? backing_memory_->raw_data() : nullptr; } - const T* data() const { return backing_memory_ ? backing_memory_->raw_data() : nullptr; } + T* data() { return backing_memory_.raw_data; } + const T* data() const { return backing_memory_.raw_data; } // Our size is end_ - start_. Note that we need to offset end_ when doing a shift to // correctly maintain the size. size_t size() const { return end_ - start_; } @@ -93,15 +85,15 @@ template struct SharedShiftedVirtualZeroesArray { T& operator[](size_t index) { - BB_ASSERT_GTE(index, start_); - BB_ASSERT_LT(index, end_); + ASSERT_DEBUG(index >= start_); + ASSERT_DEBUG(index < end_); return data()[index - start_]; } // get() is more useful, but for completeness with the non-const operator[] const T& operator[](size_t index) const { - BB_ASSERT_GTE(index, start_); - BB_ASSERT_LT(index, end_); + ASSERT_DEBUG(index >= start_); + ASSERT_DEBUG(index < end_); return data()[index - start_]; } @@ -135,10 +127,10 @@ template struct SharedShiftedVirtualZeroesArray { size_t virtual_size_ = 0; /** - * @brief Shared pointer to the underlying memory array. + * @brief The underlying memory storage. * - * The memory is allocated for at least the range [start_, end_). It is shared across instances to allow - * for efficient memory use when arrays are shifted or otherwise manipulated. + * The memory is allocated for at least the range [start_, end_). Shared pointers within BackingMemory + * allow for efficient memory use when arrays are shifted or otherwise manipulated. 
*/ - std::shared_ptr> backing_memory_; + BackingMemory backing_memory_; }; diff --git a/barretenberg/cpp/src/barretenberg/polynomials/univariate.hpp b/barretenberg/cpp/src/barretenberg/polynomials/univariate.hpp index 094c7d439a6e..228de6fe30e6 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/univariate.hpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/univariate.hpp @@ -69,6 +69,7 @@ template Univariate(UnivariateCoefficientBasis monomial) { static_assert(domain_start == 0); @@ -86,6 +87,7 @@ template Univariate(UnivariateCoefficientBasis monomial) { static_assert(domain_start == 0); @@ -779,6 +781,27 @@ template +Univariate operator+( + const Fr& ff, const UnivariateView& uv) +{ + return uv + ff; +} + +template +Univariate operator-( + const Fr& ff, const UnivariateView& uv) +{ + return -uv + ff; +} + +template +Univariate operator*( + const Fr& ff, const UnivariateView& uv) +{ + return uv * ff; +} + /** * @brief Create a sub-array of `elements` at the indices given in the template pack `Is`, converting them * to the new type T. 
diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/combiner.test.cpp b/barretenberg/cpp/src/barretenberg/protogalaxy/combiner.test.cpp index 553e08d6f000..222297a2a9c1 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/combiner.test.cpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/combiner.test.cpp @@ -1,8 +1,8 @@ #include "barretenberg/flavor/mega_flavor.hpp" #include "barretenberg/honk/utils/testing.hpp" +#include "barretenberg/protogalaxy/constants.hpp" #include "barretenberg/protogalaxy/protogalaxy_prover_internal.hpp" #include "barretenberg/relations/ultra_arithmetic_relation.hpp" -#include "barretenberg/ultra_honk/decider_keys.hpp" #include using namespace bb; @@ -10,7 +10,6 @@ using namespace bb; using Flavor = MegaFlavor; using Polynomial = typename Flavor::Polynomial; using FF = typename Flavor::FF; -constexpr size_t NUM_KEYS = 2; /** * @brief Extend the ProtogalaxyProverInternal class to compute the combiner *without* optimistically skipping @@ -22,13 +21,12 @@ constexpr size_t NUM_KEYS = 2; * we can test whether the optimistic skipping algorithm produces the correct result * */ -class PGInternalTest : public ProtogalaxyProverInternal> { +class PGInternalTest : public ProtogalaxyProverInternal> { public: using ExtendedUnivariatesNoOptimisticSkipping = typename Flavor::template ProverUnivariates; - using UnivariateRelationParametersNoOptimisticSkipping = - bb::RelationParameters>; + using UnivariateRelationParametersNoOptimisticSkipping = bb::RelationParameters>; using ExtendedUnivariatesTypeNoOptimisticSkipping = std::conditional_t; @@ -37,7 +35,7 @@ class PGInternalTest : public ProtogalaxyProverInternal& gate_separators, const UnivariateRelationParametersNoOptimisticSkipping& relation_parameters, const UnivariateSubrelationSeparators& alphas) @@ -59,13 +57,13 @@ class PGInternalTest : public ProtogalaxyProverInternal& gate_separators, const UnivariateRelationParametersNoOptimisticSkipping& relation_parameters, const 
UnivariateSubrelationSeparators& alphas, TupleOfTuplesOfUnivariatesNoOptimisticSkipping& univariate_accumulators) { - PROFILE_THIS(); + BB_BENCH(); // Determine the number of threads over which to distribute the work // The polynomial size is given by the virtual size since the computation includes @@ -73,7 +71,7 @@ class PGInternalTest : public ProtogalaxyProverInternalpolynomials.w_l.virtual_size(); const size_t num_threads = compute_num_threads(common_polynomial_size); - // Univariates are optimised for usual PG, but we need the unoptimised version for tests (it's a version that + // Univariates are optimized for usual PG, but we need the unoptimized version for tests (it's a version that // doesn't skip computation), so we need to define types depending on the template instantiation using ThreadAccumulators = TupleOfTuplesOfUnivariatesNoOptimisticSkipping; // Construct univariate accumulator containers; one per thread @@ -92,7 +90,7 @@ class PGInternalTest : public ProtogalaxyProverInternal(extended_univariates, keys, idx); @@ -117,7 +115,7 @@ class PGInternalTest : public ProtogalaxyProverInternal; - using DeciderProvingKeys = DeciderProvingKeys_; + using ProverInstance = ProverInstance_; const auto restrict_to_standard_arithmetic_relation = [](auto& polys) { std::fill(polys.q_arith.coeffs().begin(), polys.q_arith.coeffs().end(), 1); @@ -195,19 +192,18 @@ TEST(Protogalaxy, CombinerOn2Keys) // Combiner test on prover polynomials containing random values, restricted to only the standard arithmetic // relation. 
if (is_random_input) { - std::vector> keys_data(NUM_KEYS); + std::array, bb::NUM_INSTANCES> keys; - for (size_t idx = 0; idx < NUM_KEYS; idx++) { - auto key = std::make_shared(); + for (size_t idx = 0; idx < bb::NUM_INSTANCES; idx++) { + auto key = std::make_shared(); auto prover_polynomials = get_sequential_prover_polynomials( /*log_circuit_size=*/1, idx * 128); restrict_to_standard_arithmetic_relation(prover_polynomials); key->polynomials = std::move(prover_polynomials); key->set_dyadic_size(2); - keys_data[idx] = key; + keys[idx] = key; } - DeciderProvingKeys keys{ keys_data }; PGInternalTest::UnivariateSubrelationSeparators alphas; alphas.fill(bb::Univariate(FF(0))); // focus on the arithmetic relation only GateSeparatorPolynomial gate_separators({ 2 }, /*log_num_monomials=*/1); @@ -229,19 +225,18 @@ TEST(Protogalaxy, CombinerOn2Keys) 9171435464UL }); EXPECT_EQ(result_no_skipping, expected_result); } else { - std::vector> keys_data(NUM_KEYS); + std::array, bb::NUM_INSTANCES> keys; - for (size_t idx = 0; idx < NUM_KEYS; idx++) { - auto key = std::make_shared(); + for (size_t idx = 0; idx < bb::NUM_INSTANCES; idx++) { + auto key = std::make_shared(); auto prover_polynomials = get_zero_prover_polynomials( /*log_circuit_size=*/1); restrict_to_standard_arithmetic_relation(prover_polynomials); key->polynomials = std::move(prover_polynomials); key->set_dyadic_size(2); - keys_data[idx] = key; + keys[idx] = key; } - DeciderProvingKeys keys{ keys_data }; PGInternalTest::UnivariateSubrelationSeparators alphas; alphas.fill(bb::Univariate(FF(0))); // focus on the arithmetic relation only @@ -270,7 +265,7 @@ TEST(Protogalaxy, CombinerOn2Keys) restrict_to_standard_arithmetic_relation(keys[0]->polynomials); restrict_to_standard_arithmetic_relation(keys[1]->polynomials); - /* DeciderProvingKey 0 DeciderProvingKey 1 + /* ProverInstance 0 ProverInstance 1 w_l w_r w_o q_m q_l q_r q_o q_c w_l w_r w_o q_m q_l q_r q_o q_c 1 2 3 0 1 1 -1 0 3 4 7 0 1 1 -1 0 0 4 4 0 1 1 -1 0 1 4 4 1 0 0 
-1 0 */ @@ -310,8 +305,7 @@ TEST(Protogalaxy, CombinerOn2Keys) // Check that the optimized combiner computation yields a result consistent with the unoptimized version TEST(Protogalaxy, CombinerOptimizationConsistency) { - using DeciderProvingKey = DeciderProvingKey_; - using DeciderProvingKeys = DeciderProvingKeys_; + using ProverInstance = ProverInstance_; using UltraArithmeticRelation = UltraArithmeticRelation; constexpr size_t UNIVARIATE_LENGTH = 12; @@ -333,20 +327,18 @@ TEST(Protogalaxy, CombinerOptimizationConsistency) // Combiner test on prover polynomisls containing random values, restricted to only the standard arithmetic // relation. if (is_random_input) { - std::vector> keys_data(NUM_KEYS); - ASSERT_EQ(NUM_KEYS, 2U); // Don't want to handle more here + std::array, bb::NUM_INSTANCES> keys; - for (size_t idx = 0; idx < NUM_KEYS; idx++) { - auto key = std::make_shared(); + for (size_t idx = 0; idx < bb::NUM_INSTANCES; idx++) { + auto key = std::make_shared(); auto prover_polynomials = get_sequential_prover_polynomials( /*log_circuit_size=*/1, idx * 128); restrict_to_standard_arithmetic_relation(prover_polynomials); key->polynomials = std::move(prover_polynomials); key->set_dyadic_size(2); - keys_data[idx] = key; + keys[idx] = key; } - DeciderProvingKeys keys{ keys_data }; PGInternalTest::UnivariateSubrelationSeparators alphas; alphas.fill(bb::Univariate(FF(0))); // focus on the arithmetic relation only GateSeparatorPolynomial gate_separators({ 2 }, /*log_num_monomials=*/1); @@ -361,25 +353,25 @@ TEST(Protogalaxy, CombinerOptimizationConsistency) // Accumulate arithmetic relation over 2 rows on the second key for (size_t i = 0; i < 2; i++) { UltraArithmeticRelation::accumulate(std::get<0>(temporary_accumulator), - keys_data[NUM_KEYS - 1]->polynomials.get_row(i), + keys[bb::NUM_INSTANCES - 1]->polynomials.get_row(i), relation_parameters, gate_separators[i]); } // Get the result of the 0th subrelation of the arithmetic relation FF key_offset = 
std::get<0>(temporary_accumulator)[0]; // Subtract it from q_c[0] (it directly affect the target sum, making it zero and enabling the optimisation) - keys_data[1]->polynomials.q_c.at(0) -= key_offset; + keys[1]->polynomials.q_c.at(0) -= key_offset; std::vector extended_polynomials; // These hold the extensions of prover polynomials // Manually extend all polynomials. Create new ProverPolynomials from extended values - for (size_t idx = NUM_KEYS; idx < UNIVARIATE_LENGTH; idx++) { + for (size_t idx = bb::NUM_INSTANCES; idx < UNIVARIATE_LENGTH; idx++) { - auto key = std::make_shared(); + auto key = std::make_shared(); auto prover_polynomials = get_zero_prover_polynomials(1); for (auto [key_0_polynomial, key_1_polynomial, new_polynomial] : - zip_view(keys_data[0]->polynomials.get_all(), - keys_data[1]->polynomials.get_all(), + zip_view(keys[0]->polynomials.get_all(), + keys[1]->polynomials.get_all(), prover_polynomials.get_all())) { for (size_t i = 0; i < /*circuit_size*/ 2; i++) { new_polynomial.at(i) = @@ -393,17 +385,17 @@ TEST(Protogalaxy, CombinerOptimizationConsistency) for (size_t idx = 0; idx < UNIVARIATE_LENGTH; idx++) { // Note: {} is required to initialize the tuple contents. Otherwise the values contain garbage. 
TupleOfArraysOfValues accumulator{}; - if (idx < NUM_KEYS) { + if (idx < bb::NUM_INSTANCES) { for (size_t i = 0; i < 2; i++) { UltraArithmeticRelation::accumulate(std::get<0>(accumulator), - keys_data[idx]->polynomials.get_row(i), + keys[idx]->polynomials.get_row(i), relation_parameters, gate_separators[i]); } } else { for (size_t i = 0; i < 2; i++) { UltraArithmeticRelation::accumulate(std::get<0>(accumulator), - extended_polynomials[idx - NUM_KEYS].get_row(i), + extended_polynomials[idx - bb::NUM_INSTANCES].get_row(i), relation_parameters, gate_separators[i]); } @@ -421,19 +413,18 @@ TEST(Protogalaxy, CombinerOptimizationConsistency) EXPECT_EQ(result_no_skipping, expected_result); EXPECT_EQ(result_with_skipping, expected_result); } else { - std::vector> keys_data(NUM_KEYS); + std::array, bb::NUM_INSTANCES> keys; - for (size_t idx = 0; idx < NUM_KEYS; idx++) { - auto key = std::make_shared(); + for (size_t idx = 0; idx < bb::NUM_INSTANCES; idx++) { + auto key = std::make_shared(); auto prover_polynomials = get_zero_prover_polynomials( /*log_circuit_size=*/1); restrict_to_standard_arithmetic_relation(prover_polynomials); key->polynomials = std::move(prover_polynomials); key->set_dyadic_size(2); - keys_data[idx] = key; + keys[idx] = key; } - DeciderProvingKeys keys{ keys_data }; PGInternalTest::UnivariateSubrelationSeparators alphas; alphas.fill(bb::Univariate(FF(0))); // focus on the arithmetic relation only @@ -462,7 +453,7 @@ TEST(Protogalaxy, CombinerOptimizationConsistency) restrict_to_standard_arithmetic_relation(keys[0]->polynomials); restrict_to_standard_arithmetic_relation(keys[1]->polynomials); - /* DeciderProvingKey 0 DeciderProvingKey 1 + /* ProverInstance 0 ProverInstance 1 w_l w_r w_o q_m q_l q_r q_o q_c w_l w_r w_o q_m q_l q_r q_o q_c 1 2 3 0 1 1 -1 0 3 4 7 0 1 1 -1 0 0 4 4 0 1 1 -1 0 1 4 4 1 0 0 -1 0 */ diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/constants.hpp b/barretenberg/cpp/src/barretenberg/protogalaxy/constants.hpp new file mode 
100644 index 000000000000..550b8d19a561 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/constants.hpp @@ -0,0 +1,78 @@ +#pragma once + +#include "barretenberg/flavor/flavor_concepts.hpp" +#include + +namespace bb { + +/** + * We collect the constants pertaining to Protogalaxy + * + */ + +// Number of instances to be folded +static constexpr std::size_t NUM_INSTANCES = 2; + +// Number of coefficients whose calculation is to be skipped in the calculation of the combiner +static constexpr std::size_t SKIP_COUNT = NUM_INSTANCES - 1; + +/** + * Write \f$\omega_0, \dots, \omega_k\f$, for a series of prover instances. Each instance is given by + * \f$\omega_i = (p_{1,i}, \dots, p_{M,i}, \alpha_{1,i}, \dots, \alpha_{N,i}, \theta_{1,i}, \dots, \theta_{6,i})\f$, + * where \f$p_{j,i}\f$ are the prover polynomials, \f$\alpha_{j,i}\f$ are the batching challenges, and + * \f$\theta_{j,i}\f$ are the relation parameters. + * + * To fold these instances together we need to compute the combiner polynomial \f$G\f$ as defined in the Protogalaxy + * paper. This polynomial is defined as + * \f[ + * G(X) = \sum_{1}^{2^n} pow(\beta^{\ast}) f_i( \sum_{j=0}^k L_j(X) \omega_j ) + * \f] + * where \f$n\f$ is the dyadic size of the circuit from which the instances are derived. We now compute its + * degree. 
+ * + * If \f$R_1, \dots, R_N\f$ are the polynomials defining all the subrelations that make up the relations listed in + * Flavor::Relations_, then for a ProverInstance \f$\omega = (p_1, \dots, p_M, \theta_1, \dots, \theta_6, \alpha_1, + * \dots, \alpha_N)\f$ we have + * \f[ + * f_i(\omega) = \sum_{i=1}^{2^n} \alpha_i R_i(p_1, \dots, p_M, \theta_1, \dots, \theta_6) + * \f] + * + * Replacing \f$\omega\f$ with \f$\sum_{j=0}^k L_j(X) \omega_j\f$, we get + * \f[ + * f_i(\sum_{j=0}^k L_i(X) \omega_i) = \sum_{i=1}^N + * (\sum_{j=0}^k L_j(X) \alpha_{i,j}) * R_i(\sum_{j=0}^k L_j(X) p_{1,j}, \dots, \sum_{j=0}^k L_j(X) + * \theta_{6,j}) + * \f] + * + * The constant Flavor::MAX_TOTAL_RELATION_LENGTH is equal to 1 plus the maximum of the degrees of the \f$R_i\f$'s, + * where the \f$\theta_i\f$'s are regarded as variables. The polynomials \f$L_j\f$ have degree \f$k\f$. Hence + * - The maximum degree of a folded subrelation polynomial (with the relation parameters regarded as variables) is + * (Flavor::MAX_TOTAL_RELATION_LENGTH - 1) * k + * - The degree of \f$f_i\f$ is: (Flavor::MAX_TOTAL_RELATION_LENGTH - 1 + k) * k + * + * For k = 2 the above formulas become: + * - EXTENDED_LENGTH = number of evaluations needed to determine a folded subrelation + * = Flavor::MAX_TOTAL_RELATION_LENGTH + * - BATCHED_EXTENDED_LENGTH = number of evaluations needed to determine the combiner + * = Flavor::MAX_TOTAL_RELATION_LENGTH + 1 + */ +template + requires(IsMegaFlavor || IsUltraOrMegaHonk) +static constexpr size_t computed_extended_length() +{ + return Flavor::MAX_TOTAL_RELATION_LENGTH; +} +/** + * @brief Compute the number of evaluation neeeded to represent the combiner polynomial (\f$G\f$ in the Protogalaxy + * paper). See the documentation for computed_extended_length() for the calculation. 
+ * + * @tparam Flavor + */ +template + requires(IsMegaFlavor || IsUltraOrMegaHonk) +static constexpr size_t computed_batched_extended_length() +{ + return Flavor::MAX_TOTAL_RELATION_LENGTH + 1; +} + +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/folding_result.hpp b/barretenberg/cpp/src/barretenberg/protogalaxy/folding_result.hpp index 259bd79bc3b8..4ec1f6601b43 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/folding_result.hpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/folding_result.hpp @@ -7,15 +7,15 @@ #pragma once #include "barretenberg/flavor/flavor.hpp" #include "barretenberg/relations/relation_parameters.hpp" -#include "barretenberg/ultra_honk/decider_proving_key.hpp" +#include "barretenberg/ultra_honk/prover_instance.hpp" namespace bb { /** - * @brief The result of running the Protogalaxy prover containing a new accumulator as well as the proof data to - * instantiate the verifier transcript. + * @brief The result of one iteraton of Protogalaxy proving, containing a new accumulator as well as the proof data to + * instantiate the verifier transcript asserting that the accumulator has been correctly constructed. */ template struct FoldingResult { public: - std::shared_ptr> accumulator; + std::shared_ptr> accumulator; std::vector proof; }; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/folding_test_utils.hpp b/barretenberg/cpp/src/barretenberg/protogalaxy/folding_test_utils.hpp index 737200880840..993faac3e072 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/folding_test_utils.hpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/folding_test_utils.hpp @@ -21,10 +21,9 @@ namespace bb { * set, compute_row_evaluations will operate on all rows. 
*/ template -static bool check_accumulator_target_sum_manual(const std::shared_ptr>& accumulator) +static bool check_accumulator_target_sum_manual(const std::shared_ptr>& accumulator) { - using DeciderProvingKeys = DeciderProvingKeys_; - using PGInternal = ProtogalaxyProverInternal; + using PGInternal = ProtogalaxyProverInternal>; const size_t accumulator_size = accumulator->dyadic_size(); PGInternal pg_internal; diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy.test.cpp b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy.test.cpp index 21eb15a917c1..8ff28d4bf9e9 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy.test.cpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy.test.cpp @@ -21,10 +21,8 @@ auto& engine = numeric::get_debug_randomness(); template class ProtogalaxyTests : public testing::Test { public: using VerificationKey = typename Flavor::VerificationKey; - using DeciderProvingKey = DeciderProvingKey_; - using DeciderProvingKeys = DeciderProvingKeys_; - using DeciderVerificationKey = DeciderVerificationKey_; - using DeciderVerificationKeys = DeciderVerificationKeys_; + using ProverInstance = ProverInstance_; + using VerifierInstance = VerifierInstance_; using ProtogalaxyProver = ProtogalaxyProver_; using FF = typename Flavor::FF; using Affine = typename Flavor::Commitment; @@ -39,11 +37,12 @@ template class ProtogalaxyTests : public testing::Test { using DeciderProver = DeciderProver_; using DeciderVerifier = DeciderVerifier_; using FoldingProver = ProtogalaxyProver_; - using FoldingVerifier = ProtogalaxyVerifier_; - using PGInternal = ProtogalaxyProverInternal; + using FoldingVerifier = ProtogalaxyVerifier_; + using PGInternal = ProtogalaxyProverInternal; + using ProverInstances = ProtogalaxyProver::ProverInstances; + using VerifierInstances = std::array, NUM_INSTANCES>; - using TupleOfKeys = std::tuple>, - std::vector>>; + using TupleOfKeys = std::tuple; static void SetUpTestSuite() { 
bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } @@ -57,14 +56,17 @@ template class ProtogalaxyTests : public testing::Test { } // Construct decider keys for a provided circuit and add to tuple - static void construct_keys(TupleOfKeys& keys, Builder& builder, TraceSettings trace_settings = TraceSettings{}) + static void construct_tuple_of_keys(TupleOfKeys& keys, + Builder& builder, + size_t idx = 0, + TraceSettings trace_settings = TraceSettings{}) { - auto decider_proving_key = std::make_shared(builder, trace_settings); - auto verification_key = std::make_shared(decider_proving_key->get_precomputed()); - auto decider_verification_keys = std::make_shared(verification_key); - get<0>(keys).emplace_back(decider_proving_key); - get<1>(keys).emplace_back(decider_verification_keys); + auto prover_instance = std::make_shared(builder, trace_settings); + auto verification_key = std::make_shared(prover_instance->get_precomputed()); + auto verifier_instances = std::make_shared(verification_key); + get<0>(keys)[idx] = prover_instance; + get<1>(keys)[idx] = verifier_instances; } // Construct a given numer of decider key pairs @@ -76,17 +78,17 @@ template class ProtogalaxyTests : public testing::Test { auto builder = typename Flavor::CircuitBuilder(); construct_circuit(builder); - construct_keys(keys, builder, trace_settings); + construct_tuple_of_keys(keys, builder, idx, trace_settings); } return keys; } - static std::tuple, std::shared_ptr> fold_and_verify( - const std::vector>& proving_keys, - const std::vector>& verification_keys, + static std::tuple, std::shared_ptr> fold_and_verify( + const ProverInstances& prover_instances, + const VerifierInstances& verification_keys, ExecutionTraceUsageTracker trace_usage_tracker = ExecutionTraceUsageTracker{}) { - FoldingProver folding_prover(proving_keys, + FoldingProver folding_prover(prover_instances, verification_keys, std::make_shared(), trace_usage_tracker); @@ -97,8 +99,8 @@ template class ProtogalaxyTests : public 
testing::Test { return { prover_accumulator, verifier_accumulator }; } - static void decide_and_verify(const std::shared_ptr& prover_accumulator, - const std::shared_ptr& verifier_accumulator, + static void decide_and_verify(const std::shared_ptr& prover_accumulator, + const std::shared_ptr& verifier_accumulator, bool expected_result) { DeciderProver decider_prover(prover_accumulator); @@ -121,16 +123,16 @@ template class ProtogalaxyTests : public testing::Test { auto builder = typename Flavor::CircuitBuilder(); construct_circuit(builder); - auto decider_pk = std::make_shared(builder); + auto prover_inst = std::make_shared(builder); - WitnessComputation::complete_proving_key_for_test(decider_pk); + WitnessComputation::complete_prover_instance_for_test(prover_inst); - for (auto& alpha : decider_pk->alphas) { + for (auto& alpha : prover_inst->alphas) { alpha = FF::random_element(); } PGInternal pg_internal; auto full_honk_evals = pg_internal.compute_row_evaluations( - decider_pk->polynomials, decider_pk->alphas, decider_pk->relation_parameters); + prover_inst->polynomials, prover_inst->alphas, prover_inst->relation_parameters); // Evaluations should be 0 for valid circuit for (const auto& eval : full_honk_evals.coeffs()) { @@ -198,7 +200,7 @@ template class ProtogalaxyTests : public testing::Test { target_sum += full_honk_evals[i] * gate_separators[i]; } - auto accumulator = std::make_shared(); + auto accumulator = std::make_shared(); accumulator->polynomials = std::move(full_polynomials); accumulator->set_dyadic_size(1 << log_size); accumulator->gate_challenges = betas; @@ -206,7 +208,10 @@ template class ProtogalaxyTests : public testing::Test { accumulator->relation_parameters = relation_parameters; accumulator->alphas = alphas; - auto deltas = compute_round_challenge_pows(log_size, FF::random_element()); + std::vector deltas(log_size); + for (size_t idx = 0; idx < log_size; idx++) { + deltas[idx] = FF::random_element(); + } auto perturbator = 
pg_internal.compute_perturbator(accumulator, deltas); // Ensure the constant coefficient of the perturbator is equal to the target sum as indicated by the paper @@ -224,7 +229,7 @@ template class ProtogalaxyTests : public testing::Test { auto combiner = bb::Univariate(std::array{ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 }); auto combiner_quotient = PGInternal::compute_combiner_quotient(perturbator_evaluation, combiner); - // K(i) = (G(i) - ( L_0(i) * F(\alpha)) / Z(i), i = {2,.., 13} for DeciderProvingKeys::NUM = 2 + // K(i) = (G(i) - ( L_0(i) * F(\alpha)) / Z(i), i = {2,.., 13} for ProverInstances::NUM = 2 // K(i) = (G(i) - (1 - i) * F(\alpha)) / i * (i - 1) auto expected_evals = bb::Univariate(std::array{ (FF(22) - (FF(1) - FF(2)) * perturbator_evaluation) / (FF(2) * FF(2 - 1)), @@ -253,16 +258,16 @@ template class ProtogalaxyTests : public testing::Test { { Builder builder1; stdlib::recursion::PairingPoints::add_default_to_public_inputs(builder1); - auto pk_1 = std::make_shared(builder1); + auto pk_1 = std::make_shared(builder1); pk_1->relation_parameters.eta = 1; Builder builder2; builder2.add_variable(3); stdlib::recursion::PairingPoints::add_default_to_public_inputs(builder2); - auto pk_2 = std::make_shared(builder2); + auto pk_2 = std::make_shared(builder2); pk_2->relation_parameters.eta = 3; - DeciderProvingKeys pks{ { pk_1, pk_2 } }; + ProverInstances pks{ { pk_1, pk_2 } }; auto relation_parameters_no_optimistic_skipping = PGInternal::template compute_extended_relation_parameters< typename PGInternal::UnivariateRelationParametersNoOptimisticSkipping>(pks); auto relation_parameters = PGInternal::template compute_extended_relation_parameters< @@ -270,7 +275,7 @@ template class ProtogalaxyTests : public testing::Test { bb::Univariate expected_eta{ { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21 } }; EXPECT_EQ(relation_parameters_no_optimistic_skipping.eta, expected_eta); - // Optimised relation parameters are the same, we just don't compute any values for 
non-used indices when + // Optimized relation parameters are the same, we just don't compute any values for non-used indices when // deriving values from them for (size_t i = 0; i < 11; i++) { EXPECT_EQ(relation_parameters.eta.evaluations[i], expected_eta.evaluations[i]); @@ -278,23 +283,23 @@ template class ProtogalaxyTests : public testing::Test { } /** - * @brief Given two dummy decider proving_keys with the batching challenges alphas set (one for each subrelation) - * ensure combining them in a univariate of desired length works as expected. + * @brief Given two dummy decider prover_instances with the batching challenges alphas set (one for each + * subrelation) ensure combining them in a univariate of desired length works as expected. */ static void test_compute_and_extend_alphas() { Builder builder1; stdlib::recursion::PairingPoints::add_default_to_public_inputs(builder1); - auto pk_1 = std::make_shared(builder1); + auto pk_1 = std::make_shared(builder1); pk_1->alphas.fill(2); Builder builder2; builder2.add_variable(3); stdlib::recursion::PairingPoints::add_default_to_public_inputs(builder2); - auto pk_2 = std::make_shared(builder2); + auto pk_2 = std::make_shared(builder2); pk_2->alphas.fill(4); - DeciderProvingKeys pks{ { pk_1, pk_2 } }; + ProverInstances pks{ { pk_1, pk_2 } }; auto alphas = PGInternal::compute_and_extend_alphas(pks); bb::Univariate expected_alphas{ { 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24 } }; @@ -313,8 +318,8 @@ template class ProtogalaxyTests : public testing::Test { auto check_fold_and_decide = [](Builder& circuit_1, Builder& circuit_2) { // Construct decider key pairs for each TupleOfKeys keys; - construct_keys(keys, circuit_1); - construct_keys(keys, circuit_2); + construct_tuple_of_keys(keys, circuit_1, 0); + construct_tuple_of_keys(keys, circuit_2, 1); // Perform prover and verifier folding auto [prover_accumulator, verifier_accumulator] = fold_and_verify(get<0>(keys), get<1>(keys)); @@ -387,16 +392,16 @@ template class 
ProtogalaxyTests : public testing::Test { // Erroneously set a non-zero wire value to zero in one of the lookup gates for (auto& wire_3_witness_idx : builder1.blocks.lookup.w_o()) { - if (wire_3_witness_idx != builder1.zero_idx) { - wire_3_witness_idx = builder1.zero_idx; + if (wire_3_witness_idx != builder1.zero_idx()) { + wire_3_witness_idx = builder1.zero_idx(); break; } } // Construct the key pairs for each TupleOfKeys keys; - construct_keys(keys, builder1); - construct_keys(keys, builder2); + construct_tuple_of_keys(keys, builder1, 0); + construct_tuple_of_keys(keys, builder2, 1); // Perform prover and verifier folding auto [prover_accumulator, verifier_accumulator] = fold_and_verify(get<0>(keys), get<1>(keys)); @@ -418,7 +423,8 @@ template class ProtogalaxyTests : public testing::Test { TupleOfKeys insts_2 = construct_keys(1); // just one key pair auto [prover_accumulator_2, verifier_accumulator_2] = - fold_and_verify({ prover_accumulator, get<0>(insts_2)[0] }, { verifier_accumulator, get<1>(insts_2)[0] }); + fold_and_verify(ProverInstances{ prover_accumulator, get<0>(insts_2)[0] }, + VerifierInstances{ verifier_accumulator, get<1>(insts_2)[0] }); EXPECT_TRUE(check_accumulator_target_sum_manual(prover_accumulator_2)); decide_and_verify(prover_accumulator_2, verifier_accumulator_2, true); @@ -439,7 +445,8 @@ template class ProtogalaxyTests : public testing::Test { TupleOfKeys keys_2 = construct_keys(1, trace_settings); // just one key pair auto [prover_accumulator_2, verifier_accumulator_2] = - fold_and_verify({ prover_accumulator, get<0>(keys_2)[0] }, { verifier_accumulator, get<1>(keys_2)[0] }); + fold_and_verify(ProverInstances{ prover_accumulator, get<0>(keys_2)[0] }, + VerifierInstances{ verifier_accumulator, get<1>(keys_2)[0] }); EXPECT_TRUE(check_accumulator_target_sum_manual(prover_accumulator_2)); info(prover_accumulator_2->dyadic_size()); decide_and_verify(prover_accumulator_2, verifier_accumulator_2, true); @@ -458,8 +465,8 @@ template class 
ProtogalaxyTests : public testing::Test { TraceSettings trace_settings{ SMALL_TEST_STRUCTURE_FOR_OVERFLOWS, overflow_capacity }; ExecutionTraceUsageTracker trace_usage_tracker = ExecutionTraceUsageTracker(trace_settings); - std::vector> decider_pks; - std::vector> decider_vks; + ProverInstances prover_insts; + VerifierInstances verifier_insts; // define parameters for two circuits; the first fits within the structured trace, the second overflows const std::vector log2_num_gates = { 14, 18 }; @@ -469,20 +476,20 @@ template class ProtogalaxyTests : public testing::Test { MockCircuits::add_arithmetic_gates(builder, 1 << log2_num_gates[i]); stdlib::recursion::PairingPoints::add_default_to_public_inputs(builder); - auto decider_proving_key = std::make_shared(builder, trace_settings); + auto prover_instance = std::make_shared(builder, trace_settings); trace_usage_tracker.update(builder); - auto verification_key = std::make_shared(decider_proving_key->get_precomputed()); - auto decider_verification_key = std::make_shared(verification_key); - decider_pks.push_back(decider_proving_key); - decider_vks.push_back(decider_verification_key); + auto verification_key = std::make_shared(prover_instance->get_precomputed()); + auto verifier_instance = std::make_shared(verification_key); + prover_insts[i] = prover_instance; + verifier_insts[i] = verifier_instance; } // Ensure the dyadic size of the first key is strictly less than that of the second - EXPECT_TRUE(decider_pks[0]->dyadic_size() < decider_pks[1]->dyadic_size()); + EXPECT_TRUE(prover_insts[0]->dyadic_size() < prover_insts[1]->dyadic_size()); // The size discrepency should be automatically handled by the PG prover via a virtual size increase const auto [prover_accumulator, verifier_accumulator] = - fold_and_verify(decider_pks, decider_vks, trace_usage_tracker); + fold_and_verify(prover_insts, verifier_insts, trace_usage_tracker); EXPECT_TRUE(check_accumulator_target_sum_manual(prover_accumulator)); 
decide_and_verify(prover_accumulator, verifier_accumulator, true); } @@ -512,8 +519,8 @@ template class ProtogalaxyTests : public testing::Test { // Construct the decider key pairs for the first two circuits TupleOfKeys keys_1; - construct_keys(keys_1, builder1, trace_settings); - construct_keys(keys_1, builder2, trace_settings); + construct_tuple_of_keys(keys_1, builder1, 0, trace_settings); + construct_tuple_of_keys(keys_1, builder2, 1, trace_settings); // Fold the first two pairs auto [prover_accumulator, verifier_accumulator] = fold_and_verify(get<0>(keys_1), get<1>(keys_1)); @@ -521,11 +528,12 @@ template class ProtogalaxyTests : public testing::Test { // Construct the decider key pair for the third circuit TupleOfKeys keys_2; - construct_keys(keys_2, builder3, trace_settings); + construct_tuple_of_keys(keys_2, builder3, 0, trace_settings); // Fold 3rd pair of keys into their respective accumulators auto [prover_accumulator_2, verifier_accumulator_2] = - fold_and_verify({ prover_accumulator, get<0>(keys_2)[0] }, { verifier_accumulator, get<1>(keys_2)[0] }); + fold_and_verify(ProverInstances{ prover_accumulator, get<0>(keys_2)[0] }, + VerifierInstances{ verifier_accumulator, get<1>(keys_2)[0] }); EXPECT_TRUE(check_accumulator_target_sum_manual(prover_accumulator_2)); info(prover_accumulator_2->dyadic_size()); @@ -548,7 +556,8 @@ template class ProtogalaxyTests : public testing::Test { TupleOfKeys insts_2 = construct_keys(1); // just one decider key pair auto [prover_accumulator_2, verifier_accumulator_2] = - fold_and_verify({ prover_accumulator, get<0>(insts_2)[0] }, { verifier_accumulator, get<1>(insts_2)[0] }); + fold_and_verify(ProverInstances{ prover_accumulator, get<0>(insts_2)[0] }, + VerifierInstances{ verifier_accumulator, get<1>(insts_2)[0] }); EXPECT_TRUE(check_accumulator_target_sum_manual(prover_accumulator_2)); decide_and_verify(prover_accumulator_2, verifier_accumulator_2, false); @@ -571,7 +580,8 @@ template class ProtogalaxyTests : public 
testing::Test { TupleOfKeys insts_2 = construct_keys(1); // just one decider key pair auto [prover_accumulator_2, verifier_accumulator_2] = - fold_and_verify({ prover_accumulator, get<0>(insts_2)[0] }, { verifier_accumulator, get<1>(insts_2)[0] }); + fold_and_verify(ProverInstances{ prover_accumulator, get<0>(insts_2)[0] }, + VerifierInstances{ verifier_accumulator, get<1>(insts_2)[0] }); EXPECT_EQ(prover_accumulator_2->target_sum == verifier_accumulator_2->target_sum, false); decide_and_verify(prover_accumulator_2, verifier_accumulator_2, false); @@ -582,10 +592,8 @@ template class ProtogalaxyTests : public testing::Test { constexpr size_t total_insts = k + 1; TupleOfKeys insts = construct_keys(total_insts); - ProtogalaxyProver_ folding_prover( - get<0>(insts), get<1>(insts), std::make_shared()); - ProtogalaxyVerifier_> folding_verifier( - get<1>(insts), std::make_shared()); + ProtogalaxyProver_ folding_prover(get<0>(insts), get<1>(insts), std::make_shared()); + ProtogalaxyVerifier_ folding_verifier(get<1>(insts), std::make_shared()); auto [prover_accumulator, folding_proof] = folding_prover.prove(); auto verifier_accumulator = folding_verifier.verify_folding_proof(folding_proof); @@ -660,6 +668,7 @@ TYPED_TEST(ProtogalaxyTests, TamperedCommitment) TYPED_TEST(ProtogalaxyTests, TamperedAccumulatorPolynomial) { + BB_DISABLE_ASSERTS(); // Disable assert in PG prover TestFixture::test_tampered_accumulator_polynomial(); } diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover.hpp b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover.hpp index a18adcfa2b38..127333ff67f3 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover.hpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover.hpp @@ -7,43 +7,45 @@ #pragma once #include "barretenberg/honk/execution_trace/execution_trace_usage_tracker.hpp" #include "barretenberg/polynomials/univariate.hpp" +#include "barretenberg/protogalaxy/constants.hpp" 
#include "barretenberg/protogalaxy/folding_result.hpp" #include "barretenberg/protogalaxy/protogalaxy_prover_internal.hpp" -#include "barretenberg/ultra_honk/decider_keys.hpp" +#include "barretenberg/ultra_honk/prover_instance.hpp" +#include "barretenberg/ultra_honk/verifier_instance.hpp" namespace bb { -// TODO(https://github.com/AztecProtocol/barretenberg/issues/1437): Change template params back to DeciderProvingKeys -template class ProtogalaxyProver_ { +// TODO(https://github.com/AztecProtocol/barretenberg/issues/1437): Change template params back to ProverInstances +// TODO(https://github.com/AztecProtocol/barretenberg/issues/1239): clean out broken support for multi-folding +template class ProtogalaxyProver_ { public: - using DeciderProvingKeys = DeciderProvingKeys_; - using DeciderVerificationKeys = DeciderVerificationKeys_; + static constexpr size_t NUM_SUBRELATIONS = Flavor::NUM_SUBRELATIONS; + static constexpr size_t EXTENDED_LENGTH = computed_extended_length(); + static constexpr size_t BATCHED_EXTENDED_LENGTH = computed_batched_extended_length(); + + using ProverInstance = ProverInstance_; + using VerifierInstance = VerifierInstance_; using FF = typename Flavor::FF; - using CombinerQuotient = Univariate; - using TupleOfTuplesOfUnivariates = typename Flavor::template ProtogalaxyTupleOfTuplesOfUnivariates; + using CombinerQuotient = Univariate; + using TupleOfTuplesOfUnivariates = typename Flavor::template ProtogalaxyTupleOfTuplesOfUnivariates; using UnivariateRelationParameters = - bb::RelationParameters>; - using UnivariateSubrelationSeparators = - std::array, Flavor::NUM_SUBRELATIONS - 1>; + bb::RelationParameters>; + using UnivariateSubrelationSeparators = std::array, NUM_SUBRELATIONS - 1>; using Transcript = typename Flavor::Transcript; - using DeciderPK = DeciderProvingKeys::DeciderPK; - using DeciderVK = DeciderVerificationKeys::DeciderVK; using CommitmentKey = typename Flavor::CommitmentKey; - using PGInternal = ProtogalaxyProverInternal; - - // 
TODO(https://github.com/AztecProtocol/barretenberg/issues/1239): clean out broken support for multi-folding - static_assert(DeciderProvingKeys::NUM == 2, "Protogalaxy currently only supports folding one instance at a time."); + using PGInternal = ProtogalaxyProverInternal; - static constexpr size_t NUM_SUBRELATIONS = DeciderProvingKeys::NUM_SUBRELATIONS; + using ProverInstances = std::array, NUM_INSTANCES>; + using VerifierInstances = std::array, NUM_INSTANCES>; - DeciderProvingKeys keys_to_fold; - DeciderVerificationKeys vks_to_fold; + ProverInstances prover_insts_to_fold; + VerifierInstances verifier_insts_to_fold; CommitmentKey commitment_key; // the state updated and carried forward beween rounds std::shared_ptr transcript = std::make_shared(); - std::shared_ptr accumulator; + std::shared_ptr accumulator; Polynomial perturbator; std::vector deltas; CombinerQuotient combiner_quotient; @@ -54,38 +56,35 @@ template class ProtogalaxyProver PGInternal pg_internal; ProtogalaxyProver_() = default; - ProtogalaxyProver_(const std::vector>& keys, - const std::vector>& vks, + ProtogalaxyProver_(const ProverInstances& prover_insts, + const VerifierInstances& verifier_insts, const std::shared_ptr& transcript, ExecutionTraceUsageTracker trace_usage_tracker = ExecutionTraceUsageTracker{}) - : keys_to_fold(DeciderProvingKeys_(keys)) - , vks_to_fold(DeciderVerificationKeys_(vks)) - , commitment_key(keys_to_fold[1]->commitment_key) + : prover_insts_to_fold(prover_insts) + , verifier_insts_to_fold(verifier_insts) + , commitment_key(prover_insts_to_fold[1]->commitment_key) , transcript(transcript) , pg_internal(trace_usage_tracker) - { - BB_ASSERT_EQ(keys.size(), NUM_KEYS, "Number of prover keys does not match the number of keys to fold"); - BB_ASSERT_EQ( - vks.size(), NUM_KEYS, "Number of verification keys does not match the number of vks to Fiat-Shamir"); - } + {} /** - * @brief For each key produced by a circuit, prior to folding, we need to complete the computation of its - * 
prover polynomials; commit to witnesses and generate the relation parameters; and send the public data ϕ of - * the key to the verifier. + * @brief For each Prover instance derived from a circuit, prior to folding, we need to complete the computation of + * its polynomials (some of which require generating relation parameters first); commit to witnesses and generate + * the relation parameters; and send the public data ϕ of the instance to the verifier (which will represent the + * verifier instance). * * @param domain_separator a label used for tracking data in the transcript */ - void run_oink_prover_on_one_incomplete_key(std::shared_ptr, - std::shared_ptr, - const std::string& domain_separator); + void run_oink_prover_on_one_incomplete_instance(std::shared_ptr, + std::shared_ptr, + const std::string& domain_separator); /** * @brief Create inputs to folding protocol (an Oink interaction). - * @details Complete the decider pks that will be folded: complete computation of all the witness polynomials + * @details Complete all Prover instances that will be folded: complete computation of all the witness polynomials * and compute commitments. Send commitments to the verifier and retrieve challenges. */ - void run_oink_prover_on_each_incomplete_key(); + void run_oink_prover_on_each_incomplete_instance(); /** * @brief Steps 2 - 5 of the paper. @@ -94,26 +93,27 @@ template class ProtogalaxyProver * @param accumulator * @return std::tuple, Polynomial> deltas, perturbator */ - std::tuple, Polynomial> perturbator_round(const std::shared_ptr& accumulator); + std::tuple, Polynomial> perturbator_round( + const std::shared_ptr& accumulator); /** * @brief Steps 6 - 11 of the paper. - * @details Compute combiner (G polynomial in the paper) and then its quotient (K polynomial), whose coefficient + * @details Compute combiner (G polynomial in the paper) and then its quotient (K polynomial), whose coefficients * will be sent to the verifier. 
*/ std::tuple, UnivariateSubrelationSeparators, UnivariateRelationParameters, FF, CombinerQuotient> combiner_quotient_round(const std::vector& gate_challenges, const std::vector& deltas, - const DeciderProvingKeys& keys); + const ProverInstances& instances); /** * @brief Steps 12 - 13 of the paper plus the prover folding work. - * @details Compute \f$ e^* \f$ plus, then update the prover accumulator by taking a Lagrange-linear combination of - * the current accumulator and the decider keys to be folded. In our mental model, we are doing a scalar - * multiplication of matrices whose columns are polynomials, as well as taking similar linear combinations of the - * relation parameters. + * @details Compute \f$ e^* \f$ (the new target sum), then update the prover accumulator by taking a + * Lagrange-linear combination of the current accumulator and the prover instances to be folded. In our mental + * model, we are doing a scalar multiplication of matrices whose columns are polynomials, as well as taking similar + * linear combinations of the relation parameters. */ - void update_target_sum_and_fold(const DeciderProvingKeys& keys, + void update_target_sum_and_fold(const ProverInstances& instances, const CombinerQuotient& combiner_quotient, const UnivariateSubrelationSeparators& alphas, const UnivariateRelationParameters& univariate_relation_parameters, @@ -122,9 +122,21 @@ template class ProtogalaxyProver /** * @brief Execute the folding prover. * - * @return FoldingResult is a pair consisting of an accumulator and a folding proof, which is a proof that the + * @return FoldingResult is a pair consisting of the new accumulator and a folding proof, which is a proof that the * accumulator was computed correctly. 
*/ BB_PROFILE FoldingResult prove(); + + private: + /** + * @brief Get the maximum dyadic circuit size among all prover instances + * @return The maximum dyadic size + */ + size_t get_max_dyadic_size() const + { + return std::ranges::max(prover_insts_to_fold | std::views::transform([](const auto& inst) { + return inst != nullptr ? inst->dyadic_size() : 0; + })); + } }; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_impl.hpp b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_impl.hpp index 7976f40e9e05..cb1fd0e09a5c 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_impl.hpp @@ -5,7 +5,8 @@ // ===================== #pragma once -#include "barretenberg/common/op_count.hpp" +#include "barretenberg/common/bb_bench.hpp" +#include "barretenberg/common/thread.hpp" #include "barretenberg/honk/relation_checker.hpp" #include "barretenberg/protogalaxy/protogalaxy_prover_internal.hpp" #include "barretenberg/protogalaxy/prover_verifier_shared.hpp" @@ -14,62 +15,54 @@ #include "protogalaxy_prover.hpp" namespace bb { -template -void ProtogalaxyProver_::run_oink_prover_on_one_incomplete_key(std::shared_ptr key, - std::shared_ptr vk, - const std::string& domain_separator) +template +void ProtogalaxyProver_::run_oink_prover_on_one_incomplete_instance(std::shared_ptr key, + std::shared_ptr vk, + const std::string& domain_separator) { - PROFILE_THIS_NAME("ProtogalaxyProver::run_oink_prover_on_one_incomplete_key"); - OinkProver oink_prover(key, vk->vk, transcript, domain_separator + '_'); + BB_BENCH_NAME("ProtogalaxyProver::run_oink_prover_on_one_incomplete_instance"); + OinkProver oink_prover(key, vk->vk, transcript, domain_separator + '_'); oink_prover.prove(); } -template -void ProtogalaxyProver_::run_oink_prover_on_each_incomplete_key() +template void 
ProtogalaxyProver_::run_oink_prover_on_each_incomplete_instance() { - PROFILE_THIS_NAME("ProtogalaxyProver_::run_oink_prover_on_each_incomplete_key"); size_t idx = 0; - auto& key = keys_to_fold[0]; + auto& key = prover_insts_to_fold[0]; auto domain_separator = std::to_string(idx); - auto& verifier_accum = vks_to_fold[0]; - if (!key->is_accumulator) { - run_oink_prover_on_one_incomplete_key(key, verifier_accum, domain_separator); - key->target_sum = 0; - key->gate_challenges = std::vector(CONST_PG_LOG_N, 0); - } else { - // Fiat-Shamir the verifier accumulator - FF accum_hash = verifier_accum->add_hash_to_transcript("", *transcript); - info("Accumulator hash in PG prover: ", accum_hash); + auto& verifier_accum = verifier_insts_to_fold[0]; + if (!key->is_complete) { + run_oink_prover_on_one_incomplete_instance(key, verifier_accum, domain_separator); + // Get the gate challenges for sumcheck/combiner computation + key->gate_challenges = + transcript->template get_powers_of_challenge(domain_separator + "_gate_challenge", CONST_PG_LOG_N); } idx++; - for (auto it = keys_to_fold.begin() + 1; it != keys_to_fold.end(); it++, idx++) { + for (auto it = prover_insts_to_fold.begin() + 1; it != prover_insts_to_fold.end(); it++, idx++) { auto key = *it; auto domain_separator = std::to_string(idx); - run_oink_prover_on_one_incomplete_key(key, vks_to_fold[idx], domain_separator); + run_oink_prover_on_one_incomplete_instance(key, verifier_insts_to_fold[idx], domain_separator); } - accumulator = keys_to_fold[0]; + accumulator = prover_insts_to_fold[0]; }; -template -std::tuple, Polynomial> ProtogalaxyProver_:: - perturbator_round(const std::shared_ptr& accumulator) +template +std::tuple, Polynomial> ProtogalaxyProver_< + Flavor>::perturbator_round(const std::shared_ptr& accumulator) { - PROFILE_THIS_NAME("ProtogalaxyProver_::perturbator_round"); + BB_BENCH_NAME("ProtogalaxyProver_::perturbator_round"); - const FF delta = transcript->template get_challenge("delta"); - const 
std::vector deltas = compute_round_challenge_pows(CONST_PG_LOG_N, delta); + const std::vector deltas = transcript->template get_powers_of_challenge("delta", CONST_PG_LOG_N); // An honest prover with valid initial key computes that the perturbator is 0 in the first round - const Polynomial perturbator = accumulator->is_accumulator + const Polynomial perturbator = accumulator->from_first_instance ? pg_internal.compute_perturbator(accumulator, deltas) : Polynomial(CONST_PG_LOG_N + 1); // Prover doesn't send the constant coefficient of F because this is supposed to be equal to the target sum of // the accumulator which the folding verifier has from the previous iteration. - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1087): Verifier circuit for first IVC step is - // different for (size_t idx = 1; idx <= CONST_PG_LOG_N; idx++) { transcript->send_to_verifier("perturbator_" + std::to_string(idx), perturbator[idx]); } @@ -77,35 +70,36 @@ std::tuple, Polynomial> Pr return std::make_tuple(deltas, perturbator); }; -template +template std::tuple, - typename ProtogalaxyProver_::UnivariateSubrelationSeparators, - typename ProtogalaxyProver_::UnivariateRelationParameters, + typename ProtogalaxyProver_::UnivariateSubrelationSeparators, + typename ProtogalaxyProver_::UnivariateRelationParameters, typename Flavor::FF, - typename ProtogalaxyProver_::CombinerQuotient> -ProtogalaxyProver_::combiner_quotient_round(const std::vector& gate_challenges, - const std::vector& deltas, - const DeciderProvingKeys& keys) + typename ProtogalaxyProver_::CombinerQuotient> +ProtogalaxyProver_::combiner_quotient_round(const std::vector& gate_challenges, + const std::vector& deltas, + const ProverInstances& instances) { - PROFILE_THIS_NAME("ProtogalaxyProver_::combiner_quotient_round"); + BB_BENCH_NAME("ProtogalaxyProver_::combiner_quotient_round"); const FF perturbator_challenge = transcript->template get_challenge("perturbator_challenge"); const std::vector updated_gate_challenges = 
update_gate_challenges(perturbator_challenge, gate_challenges, deltas); - const UnivariateSubrelationSeparators alphas = PGInternal::compute_and_extend_alphas(keys); - const GateSeparatorPolynomial gate_separators{ updated_gate_challenges, CONST_PG_LOG_N }; + const UnivariateSubrelationSeparators alphas = PGInternal::compute_and_extend_alphas(instances); + const GateSeparatorPolynomial gate_separators{ updated_gate_challenges, + numeric::get_msb(get_max_dyadic_size()) }; const UnivariateRelationParameters relation_parameters = - PGInternal::template compute_extended_relation_parameters(keys); + PGInternal::template compute_extended_relation_parameters(instances); // Note: {} is required to initialize the tuple contents. Otherwise the univariates contain garbage. TupleOfTuplesOfUnivariates accumulators{}; - auto combiner = pg_internal.compute_combiner(keys, gate_separators, relation_parameters, alphas, accumulators); + auto combiner = pg_internal.compute_combiner(instances, gate_separators, relation_parameters, alphas, accumulators); const FF perturbator_evaluation = perturbator.evaluate(perturbator_challenge); const CombinerQuotient combiner_quotient = PGInternal::compute_combiner_quotient(perturbator_evaluation, combiner); - for (size_t idx = NUM_KEYS; idx < DeciderProvingKeys::BATCHED_EXTENDED_LENGTH; idx++) { + for (size_t idx = NUM_INSTANCES; idx < BATCHED_EXTENDED_LENGTH; idx++) { transcript->send_to_verifier("combiner_quotient_" + std::to_string(idx), combiner_quotient.value_at(idx)); } @@ -116,19 +110,19 @@ ProtogalaxyProver_::combiner_quotient_round(const std::vector< /** * @brief Given the challenge \gamma, compute Z(\gamma) and {L_0(\gamma),L_1(\gamma)} */ -template -void ProtogalaxyProver_::update_target_sum_and_fold( - const DeciderProvingKeys& keys, +template +void ProtogalaxyProver_::update_target_sum_and_fold( + const ProverInstances& instances, const CombinerQuotient& combiner_quotient, const UnivariateSubrelationSeparators& alphas, const 
UnivariateRelationParameters& univariate_relation_parameters, const FF& perturbator_evaluation) { - PROFILE_THIS_NAME("ProtogalaxyProver_::update_target_sum_and_fold"); + BB_BENCH_NAME("ProtogalaxyProver_::update_target_sum_and_fold"); - std::shared_ptr accumulator = keys[0]; - std::shared_ptr incoming = keys[1]; - accumulator->is_accumulator = true; + std::shared_ptr accumulator = instances[0]; + std::shared_ptr incoming = instances[1]; + accumulator->from_first_instance = true; // At this point the virtual sizes of the polynomials should already agree BB_ASSERT_EQ(accumulator->polynomials.w_l.virtual_size(), incoming->polynomials.w_l.virtual_size()); @@ -146,69 +140,108 @@ void ProtogalaxyProver_::update_target_sum_and_fold( // solution is to simply reverse the order or the terms in the linear combination by swapping the polynomials and // the lagrange coefficients between the accumulator and the incoming key. // TODO(https://github.com/AztecProtocol/barretenberg/issues/1417): make this swapping logic more robust. 
- if (incoming->get_overflow_size() > accumulator->get_overflow_size()) { + bool swap_polys = incoming->get_overflow_size() > accumulator->get_overflow_size(); + if (swap_polys) { std::swap(accumulator->polynomials, incoming->polynomials); // swap the polys std::swap(lagranges[0], lagranges[1]); // swap the lagrange coefficients so the sum is unchanged accumulator->set_dyadic_size(incoming->dyadic_size()); // update dyadic size of accumulator accumulator->set_overflow_size(incoming->get_overflow_size()); // swap overflow size } - // Fold the proving key polynomials - for (auto& poly : accumulator->polynomials.get_unshifted()) { - poly *= lagranges[0]; - } - for (auto [acc_poly, key_poly] : - zip_view(accumulator->polynomials.get_unshifted(), incoming->polynomials.get_unshifted())) { - acc_poly.add_scaled(key_poly, lagranges[1]); - } - - // Evaluate the combined batching α_i univariate at challenge to obtain next α_i and send it to the - // verifier, where i ∈ {0,...,NUM_SUBRELATIONS - 1} - for (auto [folded_alpha, key_alpha] : zip_view(accumulator->alphas, alphas)) { - folded_alpha = key_alpha.evaluate(combiner_challenge); + // Fold the prover polynomials + + // Convert the polynomials into spans to remove boundary checks and if checks that normally apply when calling + // getter/setters in Polynomial (see SharedShiftedVirtualZeroesArray::get) + auto accumulator_polys = accumulator->polynomials.get_unshifted(); + auto key_polys = incoming->polynomials.get_unshifted(); + const size_t num_polys = key_polys.size(); + + std::vector> acc_spans; + std::vector> key_spans; + acc_spans.reserve(num_polys); + key_spans.reserve(num_polys); + for (size_t i = 0; i < num_polys; ++i) { + acc_spans.emplace_back(static_cast>(accumulator_polys[i])); + key_spans.emplace_back(static_cast>(key_polys[i])); } - // Evaluate each relation parameter univariate at challenge to obtain the folded relation parameters. 
- for (auto [univariate, value] : - zip_view(univariate_relation_parameters.get_to_fold(), accumulator->relation_parameters.get_to_fold())) { - value = univariate.evaluate(combiner_challenge); + parallel_for([&acc_spans, &key_spans, &lagranges, &combiner_challenge, &swap_polys](const ThreadChunk& chunk) { + for (auto [acc_poly, key_poly] : zip_view(acc_spans, key_spans)) { + size_t offset = acc_poly.start_index; + for (size_t idx : chunk.range(acc_poly.size(), offset)) { + if ((idx < key_poly.start_index) || (idx >= key_poly.end_index())) { + acc_poly[idx] *= lagranges[0]; + } else { + // acc * lagranges[0] + key * lagranges[1] = + // acc + (key - acc) * combiner_challenge (if !swap_polys) + // key + (acc - key) * combiner_challenge (if swap_polys) + if (swap_polys) { + acc_poly[idx] = key_poly[idx] + (acc_poly[idx] - key_poly[idx]) * combiner_challenge; + } else { + acc_poly[idx] = acc_poly[idx] + (key_poly[idx] - acc_poly[idx]) * combiner_challenge; + } + } + } + } + }); + + { + BB_BENCH_NAME("ProtogalaxyProver_::update_target_sum_and_fold::update_alphas_and_relation_parameters"); + + parallel_for([&](const ThreadChunk& chunk) { + // Evaluate the combined batching α_i univariate at challenge to obtain next α_i and send it to the + // verifier, where i ∈ {0,...,NUM_SUBRELATIONS - 1} + for (size_t i : chunk.range(NUM_SUBRELATIONS)) { + accumulator->alphas[i] = alphas[i].evaluate(combiner_challenge); + } + }); + + auto univariate_params_to_fold = univariate_relation_parameters.get_to_fold(); + auto accumulator_params_to_fold = accumulator->relation_parameters.get_to_fold(); + parallel_for([&](const ThreadChunk& chunk) { + // Evaluate each relation parameter univariate at challenge to obtain the folded relation parameters. 
+ for (size_t i : chunk.range(univariate_params_to_fold.size())) { + accumulator_params_to_fold[i] = univariate_params_to_fold[i].evaluate(combiner_challenge); + } + }); } } -template FoldingResult ProtogalaxyProver_::prove() +template FoldingResult ProtogalaxyProver_::prove() { - PROFILE_THIS_NAME("ProtogalaxyProver::prove"); + BB_BENCH_NAME("ProtogalaxyProver::prove"); - // Ensure keys are all of the same size + // Ensure instances are all of the same size size_t max_circuit_size = 0; - for (size_t idx = 0; idx < NUM_KEYS; ++idx) { - max_circuit_size = std::max(max_circuit_size, keys_to_fold[idx]->dyadic_size()); + for (size_t idx = 0; idx < NUM_INSTANCES; ++idx) { + max_circuit_size = std::max(max_circuit_size, prover_insts_to_fold[idx]->dyadic_size()); } - for (size_t idx = 0; idx < NUM_KEYS; ++idx) { - if (keys_to_fold[idx]->dyadic_size() != max_circuit_size) { + for (size_t idx = 0; idx < NUM_INSTANCES; ++idx) { + if (prover_insts_to_fold[idx]->dyadic_size() != max_circuit_size) { info("ProtogalaxyProver: circuit size mismatch - increasing virtual size of key ", idx, " from ", - keys_to_fold[idx]->dyadic_size(), + prover_insts_to_fold[idx]->dyadic_size(), " to ", max_circuit_size); - keys_to_fold[idx]->polynomials.increase_polynomials_virtual_size(max_circuit_size); + prover_insts_to_fold[idx]->polynomials.increase_polynomials_virtual_size(max_circuit_size); } } - run_oink_prover_on_each_incomplete_key(); + run_oink_prover_on_each_incomplete_instance(); vinfo("oink prover on each incomplete key"); std::tie(deltas, perturbator) = perturbator_round(accumulator); vinfo("perturbator round"); std::tie(accumulator->gate_challenges, alphas, relation_parameters, perturbator_evaluation, combiner_quotient) = - combiner_quotient_round(accumulator->gate_challenges, deltas, keys_to_fold); + combiner_quotient_round(accumulator->gate_challenges, deltas, prover_insts_to_fold); vinfo("combiner quotient round"); - update_target_sum_and_fold(keys_to_fold, combiner_quotient, 
alphas, relation_parameters, perturbator_evaluation); + update_target_sum_and_fold( + prover_insts_to_fold, combiner_quotient, alphas, relation_parameters, perturbator_evaluation); vinfo("folded"); - return FoldingResult{ .accumulator = keys_to_fold[0], .proof = transcript->export_proof() }; + return FoldingResult{ .accumulator = prover_insts_to_fold[0], .proof = transcript->export_proof() }; } } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_internal.hpp b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_internal.hpp index f98de90eb257..4069295f9835 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_internal.hpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_internal.hpp @@ -5,10 +5,11 @@ // ===================== #pragma once +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/common/container.hpp" -#include "barretenberg/common/op_count.hpp" #include "barretenberg/common/thread.hpp" #include "barretenberg/honk/execution_trace/execution_trace_usage_tracker.hpp" +#include "barretenberg/protogalaxy/constants.hpp" #include "barretenberg/protogalaxy/prover_verifier_shared.hpp" #include "barretenberg/relations/relation_parameters.hpp" #include "barretenberg/relations/relation_types.hpp" @@ -20,35 +21,32 @@ namespace bb { /** * @brief A purely static class (never add state to this!) consisting of functions used by the Protogalaxy prover. 
* - * @tparam DeciderProvingKeys_ + * @tparam ProverInstance */ -template class ProtogalaxyProverInternal { +template class ProtogalaxyProverInternal { public: - using DeciderPKs = DeciderProvingKeys_; - using Flavor = typename DeciderPKs::Flavor; + using Flavor = typename ProverInstance::Flavor; using FF = typename Flavor::FF; - using DeciderPK = typename DeciderPKs::DeciderPK; using RelationUtils = bb::RelationUtils; using ProverPolynomials = typename Flavor::ProverPolynomials; using Relations = typename Flavor::Relations; using AllValues = typename Flavor::AllValues; using SubrelationSeparators = typename Flavor::SubrelationSeparators; - static constexpr size_t NUM_KEYS = DeciderProvingKeys_::NUM; - using UnivariateRelationParametersNoOptimisticSkipping = - bb::RelationParameters>; + using ProverInstances = std::array, NUM_INSTANCES>; + + static constexpr size_t EXTENDED_LENGTH = computed_extended_length(); + static constexpr size_t BATCHED_EXTENDED_LENGTH = computed_batched_extended_length(); + static constexpr size_t NUM_SUBRELATIONS = Flavor::NUM_SUBRELATIONS; + + using UnivariateRelationParametersNoOptimisticSkipping = bb::RelationParameters>; using UnivariateRelationParameters = - bb::RelationParameters>; - using UnivariateSubrelationSeparators = - std::array, Flavor::NUM_SUBRELATIONS - 1>; - - // The length of ExtendedUnivariate is the largest length (==max_relation_degree + 1) of a univariate polynomial - // obtained by composing a relation with Lagrange polynomial-linear combination of NUM-many decider pks, with - // relation parameters regarded as variables. - using ExtendedUnivariate = Univariate; - // Represents the total length of the combiner univariate, obtained by combining the already folded relations with - // the folded relation batching challenge. 
- using ExtendedUnivariateWithRandomization = - Univariate; + bb::RelationParameters>; + using UnivariateSubrelationSeparators = std::array, NUM_SUBRELATIONS - 1>; + + // Univariates that interpolate polynomial evaluations at a given vertex across two instances + using ExtendedUnivariate = Univariate; + // Combiner univariate + using ExtendedUnivariateWithRandomization = Univariate; /** * @brief ShortUnivariates is an optimisation to improve the evaluation of Flavor relations when the output is a @@ -67,26 +65,23 @@ template class ProtogalaxyProverInternal { * can go unused. By skipping the basis extension entirely we avoid this unneccessary work. * * Tests indicates that utilizing ShortUnivariates speeds up the `benchmark_client_ivc.sh` benchmark by 10% - * @note This only works if DeciderPKs::NUM == 2. The whole protogalaxy class would require substantial revision to - * support more PKs so this should be adequate for now + * @note This only works for two instances. */ - using ShortUnivariates = typename Flavor::template ProverUnivariates; + using ShortUnivariates = typename Flavor::template ProverUnivariates; using ExtendedUnivariates = typename Flavor::template ProverUnivariatesWithOptimisticSkipping; + /*skip_count=*/SKIP_COUNT>; using ExtendedUnivariatesType = std::conditional_t; - using TupleOfTuplesOfUnivariates = typename Flavor::template ProtogalaxyTupleOfTuplesOfUnivariates; + using TupleOfTuplesOfUnivariates = typename Flavor::template ProtogalaxyTupleOfTuplesOfUnivariates; using TupleOfTuplesOfUnivariatesNoOptimisticSkipping = - typename Flavor::template ProtogalaxyTupleOfTuplesOfUnivariatesNoOptimisticSkipping; + typename Flavor::template ProtogalaxyTupleOfTuplesOfUnivariatesNoOptimisticSkipping; using RelationEvaluations = decltype(create_tuple_of_arrays_of_values()); - static constexpr size_t NUM_SUBRELATIONS = DeciderPKs::NUM_SUBRELATIONS; - ExecutionTraceUsageTracker trace_usage_tracker; ProtogalaxyProverInternal(ExecutionTraceUsageTracker 
trace_usage_tracker = ExecutionTraceUsageTracker{}) @@ -94,14 +89,56 @@ template class ProtogalaxyProverInternal { {} /** - * @brief A scale subrelations evaluations by challenges ('alphas') and part of the linearly dependent relation - * evaluation(s). + * @brief Constructs univariates that interpolate the values of each instance across a given row. * - * @details Note that a linearly dependent subrelation is not computed on a specific row but rather on the entire - * execution trace. + * @details The returned univariates are over the domain 0, .., EXTENDED_LENGTH - 1. + * + * @tparam skip_count The number of evaluations to skip in the returned univariates. Used only if not using short + * monomials. + * @param row_idx A fixed row position in several execution traces + * @return The univariates whose extensions will be used to construct the combiner. + */ + template static auto row_to_univariates(const ProverInstances& instances, size_t row_idx) + { + using ContainerType = + std::conditional_t, + std::array, NUM_INSTANCES>>; + // As a practical measure, get the first prover instance's view to deduce the array type + std::arraypolynomials.get_all()), NUM_INSTANCES> views; + views[0] = instances[0]->polynomials.get_all(); + views[1] = instances[1]->polynomials.get_all(); + + ContainerType results; + // Set the size corresponding to the number of rows in the execution trace + // Iterate over the prover polynomials' views corresponding to each prover instance + for (size_t inst_idx = 0; auto& get_all : views) { + // Iterate over all columns in the trace execution of a prover instance and extract their value at row_idx.
+ if constexpr (Flavor::USE_SHORT_MONOMIALS) { + // In this case, the elements of the ContainerType are AllEntities, so we need to get the underlying + // polynomials via get_all() + for (auto [result, poly_ptr] : zip_view(results.get_all(), get_all)) { + result.evaluations[inst_idx] = poly_ptr[row_idx]; + } + } else { + for (auto [result, poly_ptr] : zip_view(results, get_all)) { + result.evaluations[inst_idx] = poly_ptr[row_idx]; + } + } + inst_idx++; + } + return results; + } + + /** + * @brief Scale all linearly independent subrelations evaluations by challenges ('alphas'). + * + * @details Note that this is not done for linearly dependent subrelations, because their evaluation is not + * computed on a specific row but rather on the entire execution trace. * * @param evals The evaluations of all subrelations on some row - * @param challenges The 'alpha' challenges used to batch the subrelations + * @param challenges The 'alpha' challenges used to batch the subrelations (we use separate challenges rather than a + * single alpha raised to powers to avoid an unsustainable degree increase in the combiner polynomial) * @param linearly_dependent_contribution An accumulator for values of the linearly-dependent (i.e., 'whole-trace') * subrelations * @return FF The evaluation of the linearly-independent (i.e., 'per-row') subrelations @@ -134,10 +171,10 @@ template class ProtogalaxyProverInternal { /** * @brief Compute the values of the aggregated relation evaluations at each row in the execution trace, representing * f_i(ω) in the Protogalaxy paper, given the evaluations of all the prover polynomials and \vec{α} (the batching - * challenges that help establishing each subrelation is independently valid in Honk - from the Plonk paper, DO NOT - * confuse with α in Protogalaxy). + * challenges that help establishing each subrelation is independently valid in the Mega Honk relation - this α is the same + * as in the Plonk paper, DO NOT confuse with α in Protogalaxy).
* - * @details When folding Mega decider proving keys, one of the relations is linearly dependent. We define such + * @details When folding Mega prover instances, one of the relations is linearly dependent. We define such * relations as acting on the entire execution trace and hence requiring to be accumulated separately as we iterate * over each row. At the end of the function, the linearly dependent contribution is accumulated at index 0 * representing the sum f_0(ω) + α_j*g(ω) where f_0 represents the full honk evaluation at row 0, g(ω) is the @@ -149,7 +186,7 @@ template class ProtogalaxyProverInternal { { - PROFILE_THIS_NAME("ProtogalaxyProver_::compute_row_evaluations"); + BB_BENCH_NAME("ProtogalaxyProver_::compute_row_evaluations"); const size_t polynomial_size = polynomials.get_polynomial_size(); Polynomial aggregated_relation_evaluations(polynomial_size); @@ -160,8 +197,7 @@ template class ProtogalaxyProverInternal { std::vector linearly_dependent_contribution_accumulators(num_threads); // Distribute the execution trace rows across threads so that each handles an equal number of active rows - trace_usage_tracker.construct_thread_ranges( - num_threads, polynomial_size, /*use_prev_accumulator_tracker=*/true); + trace_usage_tracker.construct_thread_ranges(num_threads, polynomial_size, /*use_prev_accumulator=*/true); parallel_for(num_threads, [&](size_t thread_idx) { for (const ExecutionTraceUsageTracker::Range& range : trace_usage_tracker.thread_ranges[thread_idx]) { @@ -242,12 +278,13 @@ template class ProtogalaxyProverInternal { } /** - * @brief Construct the power perturbator polynomial F(X) in coefficient form from the accumulator + * @brief Construct the perturbator polynomial F(X) in coefficient form from the accumulator resulted from a + * previous round of Protogalaxy */ - Polynomial compute_perturbator(const std::shared_ptr& accumulator, + Polynomial compute_perturbator(const std::shared_ptr& accumulator, const std::vector& deltas) { - 
PROFILE_THIS(); + BB_BENCH(); auto full_honk_evaluations = compute_row_evaluations(accumulator->polynomials, accumulator->alphas, accumulator->relation_parameters); const auto betas = accumulator->gate_challenges; @@ -263,32 +300,34 @@ template class ProtogalaxyProverInternal { for (size_t idx = log_circuit_size; idx < CONST_PG_LOG_N; ++idx) { perturbator.emplace_back(FF(0)); } + + // Check that the perturbator zeroth coefficient is equal to the target sum stored in the accumulator + BB_ASSERT_EQ(perturbator[0], + accumulator->target_sum, + "ProtogalaxyProver: the zeroth coefficient of the perturbator is different from the target sum " + "stored in the accumulator."); + return Polynomial{ perturbator }; } /** * @brief Prepare a univariate polynomial for relation execution in one step of the combiner construction. - * @details For a fixed prover polynomial index, extract that polynomial from each key in DeciderProvingKeys. From + * @details For a fixed prover polynomial index, extract that polynomial from each key in ProverInstances. From * each polynomial, extract the value at row_idx. Use these values to create a univariate polynomial, and then * extend (i.e., compute additional evaluations at adjacent domain values) as needed. 
- * @todo TODO(https://github.com/AztecProtocol/barretenberg/issues/751) Optimize memory */ - template BB_INLINE static void extend_univariates(ExtendedUnivariatesType& extended_univariates, - const DeciderPKs& keys, + const ProverInstances& instances, const size_t row_idx) { - PROFILE_THIS_NAME("PG::extend_univariates"); - if constexpr (Flavor::USE_SHORT_MONOMIALS) { - extended_univariates = std::move(keys.row_to_short_univariates(row_idx)); + extended_univariates = std::move(row_to_univariates(instances, row_idx)); } else { - auto incoming_univariates = - keys.template row_to_univariates(row_idx); + auto incoming_univariates = row_to_univariates(instances, row_idx); for (auto [extended_univariate, incoming_univariate] : zip_view(extended_univariates.get_all(), incoming_univariates)) { - incoming_univariate.template self_extend_from(); + incoming_univariate.template self_extend_from(); extended_univariate = std::move(incoming_univariate); } } @@ -350,21 +389,21 @@ template class ProtogalaxyProverInternal { * @param gate_separators * @return ExtendedUnivariateWithRandomization */ - ExtendedUnivariateWithRandomization compute_combiner(const DeciderPKs& keys, + ExtendedUnivariateWithRandomization compute_combiner(const ProverInstances& instances, const GateSeparatorPolynomial& gate_separators, const UnivariateRelationParameters& relation_parameters, const UnivariateSubrelationSeparators& alphas, TupleOfTuplesOfUnivariates& univariate_accumulators) { - PROFILE_THIS(); + BB_BENCH(); // Determine the number of threads over which to distribute the work // The polynomial size is given by the virtual size since the computation includes // the incoming key which could have nontrivial values on the larger domain in case of overflow. 
- const size_t common_polynomial_size = keys[0]->polynomials.w_l.virtual_size(); + const size_t common_polynomial_size = instances[0]->polynomials.w_l.virtual_size(); const size_t num_threads = compute_num_threads(common_polynomial_size); - // Univariates are optimised for usual PG, but we need the unoptimised version for tests (it's a version that + // Univariates are optimized for usual PG, but we need the unoptimized version for tests (it's a version that // doesn't skip computation), so we need to define types depending on the template instantiation using ThreadAccumulators = TupleOfTuplesOfUnivariates; @@ -382,11 +421,10 @@ template class ProtogalaxyProverInternal { for (const ExecutionTraceUsageTracker::Range& range : trace_usage_tracker.thread_ranges[thread_idx]) { for (size_t idx = range.first; idx < range.second; idx++) { - // Instantiate univariates, possibly with skipping toto ignore computation in those indices + // Instantiate univariates, possibly with skipping to ignore computation in those indices // (they are still available for skipping relations, but all derived univariate will ignore - // those evaluations) No need to initialise extended_univariates to 0, as it's assigned to. - constexpr size_t skip_count = DeciderPKs::NUM - 1; - extend_univariates(extended_univariates, keys, idx); + // those evaluations) No need to initialize extended_univariates to 0, as it's assigned to. 
+ extend_univariates(extended_univariates, instances, idx); const FF pow_challenge = gate_separators[idx]; @@ -408,28 +446,28 @@ template class ProtogalaxyProverInternal { } // This does nothing if TupleOfTuples is TupleOfTuplesOfUnivariates TupleOfTuplesOfUnivariatesNoOptimisticSkipping deoptimized_univariates = - deoptimise_univariates(univariate_accumulators); + deoptimize_univariates(univariate_accumulators); // Batch the univariate contributions from each sub-relation to obtain the round univariate return batch_over_relations(deoptimized_univariates, alphas); } - ExtendedUnivariateWithRandomization compute_combiner(const DeciderPKs& keys, + ExtendedUnivariateWithRandomization compute_combiner(const ProverInstances& instances, const GateSeparatorPolynomial& gate_separators, const UnivariateRelationParameters& relation_parameters, const UnivariateSubrelationSeparators& alphas) { // Note: {} is required to initialize the tuple contents. Otherwise the univariates contain garbage. TupleOfTuplesOfUnivariates accumulators{}; - return compute_combiner(keys, gate_separators, relation_parameters, alphas, accumulators); + return compute_combiner(instances, gate_separators, relation_parameters, alphas, accumulators); } /** - * @brief Convert univariates from optimised form to regular - * @details We need to convert before we batch relations, since optimised versions don't have enough information to + * @brief Convert univariates from optimized form to regular + * @details We need to convert before we batch relations, since optimized versions don't have enough information to * extend the univariates to maximum length */ template - static TupleOfTuplesOfUnivariatesNoOptimisticSkipping deoptimise_univariates( + static TupleOfTuplesOfUnivariatesNoOptimisticSkipping deoptimize_univariates( const TupleOfTuplesOfUnivariatePossiblyOptimistic& tup) { // If input does not have optimized operators, return the input @@ -438,14 +476,14 @@ template class ProtogalaxyProverInternal { 
return tup; } - const auto deoptimise = [&](auto& element) { + const auto deoptimize = [&](auto& element) { auto& element_with_skipping = std::get(std::get(tup)); element = element_with_skipping.convert(); }; // Note: {} is required to initialize the tuple contents. Otherwise the univariates contain garbage. TupleOfTuplesOfUnivariatesNoOptimisticSkipping result{}; - RelationUtils::template apply_to_tuple_of_tuples(result, deoptimise); + RelationUtils::apply_to_tuple_of_tuples(result, deoptimize); return result; } @@ -453,8 +491,7 @@ template class ProtogalaxyProverInternal { TupleOfTuplesOfUnivariatesNoOptimisticSkipping& univariate_accumulators, const UnivariateSubrelationSeparators& alphas) { - auto result = - std::get<0>(std::get<0>(univariate_accumulators)).template extend_to(); + auto result = std::get<0>(std::get<0>(univariate_accumulators)).template extend_to(); size_t idx = 0; const auto scale_and_sum = [&](auto& element) { @@ -462,78 +499,47 @@ template class ProtogalaxyProverInternal { return; } - auto extended = element.template extend_to(); + auto extended = element.template extend_to(); extended *= alphas[idx]; result += extended; idx++; }; - RelationUtils::template apply_to_tuple_of_tuples(univariate_accumulators, scale_and_sum); + RelationUtils::apply_to_tuple_of_tuples(univariate_accumulators, scale_and_sum); RelationUtils::zero_univariates(univariate_accumulators); return result; } - static std::pair> + static std::pair> compute_vanishing_polynomial_and_lagranges(const FF& challenge) { FF vanishing_polynomial_at_challenge; - std::array lagranges; - constexpr FF inverse_two = FF(2).invert(); - - if constexpr (DeciderPKs::NUM == 2) { - vanishing_polynomial_at_challenge = challenge * (challenge - FF(1)); - lagranges = { FF(1) - challenge, challenge }; - } else if constexpr (DeciderPKs::NUM == 3) { - vanishing_polynomial_at_challenge = challenge * (challenge - FF(1)) * (challenge - FF(2)); - lagranges = { (FF(1) - challenge) * (FF(2) - challenge) * 
inverse_two, - challenge * (FF(2) - challenge), - challenge * (challenge - FF(1)) / FF(2) }; - } else if constexpr (DeciderPKs::NUM == 4) { - constexpr FF inverse_six = FF(6).invert(); - vanishing_polynomial_at_challenge = - challenge * (challenge - FF(1)) * (challenge - FF(2)) * (challenge - FF(3)); - lagranges = { (FF(1) - challenge) * (FF(2) - challenge) * (FF(3) - challenge) * inverse_six, - challenge * (FF(2) - challenge) * (FF(3) - challenge) * inverse_two, - challenge * (challenge - FF(1)) * (FF(3) - challenge) * inverse_two, - challenge * (challenge - FF(1)) * (challenge - FF(2)) * inverse_six }; - } - static_assert(DeciderPKs::NUM < 5); + std::array lagranges; + vanishing_polynomial_at_challenge = challenge * (challenge - FF(1)); + lagranges = { FF(1) - challenge, challenge }; return { vanishing_polynomial_at_challenge, lagranges }; } /** - * @brief Compute the combiner quotient defined as $K$ polynomial in the paper. + * @brief Compute the combiner quotient defined as $K$ polynomial in the paper specialized for only folding two + * instances at once.
*/ - static Univariate compute_combiner_quotient( + static Univariate compute_combiner_quotient( FF perturbator_evaluation, ExtendedUnivariateWithRandomization combiner) { - std::array combiner_quotient_evals = {}; - - constexpr FF inverse_two = FF(2).invert(); - constexpr FF inverse_six = FF(6).invert(); - for (size_t point = DeciderPKs::NUM; point < combiner.size(); point++) { - auto idx = point - DeciderPKs::NUM; - FF lagrange_0; - FF vanishing_polynomial; - if constexpr (DeciderPKs::NUM == 2) { - lagrange_0 = FF(1) - FF(point); - vanishing_polynomial = FF(point) * (FF(point) - 1); - } else if constexpr (DeciderPKs::NUM == 3) { - lagrange_0 = (FF(1) - FF(point)) * (FF(2) - FF(point)) * inverse_two; - vanishing_polynomial = FF(point) * (FF(point) - 1) * (FF(point) - 2); - } else if constexpr (DeciderPKs::NUM == 4) { - lagrange_0 = (FF(1) - FF(point)) * (FF(2) - FF(point)) * (FF(3) - FF(point)) * inverse_six; - vanishing_polynomial = FF(point) * (FF(point) - 1) * (FF(point) - 2) * (FF(point) - 3); - } - static_assert(DeciderPKs::NUM < 5); + std::array combiner_quotient_evals = {}; + for (size_t point = NUM_INSTANCES; point < combiner.size(); point++) { + auto idx = point - NUM_INSTANCES; + FF lagrange_0 = FF(1) - FF(point); + FF vanishing_polynomial = FF(point) * (FF(point) - 1); combiner_quotient_evals[idx] = (combiner.value_at(point) - perturbator_evaluation * lagrange_0) * vanishing_polynomial.invert(); } - return Univariate(combiner_quotient_evals); + return Univariate(combiner_quotient_evals); } /** @@ -541,18 +547,15 @@ template class ProtogalaxyProverInternal { * combiner compute. 
*/ template - static ExtendedRelationParameters compute_extended_relation_parameters(const DeciderPKs& keys) + static ExtendedRelationParameters compute_extended_relation_parameters(const ProverInstances& instances) { using UnivariateParameter = typename ExtendedRelationParameters::DataType; ExtendedRelationParameters result; size_t param_idx = 0; for (auto& param : result.get_to_fold()) { - Univariate tmp(0); - size_t key_idx = 0; - for (auto& key : keys) { - tmp.value_at(key_idx) = key->relation_parameters.get_to_fold()[param_idx]; - key_idx++; - } + Univariate tmp(0); + tmp.value_at(0) = instances[0]->relation_parameters.get_to_fold()[param_idx]; + tmp.value_at(1) = instances[1]->relation_parameters.get_to_fold()[param_idx]; param = tmp.template extend_to(); param_idx++; } @@ -560,21 +563,18 @@ template class ProtogalaxyProverInternal { } /** - * @brief Combine the relation batching parameters (alphas) from each decider proving key into a univariate for - * using in the combiner computation. + * @brief Combine the relation batching parameters (alphas) from each prover instance into a univariate for the + * combiner computation. 
*/ - static UnivariateSubrelationSeparators compute_and_extend_alphas(const DeciderPKs& keys) + static UnivariateSubrelationSeparators compute_and_extend_alphas(const ProverInstances& instances) { UnivariateSubrelationSeparators result; size_t alpha_idx = 0; for (auto& alpha : result) { - Univariate tmp; - size_t key_idx = 0; - for (auto& key : keys) { - tmp.value_at(key_idx) = key->alphas[alpha_idx]; - key_idx++; - } - alpha = tmp.template extend_to(); + Univariate tmp; + tmp.value_at(0) = instances[0]->alphas[alpha_idx]; + tmp.value_at(1) = instances[1]->alphas[alpha_idx]; + alpha = tmp.template extend_to(); alpha_idx++; } return result; diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_mega.cpp b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_mega.cpp index c5e51fb81fda..a28e7e9a32f7 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_mega.cpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_mega.cpp @@ -6,10 +6,9 @@ // Note: this is split up from protogalaxy_prover_impl.hpp for compile performance reasons #include "barretenberg/flavor/flavor.hpp" -#include "barretenberg/ultra_honk/decider_keys.hpp" #include "barretenberg/ultra_honk/oink_prover.hpp" #include "protogalaxy_prover_impl.hpp" namespace bb { -template class ProtogalaxyProver_; -} // namespace bb \ No newline at end of file +template class ProtogalaxyProver_; +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.cpp b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.cpp index 28ffcee2c3b1..676afd1b6db9 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.cpp @@ -12,75 +12,50 @@ namespace bb { -template -void ProtogalaxyVerifier_::run_oink_verifier_on_each_incomplete_key( - const std::vector& proof) +template +void 
ProtogalaxyVerifier_::run_oink_verifier_on_each_incomplete_instance(const std::vector& proof) { transcript->load_proof(proof); - auto key = keys_to_fold[0]; + auto inst = insts_to_fold[0]; auto domain_separator = std::to_string(0); - if (!key->is_accumulator) { - OinkVerifier oink_verifier{ key, transcript, domain_separator + '_' }; + if (!inst->is_complete) { + OinkVerifier oink_verifier{ inst, transcript, domain_separator + '_' }; oink_verifier.verify(); - key->target_sum = 0; - key->gate_challenges = std::vector(CONST_PG_LOG_N, 0); - } else { - // Fiat-Shamir the verifier accumulator - FF accum_hash = key->add_hash_to_transcript("", *transcript); - info("Accumulator hash in PG verifier: ", accum_hash); + inst->target_sum = 0; + // Get the gate challenges for sumcheck/combiner computation + inst->gate_challenges = + transcript->template get_powers_of_challenge(domain_separator + "_gate_challenge", CONST_PG_LOG_N); } - key = keys_to_fold[1]; + inst = insts_to_fold[1]; domain_separator = std::to_string(1); - OinkVerifier oink_verifier{ key, transcript, domain_separator + '_' }; + OinkVerifier oink_verifier{ inst, transcript, domain_separator + '_' }; oink_verifier.verify(); - public_inputs = std::move(oink_verifier.public_inputs); } -template +template std::tuple> compute_vanishing_polynomial_and_lagrange_evaluations(const FF& combiner_challenge) { - static_assert(NUM < 5); - static constexpr FF inverse_two = FF(2).invert(); - - std::vector lagranges(NUM); - FF vanishing_polynomial_at_challenge; - if constexpr (NUM == 2) { - vanishing_polynomial_at_challenge = combiner_challenge * (combiner_challenge - FF(1)); - lagranges = { FF(1) - combiner_challenge, combiner_challenge }; - } else if constexpr (NUM == 3) { - vanishing_polynomial_at_challenge = - combiner_challenge * (combiner_challenge - FF(1)) * (combiner_challenge - FF(2)); - lagranges = { (FF(1) - combiner_challenge) * (FF(2) - combiner_challenge) * inverse_two, - combiner_challenge * (FF(2) - 
combiner_challenge), - combiner_challenge * (combiner_challenge - FF(1)) * inverse_two }; - } else if constexpr (NUM == 4) { - static constexpr FF inverse_six = FF(6).invert(); - vanishing_polynomial_at_challenge = combiner_challenge * (combiner_challenge - FF(1)) * - (combiner_challenge - FF(2)) * (combiner_challenge - FF(3)); - lagranges = { (FF(1) - combiner_challenge) * (FF(2) - combiner_challenge) * (FF(3) - combiner_challenge) * - inverse_six, - combiner_challenge * (FF(2) - combiner_challenge) * (FF(3) - combiner_challenge) * inverse_two, - combiner_challenge * (combiner_challenge - FF(1)) * (FF(3) - combiner_challenge) * inverse_two, - combiner_challenge * (combiner_challenge - FF(1)) * (combiner_challenge - FF(2)) * inverse_six }; - } + FF vanishing_polynomial_at_challenge = combiner_challenge * (combiner_challenge - FF(1)); + std::vector lagranges = { FF(1) - combiner_challenge, combiner_challenge }; return std::make_tuple(vanishing_polynomial_at_challenge, lagranges); } -template -std::shared_ptr ProtogalaxyVerifier_< - DeciderVerificationKeys>::verify_folding_proof(const std::vector& proof) +template +std::shared_ptr ProtogalaxyVerifier_::verify_folding_proof( + const std::vector& proof) { - static constexpr size_t BATCHED_EXTENDED_LENGTH = DeciderVerificationKeys::BATCHED_EXTENDED_LENGTH; - static constexpr size_t NUM_KEYS = DeciderVerificationKeys::NUM; + // The degree of the combiner quotient (K in the paper) is dk - k - 1 = k(d - 1) - 1. + // Hence we need k(d - 1) evaluations to represent it. 
+ static constexpr size_t COMBINER_QUOTIENT_LENGTH = BATCHED_EXTENDED_LENGTH - NUM_INSTANCES; - const std::shared_ptr& accumulator = keys_to_fold[0]; + const std::shared_ptr& accumulator = insts_to_fold[0]; - run_oink_verifier_on_each_incomplete_key(proof); + run_oink_verifier_on_each_incomplete_instance(proof); // Perturbator round - const FF delta = transcript->template get_challenge("delta"); - const std::vector deltas = compute_round_challenge_pows(CONST_PG_LOG_N, delta); + const std::vector deltas = transcript->template get_powers_of_challenge("delta", CONST_PG_LOG_N); + std::vector perturbator_coeffs(CONST_PG_LOG_N + 1, 0); for (size_t idx = 1; idx <= CONST_PG_LOG_N; idx++) { perturbator_coeffs[idx] = transcript->template receive_from_prover("perturbator_" + std::to_string(idx)); @@ -92,56 +67,50 @@ std::shared_ptr ProtogalaxyVerifier const Polynomial perturbator(perturbator_coeffs); const FF perturbator_evaluation = perturbator.evaluate(perturbator_challenge); - std::array - combiner_quotient_evals; // The degree of the combiner quotient (K in the paper) is dk - k - 1 = k(d - 1) - 1. - // Hence we need k(d - 1) evaluations to represent it. 
- for (size_t idx = DeciderVerificationKeys::NUM; auto& val : combiner_quotient_evals) { + std::array combiner_quotient_evals; + for (size_t idx = NUM_INSTANCES; auto& val : combiner_quotient_evals) { val = transcript->template receive_from_prover("combiner_quotient_" + std::to_string(idx++)); } // Folding const FF combiner_challenge = transcript->template get_challenge("combiner_quotient_challenge"); - const Univariate combiner_quotient(combiner_quotient_evals); + const Univariate combiner_quotient(combiner_quotient_evals); const FF combiner_quotient_evaluation = combiner_quotient.evaluate(combiner_challenge); - auto next_accumulator = std::make_shared(); - next_accumulator->vk = std::make_shared(*accumulator->vk); - next_accumulator->is_accumulator = true; - - // Set the accumulator circuit size data based on the max of the keys being accumulated - const size_t accumulator_log_circuit_size = keys_to_fold.get_max_log_circuit_size(); - next_accumulator->vk->log_circuit_size = accumulator_log_circuit_size; + // Set a constant virtual log circuit size in the accumulator + const size_t accumulator_log_circuit_size = CONST_PG_LOG_N; + accumulator->vk->log_circuit_size = accumulator_log_circuit_size; // Compute next folding parameters const auto [vanishing_polynomial_at_challenge, lagranges] = - compute_vanishing_polynomial_and_lagrange_evaluations(combiner_challenge); - next_accumulator->target_sum = + compute_vanishing_polynomial_and_lagrange_evaluations(combiner_challenge); + accumulator->target_sum = perturbator_evaluation * lagranges[0] + vanishing_polynomial_at_challenge * combiner_quotient_evaluation; - next_accumulator->gate_challenges = // note: known already in previous round + accumulator->gate_challenges = // note: known already in previous round update_gate_challenges(perturbator_challenge, accumulator->gate_challenges, deltas); - // // Fold the commitments + // Fold the commitments for (auto [combination, to_combine] : - 
zip_view(next_accumulator->vk->get_all(), keys_to_fold.get_precomputed_commitments())) { + zip_view(accumulator->vk->get_all(), get_data_to_fold())) { combination = batch_mul_native(to_combine, lagranges); } for (auto [combination, to_combine] : - zip_view(next_accumulator->witness_commitments.get_all(), keys_to_fold.get_witness_commitments())) { + zip_view(accumulator->witness_commitments.get_all(), get_data_to_fold())) { combination = batch_mul_native(to_combine, lagranges); } // Fold the relation parameters - for (auto [combination, to_combine] : zip_view(next_accumulator->alphas, keys_to_fold.get_alphas())) { - combination = linear_combination(to_combine, lagranges); + for (auto [combination, to_combine] : zip_view(accumulator->alphas, get_data_to_fold())) { + combination = to_combine[0] + combiner_challenge * (to_combine[1] - to_combine[0]); } - for (auto [combination, to_combine] : - zip_view(next_accumulator->relation_parameters.get_to_fold(), keys_to_fold.get_relation_parameters())) { - combination = linear_combination(to_combine, lagranges); + for (auto [combination, to_combine] : zip_view(accumulator->relation_parameters.get_to_fold(), + get_data_to_fold())) { + combination = to_combine[0] + combiner_challenge * (to_combine[1] - to_combine[0]); } - return next_accumulator; + return accumulator; } -template class ProtogalaxyVerifier_>; +template class ProtogalaxyVerifier_>; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.hpp b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.hpp index ff555cf70153..8e1c64528c85 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.hpp @@ -7,49 +7,116 @@ #pragma once #include "barretenberg/flavor/flavor.hpp" #include "barretenberg/flavor/mega_flavor.hpp" +#include "barretenberg/protogalaxy/constants.hpp" #include 
"barretenberg/protogalaxy/folding_result.hpp" #include "barretenberg/transcript/transcript.hpp" -#include "barretenberg/ultra_honk/decider_keys.hpp" +#include "barretenberg/ultra_honk/verifier_instance.hpp" +#include namespace bb { -template class ProtogalaxyVerifier_ { +template class ProtogalaxyVerifier_ { public: - using Flavor = typename DeciderVerificationKeys::Flavor; + using Flavor = typename VerifierInstance::Flavor; using Transcript = typename Flavor::Transcript; using FF = typename Flavor::FF; using Commitment = typename Flavor::Commitment; - using DeciderVK = typename DeciderVerificationKeys::DeciderVK; using VerificationKey = typename Flavor::VerificationKey; using WitnessCommitments = typename Flavor::WitnessCommitments; using CommitmentLabels = typename Flavor::CommitmentLabels; using SubrelationSeparators = typename Flavor::SubrelationSeparators; + using VerifierInstances = std::array, NUM_INSTANCES>; static constexpr size_t NUM_SUBRELATIONS = Flavor::NUM_SUBRELATIONS; + static constexpr size_t BATCHED_EXTENDED_LENGTH = computed_batched_extended_length(); - DeciderVerificationKeys keys_to_fold; - - std::vector public_inputs; // extracted from the incoming instance + VerifierInstances insts_to_fold; std::shared_ptr transcript = std::make_shared(); - ProtogalaxyVerifier_(const std::vector>& keys, - const std::shared_ptr& transcript) - : keys_to_fold(DeciderVerificationKeys(keys)) - , transcript(transcript){}; + ProtogalaxyVerifier_(const VerifierInstances& insts, const std::shared_ptr& transcript) - : insts_to_fold(insts) + , transcript(transcript) {}; ~ProtogalaxyVerifier_() = default; /** - * @brief Instatiate the vks and the transcript. + * @brief Instantiate the verifier instances and the transcript. * * @param fold_data The data transmitted via the transcript by the prover.
*/ - void run_oink_verifier_on_each_incomplete_key(const std::vector&); + void run_oink_verifier_on_each_incomplete_instance(const std::vector&); /** * @brief Run the folding protocol on the verifier side to establish whether the public data ϕ of the new - * accumulator, received from the prover is the same as that produced by the verifier. + * accumulator, received from the prover, is the same as that produced by the verifier. */ - std::shared_ptr verify_folding_proof(const std::vector&); + std::shared_ptr verify_folding_proof(const std::vector&); + + private: + enum class FOLDING_DATA : std::uint8_t { + PRECOMPUTED_COMMITMENTS, + WITNESS_COMMITMENTS, + ALPHAS, + RELATION_PARAMETERS + }; + + /** + * @brief Get data to be folded grouped by commitment index + * @example Assume the VKs are arranged as follows + * VK 0 VK 1 VK 2 VK 3 + * q_c_0 q_c_1 q_c_2 q_c_3 + * q_l_0 q_l_1 q_l_2 q_l_3 + * ⋮ ⋮ ⋮ ⋮ + * If we wanted to extract the commitments from the verification keys in order to fold them, we would pass to the + * function the type parameter FOLDING_DATA::PRECOMPUTED_COMMITMENTS, and the function would return + * {{q_c_0, q_c_1, q_c_2, q_c_3}, {q_l_0, q_l_1, q_l_2, q_l_3},...}. Here the "commitment index" is the index of the + * row in the matrix whose columns are given by the instance components to be folded.
+ * + * @tparam FoldingData The type of the parameter to be folded + */ + template auto get_data_to_fold() const + { + using PrecomputeCommDataType = RefArray; + using WitnessCommitmentsDataType = RefArray; + using AlphasDataType = Flavor::SubrelationSeparators; + using RelationParametersDataType = RefArray::NUM_TO_FOLD>; + using DataType = std::conditional_t< + FoldingData == FOLDING_DATA::PRECOMPUTED_COMMITMENTS, + PrecomputeCommDataType, + std::conditional_t< + FoldingData == FOLDING_DATA::WITNESS_COMMITMENTS, + WitnessCommitmentsDataType, + std::conditional_t>>; + + std::array data; + if constexpr (FoldingData == FOLDING_DATA::PRECOMPUTED_COMMITMENTS) { + data[0] = insts_to_fold[0]->vk->get_all(); + data[1] = insts_to_fold[1]->vk->get_all(); + } else if constexpr (FoldingData == FOLDING_DATA::WITNESS_COMMITMENTS) { + data[0] = insts_to_fold[0]->witness_commitments.get_all(); + data[1] = insts_to_fold[1]->witness_commitments.get_all(); + } else if constexpr (FoldingData == FOLDING_DATA::ALPHAS) { + data[0] = insts_to_fold[0]->alphas; + data[1] = insts_to_fold[1]->alphas; + } else if constexpr (FoldingData == FOLDING_DATA::RELATION_PARAMETERS) { + data[0] = insts_to_fold[0]->relation_parameters.get_to_fold(); + data[1] = insts_to_fold[1]->relation_parameters.get_to_fold(); + } else { + throw_or_abort("Invalid folding data type."); + } + + // Extract data type (strip references for storage in std::vector) + using ReturnType = decltype(data[0][0]); + using ReturnValue = std::remove_reference_t; + + const size_t num_to_fold = data[0].size(); + std::vector> result(num_to_fold, std::vector(NUM_INSTANCES)); + for (size_t idx = 0; auto& commitment_at_idx : result) { + commitment_at_idx[0] = data[0][idx]; + commitment_at_idx[1] = data[1][idx]; + idx++; + } + return result; + } }; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/prover_verifier_shared.hpp b/barretenberg/cpp/src/barretenberg/protogalaxy/prover_verifier_shared.hpp index 
ecc8c299524d..3a08c746a360 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/prover_verifier_shared.hpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/prover_verifier_shared.hpp @@ -17,6 +17,8 @@ std::vector update_gate_challenges(const FF& perturbator_challenge, const std::vector& gate_challenges, const std::vector& init_challenges) { + BB_ASSERT_EQ( + gate_challenges.size(), init_challenges.size(), "gate_challenges and init_challenges must have same size"); const size_t num_challenges = gate_challenges.size(); std::vector next_gate_challenges(num_challenges); @@ -25,19 +27,6 @@ std::vector update_gate_challenges(const FF& perturbator_challenge, } return next_gate_challenges; } -/** - * @brief Given δ, compute the vector [δ, δ^2,..., δ^num_powers]. - * @details This is Step 2 of the protocol as written in the paper. - */ -template std::vector compute_round_challenge_pows(const size_t num_powers, const FF& round_challenge) -{ - std::vector pows(num_powers); - pows[0] = round_challenge; - for (size_t i = 1; i < num_powers; i++) { - pows[i] = pows[i - 1].sqr(); - } - return pows; -} /** * @brief Evaluates the perturbator at a given scalar, in a sequential manner for the recursive setting. 
@@ -55,7 +44,9 @@ template static FF evaluate_perturbator(std::vector coeffs, FF FF result = FF(0); for (size_t i = 0; i < coeffs.size(); i++) { result += coeffs[i] * point_acc; - point_acc *= point; + if (i < coeffs.size() - 1) { + point_acc *= point; + } } return result; }; diff --git a/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation.hpp index 3e99c94906ff..5c6efb1d15d1 100644 --- a/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation.hpp @@ -38,15 +38,25 @@ namespace bb { * subrelations can be expressed as follows: * * (1) I_i * (read_term_i) * (write_term_i) - 1 = 0 + * In reality this relation is I_i * (read_term_i) * (write_term_i) - inverse_exists = 0, i.e. it is only checked for + * active gates (more explanation below). * * (2) \sum_{i=0}^{n-1} [q_{logderiv_lookup} * I_i * write_term_i + read_count_i * I_i * read_term_i] = 0 * * Each column of the DataBus requires its own pair of subrelations. The column being read is selected via a unique * product, i.e. a lookup from bus column j is selected via q_busread * q_j (j = 1,2,...). * - * Note: that the latter subrelation is "linearly dependent" in the sense that it establishes that a sum across all - * rows of the exectution trace is zero, rather than that some expression holds independently at each row. Accordingly, - * this subrelation is not multiplied by a scaling factor at each accumulation step. + * To not compute the inverse terms packed in I_i for indices that not included in the sum we introduce a + * witness called inverse_exists, which is zero when either read_count_i is nonzero (a boolean called read_tag) or we + * have a read gate. This is represented by setting inverse_exists = 1- (1- read_tag)*(1- is_read_gate). 
Since read_gate + * is only dependent on selector values, we can assume that the verifier can check that it is boolean. However, if + * read_tag (which is a derived witness), is not constrained to be boolean, one can set the inverse_exists to 0, even + * when is_read_gate is 1, because inverse_exists is a linear function of read_tag then. Thus we have a third + * subrelation, that ensures that read_tag is a boolean value. + * (3) read_tag * read_tag - read_tag = 0 + * Note: that subrelation (2) is "linearly dependent" in the sense that it establishes that a sum + * across all rows of the exectution trace is zero, rather than that some expression holds independently at each row. + * Accordingly, this subrelation is not multiplied by a scaling factor at each accumulation step. * */ template class DatabusLookupRelationImpl { @@ -97,7 +107,7 @@ template class DatabusLookupRelationImpl { // The lookup subrelations are "linearly dependent" in the sense that they establish the value of a sum across the // entire execution trace rather than a per-row identity. - static constexpr std::array SUBRELATION_LINEARLY_INDEPENDENT = { + static constexpr std::array SUBRELATION_LINEARLY_INDEPENDENT = { INVERSE_SUBREL_LIN_INDEPENDENT, LOOKUP_SUBREL_LIN_INDEPENDENT, READ_TAG_BOOLEAN_CHECK_LIN_INDEPENDENT, INVERSE_SUBREL_LIN_INDEPENDENT, LOOKUP_SUBREL_LIN_INDEPENDENT, READ_TAG_BOOLEAN_CHECK_LIN_INDEPENDENT, INVERSE_SUBREL_LIN_INDEPENDENT, LOOKUP_SUBREL_LIN_INDEPENDENT, READ_TAG_BOOLEAN_CHECK_LIN_INDEPENDENT @@ -162,7 +172,9 @@ template class DatabusLookupRelationImpl { * @brief Compute the Accumulator whose values indicate whether the inverse is computed or not * @details This is needed for efficiency since we don't need to compute the inverse unless the log derivative * lookup relation is active at a given row. - * @note read_counts is constructed such that read_count_i <= 1 and is thus treated as boolean. 
+ * We skip the inverse computation for all the rows that read_count_i == 0 AND read_selector is 0 + * @note read_tag is constructed such that read_tag_i = 1 or 0. We add a subrelation to check that read_tag is a + * boolean value * */ template @@ -174,6 +186,11 @@ template class DatabusLookupRelationImpl { const auto read_tag_m = CoefficientAccumulator(BusData::read_tags(in)); // does row contain data being read const Accumulator read_tag(read_tag_m); + // Relation checking: is_read_gate == 1 || read_tag == 1 + // Important note: the relation written below assumes that is_read_gate and read_tag are boolean values, which + // is guaranteed by the boolean_check subrelation. If not, fixing one of the two, the return value is a linear + // function in the other variable and can be set to an arbitrary value independent of the fixed value. See the + // boolean_check subrelation for more explanation. // degree 2(2) 1 2 (2) 1 // Degree 3 (3) return is_read_gate + read_tag - (is_read_gate * read_tag); // Degree 3 (5) } @@ -252,7 +269,7 @@ template class DatabusLookupRelationImpl { auto& relation_parameters, const size_t circuit_size) { - PROFILE_THIS_NAME("Databus::compute_logderivative_inverse"); + BB_BENCH_NAME("Databus::compute_logderivative_inverse"); auto& inverse_polynomial = BusData::inverses(polynomials); size_t min_iterations_per_thread = 1 << 6; // min number of iterations for which we'll spin up a unique thread @@ -297,8 +314,8 @@ template class DatabusLookupRelationImpl { /** * @brief Accumulate the subrelation contributions for reads from a single databus column - * @details Two subrelations are required per bus column, one to establish correctness of the precomputed inverses - * and one to establish the validity of the read. 
+ * @details Three subrelations are required per bus column, one to establish correctness of the precomputed + * inverses, one to establish the validity of the read, and one to ensure read_tags is a boolean value * * @param accumulator * @param in @@ -315,7 +332,6 @@ template class DatabusLookupRelationImpl { const Parameters& params, const FF& scaling_factor) { - PROFILE_THIS_NAME("DatabusRead::accumulate"); using Accumulator = typename std::tuple_element_t<4, ContainerOverSubrelations>; using CoefficientAccumulator = typename Accumulator::CoefficientAccumulator; using ShortAccumulator = std::tuple_element_t<2, ContainerOverSubrelations>; @@ -328,9 +344,11 @@ template class DatabusLookupRelationImpl { const auto read_selector = get_read_selector(in); // Degree 2 (2) // Determine which pair of subrelations to update based on which bus column is being read + // The inverse relation subrelation index constexpr size_t subrel_idx_1 = NUM_SUB_RELATION_PER_IDX * bus_idx; + // The lookup relation subrelation index constexpr size_t subrel_idx_2 = NUM_SUB_RELATION_PER_IDX * bus_idx + 1; - // the subrelation index for checking the read_tag is boolean + // The read_tag boolean check subrelation index constexpr size_t subrel_idx_3 = NUM_SUB_RELATION_PER_IDX * bus_idx + 2; // Establish the correctness of the polynomial of inverses I. Note: inverses is computed so that the value @@ -358,8 +376,6 @@ template class DatabusLookupRelationImpl { /** * @brief Accumulate the log derivative databus lookup argument subrelation contributions for each databus column - * @details Each databus column requires three subrelations. the last relation is to make sure that the read_tag is - * a boolean value. check the logderiv_lookup_relation.hpp for more details. * @param accumulator transformed to `evals + C(in(X)...)*scaling_factor` * @param in an std::array containing the fully extended Accumulator edges. * @param params contains beta, gamma, and public_input_delta, .... 
diff --git a/barretenberg/cpp/src/barretenberg/relations/delta_range_constraint_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/delta_range_constraint_relation.hpp index 8348f6ab793f..f377087a7621 100644 --- a/barretenberg/cpp/src/barretenberg/relations/delta_range_constraint_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/delta_range_constraint_relation.hpp @@ -50,7 +50,6 @@ template class DeltaRangeConstraintRelationImpl { const Parameters&, const FF& scaling_factor) { - PROFILE_THIS_NAME("DeltaRange::accumulate"); using Accumulator = std::tuple_element_t<0, ContainerOverSubrelations>; using CoefficientAccumulator = typename Accumulator::CoefficientAccumulator; @@ -98,4 +97,4 @@ template class DeltaRangeConstraintRelationImpl { template using DeltaRangeConstraintRelation = Relation>; -} // namespace bb \ No newline at end of file +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/relations/ecc_op_queue_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/ecc_op_queue_relation.hpp index 7f6cb02dd9ed..776b10ec3a5b 100644 --- a/barretenberg/cpp/src/barretenberg/relations/ecc_op_queue_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/ecc_op_queue_relation.hpp @@ -57,7 +57,6 @@ template class EccOpQueueRelationImpl { const Parameters&, const FF& scaling_factor) { - PROFILE_THIS_NAME("EccOp::accumulate"); using Accumulator = std::tuple_element_t<0, ContainerOverSubrelations>; using CoefficientAccumulator = typename Accumulator::CoefficientAccumulator; // We skip using the CoefficientAccumulator type in this relation, as the overall relation degree is low (deg diff --git a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_lookup_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_lookup_relation.hpp index 469a7029018a..06cf7bdba3b8 100644 --- a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_lookup_relation.hpp +++ 
b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_lookup_relation.hpp @@ -103,31 +103,50 @@ template class ECCVMLookupRelationImpl { } return Accumulator(1); } - + /** + * @brief Returns the fingerprint of `(precompute_pc, compressed_slice, (2 * compressed_slice - 15)[P])`, where [P] + * is the point corresponding to `precompute_pc` and `compressed_slice`∈{0, ..., 15}. + */ template static Accumulator compute_write_term(const AllEntities& in, const Parameters& params) { using View = typename Accumulator::View; static_assert(write_index < WRITE_TERMS); - - // what are we looking up? - // we want to map: - // 1: point pc - // 2: point slice - // 3: point x - // 4: point y - // for each point in our point table, we want to map `slice` to (x, -y) AND `slice + 8` to (x, y) + // write_index == 0 means our wNAF digit is positive (i.e., ∈{1, 3..., 15}). + // write_index == 1 means our wNAF digit is negative (i.e., ∈{-15, -13..., -1}) // round starts at 0 and increments to 7 // point starts at 15[P] and decrements to [P] // a slice value of 0 maps to -15[P] - // 1 -> -13[P] - // 7 -> -[P] - // 8 -> P - // 15 -> 15[P] - // negative points map pc, round, x, -y - // positive points map pc, 15 - (round * 2), x, y + + // we have computed `(15 - 2 * round)[P] =: (precompute_tx, precompute_ty)`. + // `round`∈{0, 1..., 7} + // if write_index == 0, we want to write (pc, 15 - 2 * round, precompute_tx, precompute_ty) + // if write_index == 1, we want to write (pc, round, precompute_tx, -precompute_ty) + // to sum up, both: + // (pc, round, precompute_tx, -precompute_ty) _and_ + // (pc, 15 - 2 * round, precompute_tx, precompute_ty) + // will be written to the lookup table. 
+ // + // therefore, if `pc` corresponds to the elliptic curve point [P], we will write: + // | pc | 0 | -15[P].x | -15[P].y | + // | pc | 1 | -13[P].x | -13[P].y | + // | pc | 2 | -11[P].x | -11[P].y | + // | pc | 3 | -9[P].x | -9[P].y | + // | pc | 4 | -7[P].x | -7[P].y | + // | pc | 5 | -5[P].x | -5[P].y | + // | pc | 6 | -3[P].x | -3[P].y | + // | pc | 7 | -1[P].x | -1[P].y | + // | pc | 8 | [P].x | [P].y | + // | pc | 9 | 3[P].x | 3[P].y | + // | pc | 10 | 5[P].x | 5[P].y | + // | pc | 11 | 7[P].x | 7[P].y | + // | pc | 12 | 9[P].x | 9[P].y | + // | pc | 13 | 11[P].x | 11[P].y | + // | pc | 14 | 13[P].x | 13[P].y | + // | pc | 15 | 15[P].x | 15[P].y | + const auto& precompute_pc = View(in.precompute_pc); const auto& tx = View(in.precompute_tx); const auto& ty = View(in.precompute_ty); @@ -137,31 +156,6 @@ template class ECCVMLookupRelationImpl { const auto& beta_sqr = params.beta_sqr; const auto& beta_cube = params.beta_cube; - // slice value : (wnaf value) : lookup term - // 0 : -15 : 0 - // 1 : -13 : 1 - // 7 : -1 : 7 - // 8 : 1 : 0 - // 9 : 3 : 1 - // 15 : 15 : 7 - - // slice value : negative term : positive term - // 0 : 0 : 7 - // 1 : 1 : 6 - // 2 : 2 : 5 - // 3 : 3 : 4 - // 7 : 7 : 0 - - // | 0 | 15[P].x | 15[P].y | 0, -15[P].x, -15[P].y | 15, 15[P].x, 15[P].y | - // | 1 | 13[P].x | 13[P].y | 1, -13[P].x, -13[P].y | 14, 13[P].x, 13[P].y - // | 2 | 11[P].x | 11[P].y - // | 3 | 9[P].x | 9[P].y - // | 4 | 7[P].x | 7[P].y - // | 5 | 5[P].x | 5[P].y - // | 6 | 3[P].x | 3[P].y - // | 7 | 1[P].x | 1[P].y | 7, -[P].x, -[P].y | 8 , [P].x, [P].y | - - // todo optimize this? 
if constexpr (write_index == 0) { const auto positive_slice_value = -(precompute_round) + 15; const auto positive_term = @@ -180,8 +174,8 @@ template class ECCVMLookupRelationImpl { { using View = typename Accumulator::View; - // read term: - // pc, slice, x, y + // read term: (pc, compressed_slice, (2 * compressed_slice - 15)[P]) + // (the latter term is of course represented via an x and y coordinate.) static_assert(read_index < READ_TERMS); const auto& gamma = params.gamma; const auto& beta = params.beta; @@ -202,12 +196,12 @@ template class ECCVMLookupRelationImpl { const auto& msm_y3 = View(in.msm_y3); const auto& msm_y4 = View(in.msm_y4); - // how do we get pc value + // Recall that `pc` stands for point-counter. We recall how to compute the current pc. + // // row pc = value of pc after msm - // row count = num processed points in round - // size_of_msm = msm_size - // value of pc at start of msm = msm_pc - msm_size_of_msm - // value of current pc = msm_pc - msm_size_of_msm + msm_count + (0,1,2,3) + // msm_count = number of (128-bit) multiplications processed so far in current MSM round (NOT INCLUDING current + // row) current_pc = msm_pc - msm_count next_pc = current_pc - {0, 1, 2, 3}, depending on how many adds are + // performed in the current row. 
const auto current_pc = msm_pc - msm_count; if constexpr (read_index == 0) { @@ -254,4 +248,4 @@ template class ECCVMLookupRelationImpl { template using ECCVMLookupRelation = Relation>; -} // namespace bb \ No newline at end of file +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_msm_relation_impl.hpp b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_msm_relation_impl.hpp index fc66f0b795aa..5ff4cf133d17 100644 --- a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_msm_relation_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_msm_relation_impl.hpp @@ -15,8 +15,9 @@ namespace bb { * @brief MSM relations that evaluate the Strauss multiscalar multiplication algorithm. * * @details - * The Strauss algorithm for a size-k MSM takes scalars/points (a_i, [P_i]) for i = 0 to k-1. - * The specific algoritm we use is the following: + * The Straus algorithm for a size-k MSM takes scalars/points (a_i, [P_i]) for i = 0 to k-1. + * The specific algorithm we use may be found [here](../../eccvm/README.md). We briefly reprise the + * algorithm nonetheless. 
* * PHASE 1: Precomputation (performed in ecc_wnaf_relation.hpp, ecc_point_table_relation.hpp) * Each scalar a_i is split into 4-bit WNAF slices s_{j, i} for j = 0 to 31, and a skew bool skew_i @@ -86,15 +87,14 @@ void ECCVMMSMRelationImpl::accumulate(ContainerOverSubrelations& accumulator const auto& msm_transition_shift = View(in.msm_transition_shift); const auto& round = View(in.msm_round); const auto& round_shift = View(in.msm_round_shift); - const auto& q_add = View(in.msm_add); + const auto& q_add = View(in.msm_add); // is 1 iff we are at an ADD row in Straus algorithm const auto& q_add_shift = View(in.msm_add_shift); const auto& q_skew = View(in.msm_skew); const auto& q_skew_shift = View(in.msm_skew_shift); - const auto& q_double = View(in.msm_double); + const auto& q_double = View(in.msm_double); // is 1 iff we are at an DOUBLE row in Straus algorithm const auto& q_double_shift = View(in.msm_double_shift); const auto& msm_size = View(in.msm_size_of_msm); - // const auto& msm_size_shift = View(in.msm_size_of_msm_shift); - const auto& pc = View(in.msm_pc); + const auto& pc = View(in.msm_pc); // pc stands for `point-counter`. const auto& pc_shift = View(in.msm_pc_shift); const auto& count = View(in.msm_count); const auto& count_shift = View(in.msm_count_shift); @@ -127,53 +127,142 @@ void ECCVMMSMRelationImpl::accumulate(ContainerOverSubrelations& accumulator */ /** - * @brief Constraining addition rounds + * @brief Constraining addition rounds via a multiset-equality check * + * @details * The boolean column q_add describes whether a round is an ADDITION round. - * The values of q_add are Prover-defined. We need to ensure they set q_add correctly. - * We rely on the following statements that we assume are constrained to be true (from other relations): - * 1. The set of reads into (pc, round, wnaf_slice) is constructed when q_add = 1 - * 2. The set of reads into (pc, round, wnaf_slice) must match the set of writes from the point_table columns - * 3. 
The set of writes into (pc, round, wnaf_slice) from the point table columns is correct - * 4. `round` only updates when `q_add = 1` at current row and `q_add = 0` at next row - * If a Prover sets `q_add = 0` when an honest Prover would set `q_add = 1`, - * this will produce an inequality in the set of reads / writes into the (pc, round, wnaf_slice) table. - * - * The addition algorithm has several IF/ELSE statements based on comparing `count` with `msm_size`. - * Instead of directly constraining these, we define 4 boolean columns `q_add1, q_add2, q_add3, q_add4`. - * Like `q_add`, their values are Prover-defined. We need to ensure they are set correctly. - * We update the above conditions on reads into (pc, round, wnaf_slice) to the following: - * 1. The set of reads into (pc_{count}, round, wnaf_slice_{count}) is constructed when q_add = 1 AND q_add1 = - * 1 - * 2. The set of reads into (pc_{count + 1}, round, wnaf_slice_{count + 1}) is constructed when q_add = 1 AND - * q_add2 = 1 - * 3. The set of reads into (pc_{count + 2}, round, wnaf_slice_{count + 2}) is constructed when q_add = 1 AND - * q_add3 = 1 - * 4. The set of reads into (pc_{count + 3}, round, wnaf_slice_{count + 3}) is constructed when q_add = 1 AND - * q_add4 = 1 - * - * To ensure that all q_addi values are correctly set we apply consistency checks to q_add1/q_add2/q_add3/q_add4: + * The values of q_add are Prover-defined. We need to ensure they set q_add correctly. We will do this via a + * multiset-equality check (formerly called a "strict lookup"), which allows the various tables to "communicate". + * On a high level, this table "reads" (pc, round, wnaf_slice), another table (Precomputed) "writes" + * a potentially different set of (pc, round, wnaf_slice), and we demand that the reads match the writes. 
+ * Alternatively said, the MSM columns spawn a multiset of tuples of the form (pc, round, wnaf_slice), the + * Precomputed Table columns spawn a potentially different multiset of tuples of the form (pc, round, wnaf_slice), + * and we _check_ that these two multisets match. + * + * The above description does not reference how we will _prove_ that the two multisets are equal. As usual, we use a + * grand product argument. A happy byproduct of this is that we can use the grand product technique, which is + * powerful enough to allow our multiset equality testing to support _conditional adds_; this means that we only add + * a tuple if some particular condition occurs. + * + * This (pc, round, wnaf_slice) multiset equality testing is made more difficult by the fact that the values of + * `precomputed_pc` are _not the same_ as the values of `msm_pc`. The former indexes over every (non-trivial, 128 + * bit) scalar multiplication, while the latter jumps values and is constant on MSM rows corresponding to a fixed + * MSM. However, the transition values should match. + * + * Given a row of the MSM table, we have four selectors q_add1, q_add2, q_add3, q_add4, as well as a q_skew + * selector. For the MSM side of the multiset corresponding to (pc, round, wnaf_slice), we add: + * + * 1. (msm_pc - msm_count, round, wnaf_slice_{count}) when q_add1 = 1 + * 2. (msm_pc - msm_count - 1, round, wnaf_slice_{count + 1}) when q_add2 = 1 + * 3. (msm_pc - msm_count - 2, round, wnaf_slice_{count + 2}) when q_add3 = 1 + * 4. (msm_pc - msm_count - 3, round, wnaf_slice_{count + 3}) when q_add4 = 1 + * + * That this is "what we want" comes from the following facts: msm_pc is the number of (non-trivial, 128-bit) Point + * multiplications we have done _until the start of_ the current MSM, and `msm_count` is the number of Point * wNAF + * slice multiplications/lookups we have done _in this round_. (Recall that a round corresponds to a wNAF digit.) 
In + * particular, `msm_count` updates by the appropriate amount (usually 4, more accurately q_add1 + q_add2 + q_add3 + + * q_add4) per row of the MSM table. + * + * On the other side, given a row of the Precomputed columns, if `precompute_select == 1`, we add + * 1. (precompute_pc, 4 * precompute_round, w_1) + * 2. (precompute_pc, 4 * precompute_round + 1, w_2) + * 3. (precompute_pc, 4 * precompute_round + 2, w_3) + * 4. (precompute_pc, 4 * precompute_round + 3, w_4) + * 5. (precompute_pc, 4 * precompute_round + 4, precompute_skew) if precompute_point_transition == 1 + * + * ELSE `precompute_select == 0` and we add: + * 1. (0, 0, 0) + * + * Here, w_K is the compressed wNAF slice corresponding to `precompute_sKhi` and `precompute_sKlo`, for K ∈ {1, 2, + * 3, 4} and precompute_skew ∈ {0, 7}. + * + * SKETCH OF PROOF: We now argue that, under the following assumptions, if the multiset equality holds, then the + * `q_addK` and also `q_add` are all correctly constrained for K ∈ {1, 2, 3, 4}. + * 1. The Precomputed table is correctly constrained; in particular, the values `precompute_pc`, + * `precompute_round`, `precompute_skew`, `precompute_select`, and `wK` are all correctly constrained. + * 2. `round` monotonically increases from 0 to 32 before resetting back to 0. `round_shift - round == 1` + * precisely when `q_double == 1`. + * 3. `pc` is monotonic and only updates when there is an `msm_transition`. Here, it updates by `msm_size`, + * which must be constrained somewhere else by a multiset argument. We detail this below. + * 4. `q_add`, `q_skew`, and `q_double` are pairwise mutually exclusive. + * 5. `q_add1 == 1` iff either `q_add == 1` OR `q_skew == 1`. + * 6. The lookup table is implemented correctly. + * + * First of all, note the asymmetry: we do not explicitly add tuples corresponding to skew on the MSM side of the + * table. Indeed, this is implicit with `msm_round == 32`.
Now, the point is that the pair (pc, round) uniquely + specifies the point + wNAF digit that we are processing (and adding to the accumulator) and both `pc` and `round` + * are directly constrained to be monotonic. + * + * Suppose the Prover sets `q_addK = 0` when an honest Prover would set `q_addK == 1`. Then there would be some (pc, + * round, wnaf_slice) that the Precomputed table added to its multiset that the prover did not add. The Prover can + * _never_ "compensate" for this, as `pc` is locally constrained to be monotonic and `round` is constrained to be + * periodic; this means that the Prover has "lost their chance" to add this element to the multiset and hence the + * multiset equality check will fail. + * + * Conversely, if the Prover sets `q_addK = 1` when it should be set to 0, there are several options: either + * we are at the end of a `round` (so e.g. `q_add4` _should_ be 0), or we are at a double row, or we are at a row + * that should be all 0s. In the first two cases, as long as the Precomputed table is correctly constrained, again + * we would be adding a tuple to the multiset that can never be hit by the Precomputed table due to `precompute_pc` + * monotonicity and `precompute_round` periodicity (enforced in the precomputed columns). In the final case, the + * only way we don't break the multiset check is if `wnaf_slice == 0` for the corresponding `q_addK` that is on. But + * then the lookup argument will fail, as there is no corresponding point when `pc = 0`. (Here it is helpful to + * remember that `pc` stands for _point-counter_.) Note that this requires that `precompute_pc` is well-formed. + * + * + * We apply consistency/continuity checks to q_add1/q_add2/q_add3/q_add4: * 1. If q_add2 = 1, require q_add1 = 1 * 2. If q_add3 = 1, require q_add2 = 1 * 3. If q_add4 = 1, require q_add3 = 1 * 4.
If q_add1_shift = 1 AND round does not update between rows, require q_add4 = 1 * - * We want to use all of the above to reason about the set of reads into (pc, round, wnaf_slice). - * The goal is to conclude that any case where the Prover incorrectly sets q_add/q_add1/q_add2/q_add3/q_add4 will - * produce a set inequality between the reads/writes into (pc, round, wnaf_slice) + */ + + /** + * @brief Constrain msm_size and output of MSM computation via multiset equality + * + * @details + * As explained in the section on constraining the addition wire values, to make everything work we also need to + * constrain `msm_size`, something directly computed in the Transcript columns. We also need to "send" the final + * output value of an MSM from the MSM table to the transcript table so it can continue its processing. (Send here + * is a euphemism for constrain.) We do this via a multiset equality check of the form: + * (pc, P.x, P.y, msm-size) + * From the perspective of the MSM table, we add such a tuple only at an `msm_transition`. The terms P.x and P.y + * refer to the output values of the MSM just computed by the MSM table. `msm_size` is the size of the _just + * completed_ MSM. + * + * + */ + + /** + * @brief Looking up the slice-point products {-15[P], -13[P], ..., 13[P], 15[P]} + * + * @details + * In the Point Table, for every point [P] that occurs in the MSM table, we compute the list of points: {-15[P], + * -13[P], ..., 13[P], 15[P]}. (Note that these never vanish, as we only send a point to each table if they are + * non-zero.) We then constrain the "slice products" that occur here via a lookup argument. For completeness, we + * briefly sketch this. 
+ * + * The PointTable will "write" the following row to the lookup table: (pc, slice, x, y), where if `pc` corresponds + * to an elliptic curve point [P] (`pc` is a decreasing counter of the non-zero points that occur in our + * computation), slice ∈ {0, ..., 15}, and (x, y) are the affine coordinates of (2 * slice - 15)[P]. + * + * The MSM table will then read a row of the same form. This constrains the MSM table to have correctly used the + * wNAF * point in the Straus algorithm. + * */ /** * @brief Addition relation * - * All addition operations in ECCVMMSMRelationImpl are conditional additions! - * This method returns two Accumulators that represent x/y coord of output. - * Output is either an addition of inputs, or xa/ya dpeending on value of `selector`. - * Additionally, we require `lambda = 0` if `selector = 0`. - * The `collision_relation` accumulator tracks a subrelation that validates xb != xa. - * Repeated calls to this method will increase the max degree of the Accumulator output - * Degree of x_out, y_out = max degree of x_a/x_b + 1 - * 4 Iterations will produce an output degree of 6 + * All addition operations in ECCVMMSMRelationImpl are conditional additions, as we sometimes want to add values and + * other times simply want to propagate values. (consider, e.g., when `q_add2 == 0`.) This method returns two + * Accumulators that represent x/y coord of output. Output is either an addition of inputs (if `selector == 1`), or + * xa/ya (if `selector == 0`). Additionally, we require `lambda = 0` if `selector = 0`. The `collision_relation` + * accumulator tracks a subrelation that validates xb != xa. + * Repeated calls to this method will increase the max degree of the Accumulator output: + * deg(x_out) = 1 + max(deg(xa, xb)), deg(y_out) = max(1 + deg(x_out), 1 + deg(ya)) + * in our application, we chain together 4 of these with the pattern in such a way that the final x_out will have + * degree 5 and the final y_out will have degree 6. 
*/ auto add = [&](auto& xb, auto& yb, @@ -183,14 +272,18 @@ void ECCVMMSMRelationImpl::accumulate(ContainerOverSubrelations& accumulator auto& selector, auto& relation, auto& collision_relation) { - // L * (1 - s) = 0 - // (combine) (L * (xb - xa - 1) - yb - ya) * s + L = 0 + // computation of lambda is valid: if q == 1, then L == (yb - ya) / (xb - xa) + // if q == 0, then L == 0. combining these into a single constraint yields: + // q * (L * (xb - xa - 1) - (yb - ya)) + L = 0 relation += selector * (lambda * (xb - xa - 1) - (yb - ya)) + lambda; collision_relation += selector * (xb - xa); - // x3 = L.L + (-xb - xa) * q + (1 - q) xa + // x_out = L.L + (-xb - xa) * q + (1 - q) xa + // deg L = 1, deg q = 1, min(deg(xa), deg(xb))≥ 1. + // hence deg(x_out) = 1 + max(deg(xa, xb)) auto x_out = lambda.sqr() + (-xb - xa - xa) * selector + xa; - // y3 = L . (xa - x3) - ya * q + (1 - q) ya + // y_out = L . (xa - x_out) - ya * q + (1 - q) ya + // hence deg(y_out) = max(1 + deg(x_out), 1 + deg(ya)) auto y_out = lambda * (xa - x_out) + (-ya - ya) * selector + ya; return std::array{ x_out, y_out }; }; @@ -223,7 +316,7 @@ void ECCVMMSMRelationImpl::accumulate(ContainerOverSubrelations& accumulator constexpr uint256_t oyu = offset_generator.y; const Accumulator xo(oxu); const Accumulator yo(oyu); - + // set (x, y) to be either accumulator if `selector == 0` or OFFSET if `selector == 1`. auto x = xo * selector + xb * (-selector + 1); auto y = yo * selector + yb * (-selector + 1); relation += lambda * (x - xa) - (y - ya); // degree 3 @@ -234,20 +327,20 @@ void ECCVMMSMRelationImpl::accumulate(ContainerOverSubrelations& accumulator }; // ADD operations (if row represents ADD round, not SKEW or DOUBLE) - Accumulator add_relation(0); + Accumulator add_relation(0); // validates the correctness of all elliptic curve additions. 
Accumulator x1_collision_relation(0); Accumulator x2_collision_relation(0); Accumulator x3_collision_relation(0); Accumulator x4_collision_relation(0); - // If msm_transition = 1, we have started a new MSM. We need to treat the current value of [Acc] as the point at + // If `msm_transition == 1`, we have started a new MSM. We need to treat the current value of [Acc] as the point at // infinity! - auto [x_t1, y_t1] = first_add(acc_x, acc_y, x1, y1, lambda1, msm_transition, add_relation, x1_collision_relation); - auto [x_t2, y_t2] = add(x2, y2, x_t1, y_t1, lambda2, add2, add_relation, x2_collision_relation); - auto [x_t3, y_t3] = add(x3, y3, x_t2, y_t2, lambda3, add3, add_relation, x3_collision_relation); - auto [x_t4, y_t4] = add(x4, y4, x_t3, y_t3, lambda4, add4, add_relation, x4_collision_relation); + auto [x_t1, y_t1] = + first_add(acc_x, acc_y, x1, y1, lambda1, msm_transition, add_relation, x1_collision_relation); // [deg 2, deg 3] + auto [x_t2, y_t2] = add(x2, y2, x_t1, y_t1, lambda2, add2, add_relation, x2_collision_relation); // [deg 3, deg 4] + auto [x_t3, y_t3] = add(x3, y3, x_t2, y_t2, lambda3, add3, add_relation, x3_collision_relation); // [deg 4, deg 5] + auto [x_t4, y_t4] = add(x4, y4, x_t3, y_t3, lambda4, add4, add_relation, x4_collision_relation); // [deg 5, deg 6] // Validate accumulator output matches ADD output if q_add = 1 - // (this is a degree-6 relation) std::get<0>(accumulator) += q_add * (acc_x_shift - x_t4) * scaling_factor; std::get<1>(accumulator) += q_add * (acc_y_shift - y_t4) * scaling_factor; std::get<2>(accumulator) += q_add * add_relation * scaling_factor; @@ -279,7 +372,8 @@ void ECCVMMSMRelationImpl::accumulate(ContainerOverSubrelations& accumulator * * As with additions, the column q_double describes whether row is a double round. It is Prover-defined. * The value of `msm_round` can only update when `q_double = 1` and we use this to ensure Prover correctly sets - * `q_double`. 
(see round transition relations further down) + * `q_double`. The reason for this is that `msm_round` witnesses the wNAF digit we are processing, and we only + * perform the four doublings when we are done processing a wNAF digit. See round transition relations further down. */ Accumulator double_relation(0); auto [x_d1, y_d1] = dbl(acc_x, acc_y, lambda1, double_relation); @@ -298,6 +392,10 @@ void ECCVMMSMRelationImpl::accumulate(ContainerOverSubrelations& accumulator * If scalar slice == 7, we add into accumulator (point_table[7] maps to -[P]) * If scalar slice == 0, we do not add into accumulator * i.e. for the skew round we can use the slice values as our "selector" when doing conditional point adds + * + * As with addition and doubling, the column q_skew is prover-defined. It is precisely turned on when the round + * is 32. We implement this constraint slightly differently. For more details, see the round transition relations + * below. */ Accumulator skew_relation(0); static FF inverse_seven = FF(7).invert(); @@ -320,7 +418,6 @@ void ECCVMMSMRelationImpl::accumulate(ContainerOverSubrelations& accumulator auto [x_s4, y_s4] = add(x4, y4, x_s3, y_s3, lambda4, skew4_select, skew_relation, x4_skew_collision_relation); // Validate accumulator output matches SKEW output if q_skew = 1 - // (this is a degree-6 relation) std::get<3>(accumulator) += q_skew * (acc_x_shift - x_s4) * scaling_factor; std::get<4>(accumulator) += q_skew * (acc_y_shift - y_s4) * scaling_factor; std::get<5>(accumulator) += q_skew * skew_relation * scaling_factor; @@ -332,7 +429,8 @@ void ECCVMMSMRelationImpl::accumulate(ContainerOverSubrelations& accumulator const auto add_second_point = add2 * q_add + q_skew * skew2_select; const auto add_third_point = add3 * q_add + q_skew * skew3_select; const auto add_fourth_point = add4 * q_add + q_skew * skew4_select; - // Step 2: construct the delta between x-coordinates for each point add (depending on if row is ADD or SKEW) + // Step 2: construct the 
difference a.k.a. delta between x-coordinates for each point add (depending on if row is + // ADD or SKEW) const auto x1_delta = x1_skew_collision_relation * q_skew + x1_collision_relation * q_add; const auto x2_delta = x2_skew_collision_relation * q_skew + x2_collision_relation * q_add; const auto x3_delta = x3_skew_collision_relation * q_skew + x3_collision_relation * q_add; @@ -343,66 +441,100 @@ void ECCVMMSMRelationImpl::accumulate(ContainerOverSubrelations& accumulator std::get<8>(accumulator) += (x3_delta * collision_inverse3 - add_third_point) * scaling_factor; std::get<9>(accumulator) += (x4_delta * collision_inverse4 - add_fourth_point) * scaling_factor; - // Validate that if q_add = 1 or q_skew = 1, add1 also is 1 - // TODO(@zac-williamson) Once we have a stable base to work off of, remove q_add1 and replace with q_msm_add + - // q_msm_skew (issue #2222) - std::get<32>(accumulator) += (add1 - q_add - q_skew) * scaling_factor; - - // If add_i = 0, slice_i = 0 // When add_i = 0, force slice_i to ALSO be 0 std::get<13>(accumulator) += (-add1 + 1) * slice1 * scaling_factor; std::get<14>(accumulator) += (-add2 + 1) * slice2 * scaling_factor; std::get<15>(accumulator) += (-add3 + 1) * slice3 * scaling_factor; std::get<16>(accumulator) += (-add4 + 1) * slice4 * scaling_factor; - // only one of q_skew, q_double, q_add can be nonzero + // SELECTORS ARE MUTUALLY EXCLUSIVE + // at most one of q_skew, q_double, q_add can be nonzero. + // note that as we can expect our table to be zero padded, we _do not_ insist that q_add + q_double + q_skew == 1. 
std::get<17>(accumulator) += (q_add * q_double + q_add * q_skew + q_double * q_skew) * scaling_factor; - // We look up wnaf slices by mapping round + pc -> slice - // We use an exact set membership check to validate that - // wnafs written in wnaf_relation == wnafs read in msm relation - // We use `add1/add2/add3/add4` to flag whether we are performing a wnaf read op - // We can set these to be Prover-defined as the set membership check implicitly ensures that the correct reads - // have occurred. - // if msm_transition = 0, round_shift - round = 0 or 1 - const auto round_delta = round_shift - round; + // Validate that if q_add = 1 or q_skew = 1, add1 also is 1 + // NOTE(#2222): could just get rid of add1 as a column, as it is a linear combination. + std::get<32>(accumulator) += (add1 - q_add - q_skew) * scaling_factor; + + // ROUND TRANSITION LOGIC + // `round_transition` describes whether we are transitioning between "rounds" of the MSM according to the Straus + // algorithm. In particular, the `round` corresponds to the wNAF digit we are currently processing. - // ROUND TRANSITION LOGIC (when round does not change) - // If msm_transition = 0 (next row) then round_delta = 0 or 1 + const auto round_delta = round_shift - round; + // If `msm_transition == 0` (next row) then `round_delta` is boolean; the round is internal to a given MSM and + // represents the wNAF digit currently being processed. `round_delta == 0` means that the current and next steps of + // the Straus algorithm are processing the same wNAF digit place. + + // `round_transition == 0` if `round_delta == 0` or the next row is an MSM transition. + // if `round_transition != 1`, then `round_transition == round_delta == 1` by the following constraint. + // in particular, `round_transition` is boolean. (`round_delta` is not boolean precisely one step before an MSM + // transition, but that does not concern us here.) 
const auto round_transition = round_delta * (-msm_transition_shift + 1); std::get<18>(accumulator) += round_transition * (round_delta - 1) * scaling_factor; - // ROUND TRANSITION LOGIC (when round DOES change) - // round_transition describes whether we are transitioning between rounds of an MSM - // If round_transition = 1, the next row is either a double (if round != 31) or we are adding skew (if round == - // 31) round_transition * skew * (round - 31) = 0 (if round tx and skew, round == 31) round_transition * (skew + - // double - 1) = 0 (if round tx, skew XOR double = 1) i.e. if round tx and round != 31, double = 1 + // If `round_transition == 1`, then `round_delta == 1` and `msm_transition_shift == 0`. Therefore, we wish to + // constrain next row in the VM to either be a double (if `round != 31`) or skew (if `round == 31`). In either case, + // the point is that we have finished processing a wNAF digit place and need to either perform the doublings to move + // on to the next place _or_ we are at the last place and need to perform the skew computation to finish. These are + // equationally represented as: + // round_transition * skew_shift * (round - 31) = 0 (if round tx and skew, then round == 31); + // round_transition * (skew_shift + double_shift - 1) = 0 (if round tx, then skew XOR double = 1). + // (-round_delta + 1) * q_double_shift = 1 (if q_double_shift == 1, then round_transition = 1) + // together, these have the following implications: if round tx and round != 31, then double_shift = 1. + // conversely, if round tx and double_shift == 0, then `q_skew_shift == 1` (which then forces `round == 31`). + // similarly, if q_double_shift == 1, then round_transition == 0, + // the fact that a round_transition occurs at the first time skew_shift == 1 follows from the fact that skew == 1 + // implies round == 32 and the above three relations, together with the _definition_ of round_transition. 
std::get<19>(accumulator) += round_transition * q_skew_shift * (round - 31) * scaling_factor; std::get<20>(accumulator) += round_transition * (q_skew_shift + q_double_shift - 1) * scaling_factor; - - // if no double or no skew, round_delta = 0 + std::get<35>(accumulator) += (-round_delta + 1) * q_double_shift * scaling_factor; + // if the next is neither double nor skew, and we are not at an msm_transition, then round_delta = 0 and the next + // "row" of our VM is processing the same wNAF digit place. std::get<21>(accumulator) += round_transition * (-q_double_shift + 1) * (-q_skew_shift + 1) * scaling_factor; - // if double, next double != 1 - std::get<22>(accumulator) += q_double * q_double_shift * scaling_factor; - - // if double, next add = 1 - std::get<23>(accumulator) += q_double * (-q_add_shift + 1) * scaling_factor; - - // updating count - // if msm_transition = 0 and round_transition = 0, count_shift = count + add1 + add2 + add3 + add4 - // todo: we need this? + // CONSTRAINING Q_DOUBLE AND Q_SKEW + // NOTE: we have already constrained q_add, q_skew, and q_double to be mutually exclusive. + + // if double, next add = 1. As q_double, q_add, and q_skew are mutually exclusive, this suffices to force + // q_double_shift == q_skew_shift == 0. + std::get<22>(accumulator) += q_double * (-q_add_shift + 1) * scaling_factor; + // if the current row has q_skew == 1 and the next row is _not_ an MSM transition, then q_skew_shift = 1. + // this forces q_skew to precisely correspond to the rows where `round == 32`. Indeed, note that the first q_skew + // bit is set correctly: + // round == 31, round_transition == 1 ==> q_skew_shift == 1. (if, to the contrary, q_double_shift == 1, then + // the q_add_shift_shift == 1, but we assume that we have correctly constrained the q_adds via the multiset + // argument. this means that q_double_shift == 0, which forces q_skew_shift == 1 because round_transition + // == 1.) 
+ // this means that the first row with `round == 32` has q_skew == 1. then all subsequent q_skew entries must be 1, + // _until_ we start our new MSM. + std::get<33>(accumulator) += (-msm_transition_shift + 1) * q_skew * (-q_skew_shift + 1) * scaling_factor; + // if q_skew == 1, then round == 32. This is almost certainly redundant but psychologically useful to "constrain + // both ends". + std::get<34>(accumulator) += q_skew * (-round + 32) * scaling_factor; + + // UPDATING THE COUNT + + // if we are changing the `round` (i.e., starting to process a new wNAF digit or at an msm transition), the + // count_shift must be 0. + std::get<23>(accumulator) += round_delta * count_shift * scaling_factor; + // if msm_transition = 0 and round_transition = 0, then the next "row" of the VM is processing the same wNAF digit. + // this means that the count must increase: count_shift = count + add1 + add2 + add3 + add4 std::get<24>(accumulator) += (-msm_transition_shift + 1) * (-round_delta + 1) * (count_shift - count - add1 - add2 - add3 - add4) * scaling_factor; + // at least one of the following must be true: + // the next step is an MSM transition; + // the next count is zero (meaning we are starting the processing of a new wNAF digit) + // the next step is processing the same wNAF digit (i.e., round_delta == 0) + // (note that at the start of a new MSM, the count is also zero, so the above are not mutually exclusive.) std::get<25>(accumulator) += is_not_first_row * (-msm_transition_shift + 1) * round_delta * count_shift * scaling_factor; - // if msm_transition = 1, count_shift = 0 - std::get<26>(accumulator) += is_not_first_row * msm_transition_shift * count_shift * scaling_factor; + // if msm_transition = 1, then round = 0. 
+ std::get<26>(accumulator) += msm_transition * round * scaling_factor; - // if msm_transition = 1, pc = pc_shift + msm_size - // `ecc_set_relation` ensures `msm_size` maps to `transcript.msm_count` for the current value of `pc` + // if msm_transition_shift = 1, pc = pc_shift + msm_size + // NB: `ecc_set_relation` ensures `msm_size` maps to `transcript.msm_count` for the current value of `pc` std::get<27>(accumulator) += is_not_first_row * msm_transition_shift * (msm_size + pc_shift - pc) * scaling_factor; // Addition continuity checks @@ -412,8 +544,7 @@ void ECCVMMSMRelationImpl::accumulate(ContainerOverSubrelations& accumulator // Case 3: add4 = 1, add3 = 0 // These checks ensure that the current row does not skip points (for both ADD and SKEW ops) // This is part of a wider set of checks we use to ensure that all point data is used in the assigned - // multiscalar multiplication operation. - // (and not in a different MSM operation) + // multiscalar multiplication operation (and not in a different MSM operation). std::get<28>(accumulator) += add2 * (-add1 + 1) * scaling_factor; std::get<29>(accumulator) += add3 * (-add2 + 1) * scaling_factor; std::get<30>(accumulator) += add4 * (-add3 + 1) * scaling_factor; @@ -432,6 +563,13 @@ void ECCVMMSMRelationImpl::accumulate(ContainerOverSubrelations& accumulator // when transition occurs, perform set membership lookup on (accumulator / pc / msm_size) // perform set membership lookups on add_i * (pc / round / slice_i) // perform lookups on (pc / slice_i / x / y) + + // We look up wnaf slices by mapping round + pc -> slice + // We use an exact set membership check to validate that + // wnafs written in wnaf_relation == wnafs read in msm relation + // We use `add1/add2/add3/add4` to flag whether we are performing a wnaf read op + // We can set these to be Prover-defined as the set membership check implicitly ensures that the correct reads + // have occurred. 
} } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_point_table_relation_impl.hpp b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_point_table_relation_impl.hpp index ea62a7e00049..a212b3a62982 100644 --- a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_point_table_relation_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_point_table_relation_impl.hpp @@ -57,24 +57,24 @@ void ECCVMPointTableRelationImpl::accumulate(ContainerOverSubrelations& accu * In the table, the point associated with `pc = 1` is labelled P. * the point associated with `pc = 0` is labelled Q. * - * | precompute_pc | precompute_point_transition | precompute_round | Tx | Ty | Dx | Dy | - * | -------- | ----------------------- | ----------- | ----- | ----- | ---- | ---- | - * | 1 | 0 | 0 |15P.x | 15P.y | 2P.x | 2P.y | - * | 1 | 0 | 1 |13P.x | 13P.y | 2P.x | 2P.y | - * | 1 | 0 | 2 |11P.x | 11P.y | 2P.x | 2P.y | - * | 1 | 0 | 3 | 9P.x | 9P.y | 2P.x | 2P.y | - * | 1 | 0 | 4 | 7P.x | 7P.y | 2P.x | 2P.y | - * | 1 | 0 | 5 | 5P.x | 5P.y | 2P.x | 2P.y | - * | 1 | 0 | 6 | 3P.x | 3P.y | 2P.x | 2P.y | - * | 1 | 1 | 7 | P.x | P.y | 2P.x | 2P.y | - * | 0 | 0 | 0 |15Q.x | 15Q.y | 2Q.x | 2Q.y | - * | 0 | 0 | 1 |13Q.x | 13Q.y | 2Q.x | 2Q.y | - * | 0 | 0 | 2 |11Q.x | 11Q.y | 2Q.x | 2Q.y | - * | 0 | 0 | 3 | 9Q.x | 9Q.y | 2Q.x | 2Q.y | - * | 0 | 0 | 4 | 7Q.x | 7Q.y | 2Q.x | 2Q.y | - * | 0 | 0 | 5 | 5Q.x | 5Q.y | 2Q.x | 2Q.y | - * | 0 | 0 | 6 | 3Q.x | 3Q.y | 2Q.x | 2Q.y | - * | 0 | 1 | 7 | Q.x | Q.y | 2Q.x | 2Q.y | + * | precompute_pc | precompute_point_transition | precompute_round | Tx | Ty | Dx | Dy | + * | ------------- | ---------------------------- | ------------------- | ----- | ----- | ---- | ---- | + * | 1 | 0 | 0 | 15P.x | 15P.y | 2P.x | 2P.y | + * | 1 | 0 | 1 | 13P.x | 13P.y | 2P.x | 2P.y | + * | 1 | 0 | 2 | 11P.x | 11P.y | 2P.x | 2P.y | + * | 1 | 0 | 3 | 9P.x | 9P.y | 2P.x | 2P.y | + * | 1 | 0 | 4 | 7P.x | 7P.y | 2P.x | 2P.y | + * | 1 
| 0 | 5 | 5P.x | 5P.y | 2P.x | 2P.y | + * | 1 | 0 | 6 | 3P.x | 3P.y | 2P.x | 2P.y | + * | 1 | 1 | 7 | P.x | P.y | 2P.x | 2P.y | + * | 0 | 0 | 0 | 15Q.x | 15Q.y | 2Q.x | 2Q.y | + * | 0 | 0 | 1 | 13Q.x | 13Q.y | 2Q.x | 2Q.y | + * | 0 | 0 | 2 | 11Q.x | 11Q.y | 2Q.x | 2Q.y | + * | 0 | 0 | 3 | 9Q.x | 9Q.y | 2Q.x | 2Q.y | + * | 0 | 0 | 4 | 7Q.x | 7Q.y | 2Q.x | 2Q.y | + * | 0 | 0 | 5 | 5Q.x | 5Q.y | 2Q.x | 2Q.y | + * | 0 | 0 | 6 | 3Q.x | 3Q.y | 2Q.x | 2Q.y | + * | 0 | 1 | 7 | Q.x | Q.y | 2Q.x | 2Q.y | * * We apply the following relations to constrain the above table: * @@ -84,9 +84,12 @@ void ECCVMPointTableRelationImpl::accumulate(ContainerOverSubrelations& accu * * The relations that constrain `precompute_point_transition` and `precompute_pc` are in `ecc_wnaf_relation.hpp` * - * When precompute_point_transition = 1, we use a strict lookup protocol in `ecc_set_relation.hpp` to validate (pc, - * Tx, Ty) belong to the set of points present in our transcript columns. - * ("strict" lookup protocol = every item in the table must be read from once, and only once) + * When precompute_point_transition = 1, the next row corresponds to the beginning of the processing of a new point. + * We use a multiset-equality check, `ecc_set_relation.hpp` to validate (pc, Tx, Ty, scalar-multiplier) is the same + * as something derived from the transcript columns. In other words, the multiset equality check allows the tables + * to communicate, and in particular validates that we are populating our PointTable with precomputed values that + * indeed arise from the Transcript columns. 
(Formerly, we referred to this as a "strict" lookup protocol = every + * item in the table must be read from once, and only once) * * For every row, we use a lookup protocol in `ecc_lookup_relation.hpp` to write the following tuples into a lookup * table: @@ -102,15 +105,15 @@ void ECCVMPointTableRelationImpl::accumulate(ContainerOverSubrelations& accu * negative values produces the WNAF slice values that correspond to the multipliers for (Tx, Ty) and (Tx, -Ty): * * | Tx | Ty | x = 15 - precompute_round | 2x - 15 | y = precompute_round | 2y - 15 | - * | ----- | ----- | -------------------- | ------- | --------------- | ------- | - * | 15P.x | 15P.y | 15 | 15 | 0 | -15 | - * | 13P.x | 13P.y | 14 | 13 | 1 | -13 | - * | 11P.x | 11P.y | 13 | 11 | 2 | -11 | - * | 9P.x | 9P.y | 12 | 9 | 3 | -9 | - * | 7P.x | 7P.y | 11 | 7 | 4 | -7 | - * | 5P.x | 5P.y | 10 | 5 | 5 | -5 | - * | 3P.x | 3P.y | 9 | 3 | 6 | -3 | - * | P.x | P.y | 8 | 1 | 7 | -1 | + * | ----- | ----- | -------------------- | ------- | --------------- | ------- | + * | 15P.x | 15P.y | 15 | 15 | 0 | -15 | + * | 13P.x | 13P.y | 14 | 13 | 1 | -13 | + * | 11P.x | 11P.y | 13 | 11 | 2 | -11 | + * | 9P.x | 9P.y | 12 | 9 | 3 | -9 | + * | 7P.x | 7P.y | 11 | 7 | 4 | -7 | + * | 5P.x | 5P.y | 10 | 5 | 5 | -5 | + * | 3P.x | 3P.y | 9 | 3 | 6 | -3 | + * | P.x | P.y | 8 | 1 | 7 | -1 | */ /** @@ -161,9 +164,11 @@ void ECCVMPointTableRelationImpl::accumulate(ContainerOverSubrelations& accu * (x_3 + x_2 + x_1) * (x_2 - x_1)^2 - (y_2 - y_1)^2 = 0 * (y_3 + y_1) * (x_2 - x_1) + (x_3 - x_1) * (y_2 - y_1) = 0 * - * We don't need to check for incomplete point addition edge case (x_1 == x_2) - * TODO explain why (computing simple point multiples cannot trigger the edge cases, but need to serve a proof of - * this...) + * We don't need to check for incomplete point addition edge case (x_1 == x_2); the only cases this would correspond + * to are y2 == y1 or y2 == -y1. Both of these cases may be ruled out as follows. + * 1. y2 == y1. 
Then 2P == kP, where k∈{1, ..., 13}, which of course cannot happen because the order r of E(Fₚ) + * is a large prime and P is already assumed to not be the neutral element. + * 2. y2 == -y1. Again, then -2P == kP, k∈{1, ..., 13}, and we get the same contradiction. */ const auto& x1 = Tx_shift; const auto& y1 = Ty_shift; diff --git a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_set_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_set_relation.hpp index 0a75445dff4c..cf34584c2f83 100644 --- a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_set_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_set_relation.hpp @@ -22,13 +22,27 @@ template class ECCVMSetRelationImpl { 22, // grand product construction sub-relation 3 // left-shiftable polynomial sub-relation }; - + // prover optimization to allow for skipping the computation of sub-relations at certain points in sumcheck. template inline static bool skip(const AllEntities& in) { - // If z_perm == z_perm_shift, this implies that none of the wire values for the present input are involved in - // non-trivial copy constraints. The value of `transcript_mul` can be non-zero at the end of a long MSM of - // points-at-infinity, which will cause `full_msm_count` to be non-zero while `transcript_msm_count` vanishes. - // Therefore, we add this as a skip condition. + // For the first accumulator in the set relation, the added-on term is 0 if the following vanishes: + // + // `(z_perm + lagrange_first) * numerator_evaluation - (z_perm_shift + lagrange_last) * denominator_evaluation`, + // + // i.e., if z_perm is well-formed. + // + // For the second accumulator in the set relation, the added-on term is 0 if the following vanishes: + // + // `lagrange_last_short * z_perm_shift_short` + // + // To know when we can skip this computation (i.e., when it is "automatically" 0), most cases are handled by the + // condition `z_perm == z_perm_shift`.
In most circumstances, this implies that with overwhelming probability, + // none of the wire values for the present input are involved in non-trivial copy constraints. + // + // There are two other edge-cases we need to check for to know we can skip the computation. First, + // `transcript_mul` can be 1 even though the multiplication is "degenerate" (and not handled by the MSM table): + // this holds if either the scalar is 0 or the point is the neutral element. Therefore we require `transcript_mul == 0`. Second, we + // must force lagrange_last == 0. return (in.z_perm - in.z_perm_shift).is_zero() && in.transcript_mul.is_zero() && in.lagrange_last.is_zero(); } diff --git a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_set_relation_impl.hpp b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_set_relation_impl.hpp index 2919595912df..39482a7b2bcb 100644 --- a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_set_relation_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_set_relation_impl.hpp @@ -12,25 +12,41 @@ namespace bb { /** - * @brief Performs list-equivalence checks for the ECCVM + * @brief Performs multiset equality checks for the ECCVM. This facilitates "communication" between disjoint sets of + * columns, which we view as tables: the Precomputed table, the MSM table, and the Transcript table. This used to be + * called a strict lookup argument (where every element written was read _exactly_ once.) + * - * @details ECCVMSetRelationImpl validates the correctness of the inputs/outputs of the three main algorithms evaluated - * by the ECCVM. + * @details ECCVMSetRelationImpl validates the correctness of the "inputs"/"outputs" of the three main algorithms + * evaluated by the ECCVM. Note that the terminology of "inputs" and "outputs" is _purely psychological_; they each just + * name the multiset we are adding to. + * + * It will be helpful to recall that `pc` always stands for point-counter. We use the terms interchangeably.
+ * + * FIRST TERM: tuple of (pc, round, wnaf_slice), computed when slicing scalar multipliers into slices, as part of + * ECCVMWnafRelation. * - * First term: tuple of (pc, round, wnaf_slice), computed when slicing scalar multipliers into slices, - * as part of ECCVMWnafRelation * Input source: ECCVMWnafRelation * Output source: ECCVMMSMRelation * * - * Second term: tuple of (point-counter, P.x, P.y, scalar-multiplier), used in ECCVMWnafRelation and - * ECCVMPointTableRelation + * + * SECOND TERM: tuple of (pc, P.x, P.y, scalar-multiplier), used in ECCVMWnafRelation. + * * Input source: ECCVMPointTableRelation - * Output source: ECCVMMSMRelation + * Output source: ECCVMTranscriptRelation + * + * Note that, from the latter table, this is only turned on when we are at a `mul` instruction. Similarly, from the + * former table, this is only turned on when `precompute_point_transition == 1`. + * + * THIRD TERM: tuple of (pc, P.x, P.y,msm-size) from ECCVMMSMRelation, to link the output of the MSM computation from + * the MSM table to the values in the Transcript tables. * - * Third term: tuple of (point-counter, P.x, P.y, msm-size) from ECCVMMSMRelation * Input source: ECCVMMSMRelation * Output source: ECCVMTranscriptRelation + * Note that, from the latter table, this is only turned on when we are at an MSM transition, so we don't record the + * "intermediate" `transcript_pc` values from the Transcript columns. This is compatible with the fact that the `msm_pc` + * values are _constant_ on a fixed MSM. + * * * @tparam FF * @tparam AccumulatorTypes @@ -57,11 +73,19 @@ Accumulator ECCVMSetRelationImpl::compute_grand_product_numerator(const AllE const auto& precompute_select = View(in.precompute_select); /** - * @brief First term: tuple of (pc, round, wnaf_slice), computed when slicing scalar multipliers into slices, - * as part of ECCVMWnafRelation. - * If precompute_select = 1, tuple entry = (wnaf-slice + point-counter * beta + msm-round * beta_sqr). 
- There are 4 tuple entries per row. + * @brief First term: tuple of (pc, round, wnaf_slice), computed when slicing scalar multipliers into slices, as + * part of ECCVMWnafRelation. + * + * @details + * There are 4 tuple entries per row of the Precompute table. Moreover, the element that "increments" is + * 4 * `precompute_round`, due to the fact that the Precompute columns contain four "digits"/slices per row. + * + * @note + * We only add this tuple if `precompute_select == 1`. Otherwise, we add the tuple (0, 0, 0). */ + + // OPTIMIZE(@zac-williamson #2226) optimize degrees + Accumulator numerator(1); // degree-0 { const auto& s0 = View(in.precompute_s1hi); @@ -71,7 +95,6 @@ wnaf_slice += wnaf_slice; wnaf_slice += s1; - // TODO(@zac-williamson #2226) optimize const auto wnaf_slice_input0 = wnaf_slice + gamma + precompute_pc * beta + precompute_round4 * beta_sqr; numerator *= wnaf_slice_input0; // degree-1 } @@ -83,7 +106,6 @@ wnaf_slice += wnaf_slice; wnaf_slice += s1; - // TODO(@zac-williamson #2226) optimize const auto wnaf_slice_input1 = wnaf_slice + gamma + precompute_pc * beta + (precompute_round4 + 1) * beta_sqr; numerator *= wnaf_slice_input1; // degree-2 } @@ -95,7 +117,6 @@ wnaf_slice += wnaf_slice; wnaf_slice += s1; - // TODO(@zac-williamson #2226) optimize const auto wnaf_slice_input2 = wnaf_slice + gamma + precompute_pc * beta + (precompute_round4 + 2) * beta_sqr; numerator *= wnaf_slice_input2; // degree-3 } @@ -106,7 +127,6 @@ auto wnaf_slice = s0 + s0; wnaf_slice += wnaf_slice; wnaf_slice += s1; - // TODO(@zac-williamson #2226) optimize const auto wnaf_slice_input3 = wnaf_slice + gamma + precompute_pc * beta + (precompute_round4 + 3) *
beta_sqr; numerator *= wnaf_slice_input3; // degree-4 } @@ -121,15 +141,24 @@ Accumulator ECCVMSetRelationImpl::compute_grand_product_numerator(const AllE } { const auto& eccvm_set_permutation_delta = params.eccvm_set_permutation_delta; + // if `precompute_select == 1`, don't change the numerator. if it is 0, then to get the grand product argument + // to work (as we have zero-padded the rows of the MSM table), we must multiply by + // `eccvm_set_permutation_delta` == (γ)·(γ + β²)·(γ + 2β²)·(γ + 3β²) numerator *= precompute_select * (-eccvm_set_permutation_delta + 1) + eccvm_set_permutation_delta; // degree-7 } /** - * @brief Second term: tuple of (point-counter, P.x, P.y, scalar-multiplier), used in ECCVMWnafRelation and - * ECCVMPointTableRelation. ECCVMWnafRelation validates the sum of the wnaf slices associated with point-counter + * @brief Second term: tuple of (pc, P.x, P.y, scalar-multiplier), used in ECCVMWnafRelation and + * ECCVMPointTableRelation. + * + * @details + * ECCVMWnafRelation validates the sum of the wnaf slices associated with point-counter * equals scalar-multiplier. ECCVMPointTableRelation computes a table of muliples of [P]: { -15[P], -13[P], ..., - * 15[P] }. We need to validate that scalar-multiplier and [P] = (P.x, P.y) come from MUL opcodes in the transcript - * columns. + * 15[P] }. We need to validate that the scalar-multiplier and [P] = (P.x, P.y) come from MUL opcodes in the + * transcript columns; in other words, that the wNAF expansion of the scalar-multiplier is correct. + * + * @note + * We only add the tuple to the multiset if `precompute_point_transition == 1`. 
*/ { const auto& table_x = View(in.precompute_tx); @@ -145,7 +174,8 @@ Accumulator ECCVMSetRelationImpl::compute_grand_product_numerator(const AllE return negative_inverse_seven; } }; - auto adjusted_skew = precompute_skew * negative_inverse_seven(); + auto adjusted_skew = + precompute_skew * negative_inverse_seven(); // `precompute_skew` ∈ {0, 7}, `adjusted_skew`∈ {0, -1} const auto& wnaf_scalar_sum = View(in.precompute_scalar_sum); const auto w0 = convert_to_wnaf(View(in.precompute_s1hi), View(in.precompute_s1lo)); @@ -168,7 +198,7 @@ Accumulator ECCVMSetRelationImpl::compute_grand_product_numerator(const AllE row_slice += row_slice; row_slice += row_slice; row_slice += row_slice; - row_slice += w3; + row_slice += w3; // row_slice = 2^12 w_0 + 2^8 w_1 + 2^4 w_2 + 2^0 w_3 auto scalar_sum_full = wnaf_scalar_sum + wnaf_scalar_sum; scalar_sum_full += scalar_sum_full; @@ -186,7 +216,8 @@ Accumulator ECCVMSetRelationImpl::compute_grand_product_numerator(const AllE scalar_sum_full += scalar_sum_full; scalar_sum_full += scalar_sum_full; scalar_sum_full += scalar_sum_full; - scalar_sum_full += row_slice + adjusted_skew; + scalar_sum_full += + row_slice + adjusted_skew; // scalar_sum_full = 2^16 * wnaf_scalar_sum + row_slice + adjusted_skew auto precompute_point_transition = View(in.precompute_point_transition); @@ -198,16 +229,21 @@ Accumulator ECCVMSetRelationImpl::compute_grand_product_numerator(const AllE numerator *= point_table_init_read; // degree-9 } /** - * @brief Third term: tuple of (point-counter, P.x, P.y, msm-size) from ECCVMMSMRelation. + * @brief Third term: tuple of (pc, P.x, P.y, msm-size) from ECCVMMSMRelation. + * @brief Third term: tuple of (pc, P.x, P.y, msm-size) from ECCVMMSMRelation. * (P.x, P.y) is the output of a multi-scalar-multiplication evaluated in ECCVMMSMRelation. 
* We need to validate that the same values (P.x, P.y) are present in the Transcript columns and describe a - * multi-scalar multiplication of size `msm-size`, starting at `point-counter`. + * multi-scalar multiplication of size `msm-size`, starting at `pc`. + * multi-scalar multiplication of size `msm-size`, starting at `pc`. + * + * If `msm_transition_shift == 1`, this indicates the current row is the last row of a multiscalar + * multiplication evaluation. The output of the MSM will be present on `(msm_accumulator_x_shift, + * msm_accumulator_y_shift)`. The values of `msm_accumulator_x_shift, msm_accumulator_y_shift, msm_pc, + * msm_size_of_msm` must match up with equivalent values `transcript_msm_output_x, transcript_msm_output_y, + * transcript_pc, transcript_msm_count` present in the Transcript columns. * - * If msm_transition_shift = 1, this indicates the current row is the last row of a multiscalar - * multiplication evaluation. The output of the MSM will be present on `(msm_accumulator_x_shift, - * msm_accumulator_y_shift)`. The values of `msm_accumulator_x_shift, msm_accumulator_y_shift, msm_pc, - * msm_size_of_msm` must match up with equivalent values `transcript_msm_output_x, transcript_msm_output_y, - * transcript_pc, transcript_msm_count` present in the Transcript columns + * Checking `msm_size` is correct (it is tied to the `pc`) is necessary to make sure the `msm_pc` increments + * correctly after it completes an MSM. */ { const auto& lagrange_first = View(in.lagrange_first); @@ -246,8 +282,8 @@ Accumulator ECCVMSetRelationImpl::compute_grand_product_denominator(const Al { using View = typename Accumulator::View; - // TODO(@zac-williamson). The degree of this contribution is 17! makes overall relation degree 19. - // Can optimise by refining the algebra, once we have a stable base to iterate off of. + // OPTIMIZE(@zac-williamson). The degree of this contribution is 17! makes overall relation degree 19. 
+ // Can potentially optimize by refining the algebra. const auto& gamma = params.gamma; const auto& beta = params.beta; const auto& beta_sqr = params.beta_sqr; @@ -295,44 +331,52 @@ Accumulator ECCVMSetRelationImpl::compute_grand_product_denominator(const Al } /** - * @brief Second term: tuple of (transcript_pc, transcript_Px, transcript_Py, z1) OR (transcript_pc, \lambda * - * transcript_Px, -transcript_Py, z2) for each scalar multiplication in ECCVMTranscriptRelation columns. (the latter - * term uses the curve endomorphism: \lambda = cube root of unity). These values must be equivalent to the second + * @brief Second term: tuple of the form `(transcript_pc, transcript_Px, transcript_Py, z1)` OR `(transcript_pc, + * \beta * transcript_Px, -transcript_Py, z2)` for each scalar multiplication in ECCVMTranscriptRelation columns. + * Here \f$\beta\f$ is a cube root of unity in \f$\mathbb f_q\f$. These values must be equivalent to the second * term values in `compute_grand_product_numerator` + * + * @details + * Recall that every element of \f$\mathbb F_r\f$ may be written as \f$z_1 + \zeta z_2 = z_1 - \beta z_2\f$, where + * the \f$z_i\f$ are 128 bit numbers and \f$\zeta = -\beta\f$ is a sixth root of unity. 
*/ { const auto& transcript_pc = View(in.transcript_pc); - auto transcript_Px = View(in.transcript_Px); - auto transcript_Py = View(in.transcript_Py); - auto z1 = View(in.transcript_z1); - auto z2 = View(in.transcript_z2); - auto z1_zero = View(in.transcript_z1zero); - auto z2_zero = View(in.transcript_z2zero); - auto base_infinity = View(in.transcript_base_infinity); - auto transcript_mul = View(in.transcript_mul); + const auto& transcript_Px = View(in.transcript_Px); + const auto& transcript_Py = View(in.transcript_Py); + const auto& z1 = View(in.transcript_z1); + const auto& z2 = View(in.transcript_z2); + const auto& z1_zero = View(in.transcript_z1zero); + const auto& z2_zero = View(in.transcript_z2zero); + const auto& base_infinity = View(in.transcript_base_infinity); + const auto& transcript_mul = View(in.transcript_mul); - auto lookup_first = (-z1_zero + 1); - auto lookup_second = (-z2_zero + 1); - // FF endomorphism_base_field_shift = FF::cube_root_of_unity(); - FF endomorphism_base_field_shift = FF(bb::fq::cube_root_of_unity()); + const auto& lookup_first = (-z1_zero + 1); + const auto& lookup_second = (-z2_zero + 1); + FF cube_root_unity = FF(bb::fq::cube_root_of_unity()); auto transcript_input1 = transcript_pc + transcript_Px * beta + transcript_Py * beta_sqr + z1 * beta_cube; // degree = 1 - auto transcript_input2 = (transcript_pc - 1) + transcript_Px * endomorphism_base_field_shift * beta - + auto transcript_input2 = (transcript_pc - 1) + transcript_Px * cube_root_unity * beta - transcript_Py * beta_sqr + z2 * beta_cube; // degree = 2 - // | q_mul | z2_zero | z1_zero | base_infinity | lookup | - // | ----- | ------- | ------- | ------------- |----------------------- | - // | 0 | - | - | - | 1 | - // | 1 | 0 | 0 | 0 | 1 | - // | 1 | 0 | 1 | 0 | X + gamma | - // | 1 | 1 | 0 | 0 | Y + gamma | - // | 1 | 1 | 1 | 0 | (X + gamma)(Y + gamma) | - // | 1 | 0 | 0 | 1 | 1 | - // | 1 | 0 | 1 | 1 | 1 | - // | 1 | 1 | 0 | 1 | 1 | - // | 1 | 1 | 1 | 1 | 1 | + // The 
following diagram expresses a fingerprint of part of the tuple. It does not include `transcript_pc` and + // has not weighted the X and Y with beta and beta_sqr respectively. The point is nonetheless to show exactly + // when a tuple is added to the multiset: iff it corresponds to a non-trivial (128-bit) scalar mul. If neither + // z1 nor z2 are zero, then we implicitly add _two_ tuples to the multiset. + // + // | q_mul | z2_zero | z1_zero | base_infinity | partial lookup | + // | ----- | ------- | ------- | ------------- |----------------------- | + // | 0 | - | - | - | 1 | + // | 1 | 0 | 0 | 0 | 1 | + // | 1 | 0 | 1 | 0 | X + gamma | + // | 1 | 1 | 0 | 0 | Y + gamma | + // | 1 | 1 | 1 | 0 | (X + gamma)(Y + gamma) | + // | 1 | 0 | 0 | 1 | 1 | + // | 1 | 0 | 1 | 1 | 1 | + // | 1 | 1 | 0 | 1 | 1 | + // | 1 | 1 | 1 | 1 | 1 | transcript_input1 = (transcript_input1 + gamma) * lookup_first + (-lookup_first + 1); // degree 2 transcript_input2 = (transcript_input2 + gamma) * lookup_second + (-lookup_second + 1); // degree 3 @@ -342,30 +386,32 @@ Accumulator ECCVMSetRelationImpl::compute_grand_product_denominator(const Al // point_table_init_write = degree 7 auto point_table_init_write = transcript_mul * transcript_product + (-transcript_mul + 1); denominator *= point_table_init_write; // degree 17 - - // auto point_table_init_write_1 = transcript_mul * transcript_input1 + (-transcript_mul + 1); - // denominator *= point_table_init_write_1; // degree-11 - - // auto point_table_init_write_2 = transcript_mul * transcript_input2 + (-transcript_mul + 1); - // denominator *= point_table_init_write_2; // degree-14 } /** - * @brief Third term: tuple of (point-counter, P.x, P.y, msm-size) from ECCVMTranscriptRelation. + * @brief Third term: tuple of (pc, P.x, P.y, msm-size) from ECCVMTranscriptRelation. * (P.x, P.y) is the *claimed* output of a multi-scalar-multiplication evaluated in ECCVMMSMRelation. 
* We need to validate that the msm output produced in ECCVMMSMRelation is equivalent to the output present - * in `transcript_msm_output_x, transcript_msm_output_y`, for a given multi-scalar multiplication starting at - * `transcript_pc` and has size `transcript_msm_count` + * in `transcript_msm_output_x, transcript_msm_output_y`, for a given multi-scalar multiplication starting at + * `transcript_pc` and has size `transcript_msm_count`. + * @note In the case of an honest prover, `(transcript_msm_output_x, transcript_msm_output_y)` is the value of the + * just-completed MSM + `OFFSET` (as this is what the MSM table computes with to avoid branch logic.) + * + * in `transcript_msm_output_x, transcript_msm_output_y`, for a given multi-scalar multiplication starting at + * `transcript_pc` and has size `transcript_msm_count`. + * @note In the case of an honest prover, `(transcript_msm_output_x, transcript_msm_output_y)` is the value of the + * just-completed MSM + `OFFSET` (as this is what the MSM table computes with to avoid branch logic.) 
+ * */ { - auto transcript_pc_shift = View(in.transcript_pc_shift); - auto transcript_msm_x = View(in.transcript_msm_x); - auto transcript_msm_y = View(in.transcript_msm_y); - auto transcript_msm_transition = View(in.transcript_msm_transition); - auto transcript_msm_count = View(in.transcript_msm_count); - auto z1_zero = View(in.transcript_z1zero); - auto z2_zero = View(in.transcript_z2zero); - auto transcript_mul = View(in.transcript_mul); - auto base_infinity = View(in.transcript_base_infinity); + const auto& transcript_pc_shift = View(in.transcript_pc_shift); + const auto& transcript_msm_x = View(in.transcript_msm_x); + const auto& transcript_msm_y = View(in.transcript_msm_y); + const auto& transcript_msm_transition = View(in.transcript_msm_transition); + const auto& transcript_msm_count = View(in.transcript_msm_count); + const auto& z1_zero = View(in.transcript_z1zero); + const auto& z2_zero = View(in.transcript_z2zero); + const auto& transcript_mul = View(in.transcript_mul); + const auto& base_infinity = View(in.transcript_base_infinity); // do not add to count if point at infinity! 
auto full_msm_count = diff --git a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_transcript_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_transcript_relation.hpp index 3db3096453df..8854b077b6a0 100644 --- a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_transcript_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_transcript_relation.hpp @@ -61,4 +61,4 @@ template class ECCVMTranscriptRelationImpl { template using ECCVMTranscriptRelation = Relation>; -} // namespace bb \ No newline at end of file +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_transcript_relation_impl.hpp b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_transcript_relation_impl.hpp index 4c6511bc1843..a52d6465e6a6 100644 --- a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_transcript_relation_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_transcript_relation_impl.hpp @@ -55,46 +55,48 @@ void ECCVMTranscriptRelationImpl::accumulate(ContainerOverSubrelations& accu static const Accumulator oy(qy); return std::array{ ox, oy }; }; - auto z1 = View(in.transcript_z1); - auto z2 = View(in.transcript_z2); - auto z1_zero = View(in.transcript_z1zero); - auto z2_zero = View(in.transcript_z2zero); - auto op = View(in.transcript_op); - auto q_add = View(in.transcript_add); - auto q_mul = View(in.transcript_mul); - auto q_mul_shift = View(in.transcript_mul_shift); - auto q_eq = View(in.transcript_eq); - auto msm_transition = View(in.transcript_msm_transition); - auto msm_count = View(in.transcript_msm_count); - auto msm_count_shift = View(in.transcript_msm_count_shift); - auto pc = View(in.transcript_pc); - auto pc_shift = View(in.transcript_pc_shift); - auto transcript_accumulator_x_shift = View(in.transcript_accumulator_x_shift); - auto transcript_accumulator_y_shift = View(in.transcript_accumulator_y_shift); - auto transcript_accumulator_x = 
View(in.transcript_accumulator_x); - auto transcript_accumulator_y = View(in.transcript_accumulator_y); - auto transcript_msm_x = View(in.transcript_msm_intermediate_x); - auto transcript_msm_y = View(in.transcript_msm_intermediate_y); - auto transcript_Px = View(in.transcript_Px); - auto transcript_Py = View(in.transcript_Py); - auto is_accumulator_empty = -View(in.transcript_accumulator_not_empty) + 1; - auto lagrange_first = View(in.lagrange_first); - auto lagrange_last = View(in.lagrange_last); - auto is_accumulator_empty_shift = -View(in.transcript_accumulator_not_empty_shift) + 1; - auto q_reset_accumulator = View(in.transcript_reset_accumulator); - auto lagrange_second = View(in.lagrange_second); - auto transcript_Pinfinity = View(in.transcript_base_infinity); - auto transcript_Px_inverse = View(in.transcript_base_x_inverse); - auto transcript_Py_inverse = View(in.transcript_base_y_inverse); - auto transcript_add_x_equal = View(in.transcript_add_x_equal); - auto transcript_add_y_equal = View(in.transcript_add_y_equal); - auto transcript_add_lambda = View(in.transcript_add_lambda); - auto transcript_msm_infinity = View(in.transcript_msm_infinity); + const auto z1 = View(in.transcript_z1); + const auto z2 = View(in.transcript_z2); + const auto z1_zero = View(in.transcript_z1zero); + const auto z2_zero = View(in.transcript_z2zero); + const auto op = View(in.transcript_op); + const auto q_add = View(in.transcript_add); + const auto q_mul = View(in.transcript_mul); + const auto q_mul_shift = View(in.transcript_mul_shift); + const auto q_eq = View(in.transcript_eq); + const auto msm_transition = View(in.transcript_msm_transition); + const auto msm_count = View(in.transcript_msm_count); + const auto msm_count_shift = View(in.transcript_msm_count_shift); + const auto pc = View(in.transcript_pc); + const auto pc_shift = View(in.transcript_pc_shift); + const auto transcript_accumulator_x_shift = View(in.transcript_accumulator_x_shift); + const auto 
transcript_accumulator_y_shift = View(in.transcript_accumulator_y_shift); + const auto transcript_accumulator_x = View(in.transcript_accumulator_x); + const auto transcript_accumulator_y = View(in.transcript_accumulator_y); + const auto msm_count_zero_at_transition = View(in.transcript_msm_count_zero_at_transition); + const auto msm_count_at_transition_inverse = View(in.transcript_msm_count_at_transition_inverse); + const auto transcript_msm_x = View(in.transcript_msm_intermediate_x); + const auto transcript_msm_y = View(in.transcript_msm_intermediate_y); + const auto transcript_Px = View(in.transcript_Px); + const auto transcript_Py = View(in.transcript_Py); + const auto is_accumulator_empty = -View(in.transcript_accumulator_not_empty) + 1; + const auto lagrange_first = View(in.lagrange_first); + const auto lagrange_last = View(in.lagrange_last); + const auto is_accumulator_empty_shift = -View(in.transcript_accumulator_not_empty_shift) + 1; + const auto q_reset_accumulator = View(in.transcript_reset_accumulator); + const auto lagrange_second = View(in.lagrange_second); + const auto transcript_Pinfinity = View(in.transcript_base_infinity); + const auto transcript_Px_inverse = View(in.transcript_base_x_inverse); + const auto transcript_Py_inverse = View(in.transcript_base_y_inverse); + const auto transcript_add_x_equal = View(in.transcript_add_x_equal); + const auto transcript_add_y_equal = View(in.transcript_add_y_equal); + const auto transcript_add_lambda = View(in.transcript_add_lambda); + const auto transcript_msm_infinity = View(in.transcript_msm_infinity); - auto is_not_first_row = (-lagrange_first + 1); - auto is_not_last_row = (-lagrange_last + 1); - auto is_not_first_or_last_row = (-lagrange_first + -lagrange_last + 1); - auto is_not_infinity = (-transcript_Pinfinity + 1); + const auto is_not_first_row = (-lagrange_first + 1); + const auto is_not_last_row = (-lagrange_last + 1); + const auto is_not_first_or_last_row = (-lagrange_first + -lagrange_last + 1); 
+ const auto is_not_infinity = (-transcript_Pinfinity + 1); /** * @brief Validate correctness of z1_zero, z2_zero. * If z1_zero = 0 and operation is a MUL, we will write a scalar mul instruction into our multiplication table. @@ -113,7 +115,6 @@ void ECCVMTranscriptRelationImpl::accumulate(ContainerOverSubrelations& accu * @brief Validate `op` opcode is well formed. * `op` is defined to be q_reset_accumulator + 2 * q_eq + 4 * q_mul + 8 * q_add, * where q_reset_accumulator, q_eq, q_mul, q_add are all boolean - * (TODO: bool constrain these efficiently #2223) */ auto tmp = q_add + q_add; tmp += q_mul; @@ -127,53 +128,78 @@ void ECCVMTranscriptRelationImpl::accumulate(ContainerOverSubrelations& accu * @brief Validate `pc` is updated correctly. * pc stands for Point Counter. It decrements by 1 for every 128-bit multiplication operation. * If q_mul = 1, pc decrements by !z1_zero + !z2_zero, else pc decrements by 0 - * @note pc starts out at its max value and decrements down to 0. This keeps the degree of the pc polynomial smol + * @note pc starts out at its max value and decrements down to 0. This keeps the degree of the pc polynomial small. + * we check that the last value is 0 later. */ Accumulator pc_delta = pc - pc_shift; auto num_muls_in_row = ((-z1_zero + 1) + (-z2_zero + 1)) * (-transcript_Pinfinity + 1); + // note that the value of `pc` in the first row is 0 because `pc` is shiftable. It is the second row where it starts + // out at its maximum value. std::get<3>(accumulator) += is_not_first_row * (pc_delta - q_mul * num_muls_in_row) * scaling_factor; // degree 4 /** - * @brief Validate `msm_transition` is well-formed. + * @brief Validate `msm_transition_zero_at_transition` is well-formed enough to bear witness to a correct + * computation. 
+ * + * If the current row is the last `mul` instruction in a multiscalar multiplication (i.e., if the next row is not a + * `mul` instruction), then check that `msm_transition_zero_at_transition` is constrained as follows: if it is 0, + * then `msm_count + num_muls_in_row != 0` and is in fact the inverse of the (forced-to-be nonzero + * value of) `msm_count_at_transition_inverse`. * - * If the current row is the last mul instruction in a multiscalar multiplication, msm_transition = 1. - * i.e. if q_mul == 1 and q_mul_shift == 0, msm_transition = 1, else is 0 - * We also require that `msm_count + [current msm number] > 0` + * @note this does _not_ constrain `msm_transition_zero_at_transition` to vanish outside of syntactic + * transitions (meaning when current row is a `mul` and next row is not a `mul`). similarly, + * `msm_count_at_transition_inverse` is not forced to vanish away from syntactic transitions. However, neither of + * these is necessary to witness a valid computation: the values away from syntactic transitions may be arbitrary, + * as the values of these two wires are only used to validate computations _at syntactic transitions_. */ - auto msm_transition_check = q_mul * (-q_mul_shift + 1); // degree 2 - // auto num_muls_total = msm_count + num_muls_in_row; - auto msm_count_zero_at_transition = View(in.transcript_msm_count_zero_at_transition); - auto msm_count_at_transition_inverse = View(in.transcript_msm_count_at_transition_inverse); - auto msm_count_total = msm_count + num_muls_in_row; // degree 3 + // `msm_transition_check` is a _syntactic_ check that we could be in a transition. + // `msm_count_total` is the total number of (short) multiplications, _including_ the multiplications to be processed + // in this row. 
+ auto msm_transition_check = q_mul * (-q_mul_shift + 1); // degree 2 + auto msm_count_total = msm_count + num_muls_in_row; // degree 2 + // `msm_count_at_transition_check` witnesses the claim that if `msm_count_zero_at_transition == 1`, then + // `msm_count_total == 0` and if `msm_count_zero_at_transition == 0` and we are at a syntactic transition, then + // `msm_count_total is invertible`. The way it does this is `msm_count_at_transition_inverse` is supposed to vanish + // *except* possibly at a syntactic transition. auto msm_count_zero_at_transition_check = msm_count_zero_at_transition * msm_count_total; msm_count_zero_at_transition_check += - (msm_count_total * msm_count_at_transition_inverse - 1) * (-msm_count_zero_at_transition + 1); - std::get<4>(accumulator) += msm_transition_check * msm_count_zero_at_transition_check * scaling_factor; // degree 3 + (msm_count_total * msm_count_at_transition_inverse - 1) * (-msm_count_zero_at_transition + 1); // degree 4 + // forces `msm_count_zero_at_transition` to have the following property at a syntactic transition: if + // `msm_count_zero_at_transition == 1`, then `msm_count_total == 0`. else if `msm_count_zero_at_transition == 0`, + // then `msm_count_total != 0` and is in fact the inverse of `msm_count_at_transition_inverse` (which is a witness + // column). + std::get<4>(accumulator) += msm_transition_check * msm_count_zero_at_transition_check * scaling_factor; // degree 6 - // Validate msm_transition_msm_count is correct - // ensure msm_transition is zero if count is zero + /** + * @brief Validate `msm_transition` is well-formed. + * + * If the current row is the last `mul` instruction in a multiscalar multiplication, and if the putative MSM will + * have a positive number of terms, then msm_transition = 1. i.e., if q_mul == 1 and q_mul_shift == 0, and + * `msm_count_total:= msm_count + num_muls_in_row > 0`, then `msm_transition` = 1, else 0. 
+ */ std::get<5>(accumulator) += (msm_transition - msm_transition_check * (-msm_count_zero_at_transition + 1)) * scaling_factor; // degree 3 /** - * @brief Validate `msm_count` resets when we end a multiscalar multiplication. + * @brief Validate `msm_count` is 0 when we are not at a `mul` op. * msm_count tracks the number of scalar muls in the current active multiscalar multiplication. - * (if no msm active, msm_count == 0) - * If current row ends an MSM, `msm_count_shift = 0` (msm_count value at next row) + * (if no msm active, `msm_count == 0`) + * + * @note this in particular "resets" the msm_count when we are done with an msm. */ - std::get<6>(accumulator) += (msm_transition * msm_count_shift) * scaling_factor; // degree 2 + + std::get<6>(accumulator) += ((-q_mul + 1) * msm_count) * scaling_factor; // degree 2 /** * @brief Validate `msm_count` updates correctly for mul operations. - * msm_count updates by (!z1_zero + !z2_zero) if current op is a mul instruction (and msm is not terminating at next - * row). + * msm_count updates by (!z1_zero + !z2_zero) if current op is a mul instruction with the point _not_ the + * point-at-infinity and msm is not terminating at next row. */ - auto msm_count_delta = msm_count_shift - msm_count; // degree 4 - auto num_counts = ((-z1_zero + 1) + (-z2_zero + 1)) * (-transcript_Pinfinity + 1); - std::get<7>(accumulator) += - is_not_first_row * (-msm_transition + 1) * (msm_count_delta - q_mul * (num_counts)) * scaling_factor; + auto msm_count_delta = msm_count_shift - msm_count; + std::get<7>(accumulator) += is_not_first_row * (-msm_transition + 1) * (msm_count_delta - q_mul * num_muls_in_row) * + scaling_factor; // degree 5 /** * @brief Opcode exclusion tests. 
We have the following assertions: @@ -192,65 +218,86 @@ void ECCVMTranscriptRelationImpl::accumulate(ContainerOverSubrelations& accu * IF lhs and rhs are not at infinity THEN lhs == rhs * ELSE lhs and rhs are BOTH points at infinity **/ - auto both_infinity = transcript_Pinfinity * is_accumulator_empty; - auto both_not_infinity = (-transcript_Pinfinity + 1) * (-is_accumulator_empty + 1); - auto infinity_exclusion_check = transcript_Pinfinity + is_accumulator_empty - both_infinity - both_infinity; - auto eq_x_diff = transcript_Px - transcript_accumulator_x; - auto eq_y_diff = transcript_Py - transcript_accumulator_y; + auto both_infinity = transcript_Pinfinity * is_accumulator_empty; // degree 2 + auto both_not_infinity = (-transcript_Pinfinity + 1) * (-is_accumulator_empty + 1); // degree 2 + auto infinity_exclusion_check = + transcript_Pinfinity + is_accumulator_empty - both_infinity - both_infinity; // degree 2 + auto eq_x_diff = transcript_Px - transcript_accumulator_x; // degree 1 + auto eq_y_diff = transcript_Py - transcript_accumulator_y; // degree 1 auto eq_x_diff_relation = q_eq * (eq_x_diff * both_not_infinity + infinity_exclusion_check); // degree 4 auto eq_y_diff_relation = q_eq * (eq_y_diff * both_not_infinity + infinity_exclusion_check); // degree 4 std::get<9>(accumulator) += eq_x_diff_relation * scaling_factor; // degree 4 std::get<10>(accumulator) += eq_y_diff_relation * scaling_factor; // degree 4 /** - * @brief Initial condition check on 1st row. - * We require the following values are 0 on 1st row: - * is_accumulator_empty = 1 - * msm_count = 0 - * note...actually second row? bleurgh - * NOTE: we want pc = 0 at lagrange_last :o + * @brief Boundary conditions. + * The first "content" row is the _second_ row of the table. + * + * We demand that the following values are present in this first content row: + * `is_accumulator_empty == 1`; and + * `msm_count == 0`. + * We also demand that `pc == 0` at the last row. 
*/ - std::get<11>(accumulator) += lagrange_second * (-is_accumulator_empty + 1) * scaling_factor; // degree 2 - std::get<12>(accumulator) += lagrange_second * msm_count * scaling_factor; // degree 2 + std::get<11>(accumulator) += lagrange_second * (-is_accumulator_empty + 1) * scaling_factor; // degree 2 + std::get<12>(accumulator) += (lagrange_second * msm_count + lagrange_last * pc) * scaling_factor; // degree 2 /** * @brief On-curve validation checks. - * If q_mul = 1 OR q_add = 1 OR q_eq = 1, require (transcript_Px, transcript_Py) is valid ecc point - * q_mul/q_add/q_eq mutually exclusive, can represent as sum of 3 + * If q_mul = 1 OR q_add = 1 OR q_eq = 1, require (transcript_Px, transcript_Py) is valid ecc point as long as the + * point-at-infinity flag is off. As q_mul, q_add, and q_eq are pairwise mutually exclusive, the value `q_add + + * q_mul + q_eq` is boolean. */ const auto validate_on_curve = q_add + q_mul + q_eq; const auto on_curve_check = transcript_Py * transcript_Py - transcript_Px * transcript_Px * transcript_Px - get_curve_b(); - std::get<13>(accumulator) += validate_on_curve * on_curve_check * is_not_infinity * scaling_factor; // degree 6 + std::get<13>(accumulator) += validate_on_curve * on_curve_check * is_not_infinity * scaling_factor; // degree 5 /** * @brief Validate relations from ECC Group Operations are well formed * */ { + // in the following, LHS is the new elliptic curve point and RHS is the accumulator. LHS can either be the + // explicit point in an `add` op or the result of an MSM at the end of an MSM. RHS is often referred to as A. Accumulator transcript_lambda_relation(0); - auto is_double = transcript_add_x_equal * transcript_add_y_equal; - auto is_add = (-transcript_add_x_equal + 1); + auto is_double = transcript_add_x_equal * transcript_add_y_equal; // degree 2 + // `is_add == 1` iff the op_code is `add` and the x-value of the point-to-add and the accumulator are _not_ + // equal. 
this ensures that it is not a double and that the result is not the point-at-infinity. + auto is_add = -transcript_add_x_equal + 1; // degree 1 + // `add_result_is_infinity == 1` iff the op_code is `add`, the x-value of the point-to-add and the accumulator + // are equal, and the y-values are unequal. then the result of the accumulation is of course the + // point-at-infinity. auto add_result_is_infinity = transcript_add_x_equal * (-transcript_add_y_equal + 1); // degree 2 - auto rhs_x = transcript_accumulator_x; - auto rhs_y = transcript_accumulator_y; - auto out_x = transcript_accumulator_x_shift; - auto out_y = transcript_accumulator_y_shift; - auto lambda = transcript_add_lambda; - auto lhs_x = transcript_Px * q_add + transcript_msm_x * msm_transition; - auto lhs_y = transcript_Py * q_add + transcript_msm_y * msm_transition; - auto lhs_infinity = transcript_Pinfinity * q_add + transcript_msm_infinity * msm_transition; - auto rhs_infinity = is_accumulator_empty; - auto result_is_lhs = rhs_infinity * (-lhs_infinity + 1); // degree 2 - auto result_is_rhs = (-rhs_infinity + 1) * lhs_infinity; // degree 2 - auto result_infinity_from_inputs = lhs_infinity * rhs_infinity; // degree 2 + auto rhs_x = transcript_accumulator_x; // degree 1 + auto rhs_y = transcript_accumulator_y; // degree 1 + auto out_x = transcript_accumulator_x_shift; // degree 1 + auto out_y = transcript_accumulator_y_shift; // degree 1 + auto lambda = transcript_add_lambda; // degree 1 + // note that `msm_transition` and `q_add` are mutually exclusive booleans. (they can also both be off.) + // therefore `(lhs_x, lhs_y)` is either the point in the `add` VM instruction _or_ the output of the + // just-completed MSM. + auto lhs_x = transcript_Px * q_add + transcript_msm_x * msm_transition; // degree 2 + auto lhs_y = transcript_Py * q_add + transcript_msm_y * msm_transition; // degree 2 + // `lhs_infinity == 1` iff the point being added to the accumulator is the point-at-infinity. 
+ auto lhs_infinity = transcript_Pinfinity * q_add + transcript_msm_infinity * msm_transition; // degree 2 + auto rhs_infinity = is_accumulator_empty; // degree 1 + // `result_is_lhs == 1` iff the output of the operation is the LHS and is _not_ the point-at-infinity. + // `result_is_rhs == 1` iff the output of the operation is the RHS and is _not_ the point-at-infinity. + auto result_is_lhs = rhs_infinity * (-lhs_infinity + 1); // degree 2 + auto result_is_rhs = (-rhs_infinity + 1) * lhs_infinity; // degree 2 + // `result_infinity_from_inputs` checks if both the LHS && RHS are the point-at-infinity. this means that the + // result is the point-at-infinity from "pure-thought" reasons from the inputs. + auto result_infinity_from_inputs = lhs_infinity * rhs_infinity; // degree 2 + // `result_infinity_from_operation` tests if the operation is non-trivial and the output is the + // point-at-infinity. note we are using that our EC has no non-trivial rational 2-torsion. auto result_infinity_from_operation = transcript_add_x_equal * (-transcript_add_y_equal + 1); // degree 2 - // infinity_from_inputs and infinity_from_operation mutually exclusive so we can perform an OR by adding - // (mutually exclusive because if result_infinity_from_inputs then transcript_add_y_equal = 1 (both y are 0) + // `result_infinity_from_inputs` and `result_infinity_from_operation` are mutually exclusive (i.e., cannot both + // be 1), so we can perform an OR by adding. (they are mutually exclusive because if + // `result_infinity_from_inputs` then `transcript_add_y_equal == 1`.) 
auto result_is_infinity = result_infinity_from_inputs + result_infinity_from_operation; // degree 2 - auto any_add_is_active = q_add + msm_transition; + auto any_add_is_active = q_add + msm_transition; // degree 1 - // Valdiate `transcript_add_lambda` is well formed if we are adding msm output into accumulator + // Validate `transcript_add_lambda` is well formed if we are adding MSM output into accumulator { Accumulator transcript_msm_lambda_relation(0); auto msm_x = transcript_msm_x; @@ -270,18 +317,25 @@ void ECCVMTranscriptRelationImpl::accumulate(ContainerOverSubrelations& accu transcript_msm_lambda_relation += lambda_relation * is_double; // degree 4 } auto transcript_add_or_dbl_from_msm_output_is_valid = - (-transcript_msm_infinity + 1) * (-is_accumulator_empty + 1); // degree 2 + (-transcript_msm_infinity + 1) * (-is_accumulator_empty + 1); // degree 2 + // zero-out the value of `transcript_msm_lambda_relation` if output of MSM is point-at-infinity or the + // accumulator is point-at-infinity. (this case cannot be handled uniformly and will be handled by the + // following logic.) transcript_msm_lambda_relation *= transcript_add_or_dbl_from_msm_output_is_valid; // degree 6 // No group operation because of points at infinity { + // `lambda_relation_invalid != 0` means that lambda does not enter into our calculation for + // point-at-infinity reasons. in this case, `lambda` is constrained to be 0.
auto lambda_relation_invalid = (transcript_msm_infinity + is_accumulator_empty + add_result_is_infinity); // degree 2 auto lambda_relation = lambda * lambda_relation_invalid; // degree 4 - transcript_msm_lambda_relation += lambda_relation; // (still degree 6) + transcript_msm_lambda_relation += lambda_relation; // degree 6 } + // relation is only touched if we are at an msm_transition transcript_lambda_relation = transcript_msm_lambda_relation * msm_transition; // degree 7 } // Valdiate `transcript_add_lambda` is well formed if we are adding base point into accumulator + // very similar to the above code for adding an MSM output. { Accumulator transcript_add_lambda_relation(0); auto add_x = transcript_Px; @@ -305,11 +359,14 @@ void ECCVMTranscriptRelationImpl::accumulate(ContainerOverSubrelations& accu transcript_add_lambda_relation *= transcript_add_or_dbl_from_add_output_is_valid; // degree 6 // No group operation because of points at infinity { + // `lambda_relation_invalid != 0` means that lambda does not enter into our calculation for + // point-at-infinity reasons. in this case, `lambda` is constrained to be 0. auto lambda_relation_invalid = (transcript_Pinfinity + is_accumulator_empty + add_result_is_infinity); // degree 2 auto lambda_relation = lambda * lambda_relation_invalid; // degree 4 - transcript_add_lambda_relation += lambda_relation; // (still degree 6) + transcript_add_lambda_relation += lambda_relation; // degree 6 } + // relation is only touched if we are at an `add` instruction. transcript_lambda_relation += transcript_add_lambda_relation * q_add; std::get<14>(accumulator) += transcript_lambda_relation * scaling_factor; // degree 7 } @@ -323,26 +380,32 @@ void ECCVMTranscriptRelationImpl::accumulate(ContainerOverSubrelations& accu * 4. Is 0 (reset) * 5. 
all opcode values are 0 */ - auto propagate_transcript_accumulator = (q_mul) * (-msm_transition + 1) + (q_eq * (-q_reset_accumulator + 1)); + + // accumulator is propagated if we are at a `mul` and _not_ at an `msm_transition` OR we are at an `eq` and we + // don't reset the accumulator. note that if `msm_transition_check == 1` (i.e., we are at a syntactic + // transition) but the total number of muls is 0, then the accumulator should indeed be propagated. + auto propagate_transcript_accumulator = + (q_mul) * (-msm_transition + 1) + (q_eq * (-q_reset_accumulator + 1)); // degree 2 { auto lambda_sqr = lambda * lambda; - // add relation that validates result_infinity_from_operation * result_is_infinity = 0 // N.B. these relations rely on the fact that `lambda = 0` if we are not evaluating add/double formula // (i.e. one or both outputs are points at infinity, or produce a point at infinity) // This should be validated by the lambda_relation auto x3 = lambda_sqr - lhs_x - rhs_x; // degree 2 + auto y3 = lambda * (lhs_x - out_x) - lhs_y; // degree 3 x3 += result_is_lhs * (rhs_x + lhs_x + lhs_x); // degree 4 x3 += result_is_rhs * (lhs_x + rhs_x + rhs_x); // degree 4 x3 += result_is_infinity * (lhs_x + rhs_x); // degree 4 - auto y3 = lambda * (lhs_x - out_x) - lhs_y; // degree 3 y3 += result_is_lhs * (lhs_y + lhs_y); // degree 4 y3 += result_is_rhs * (lhs_y + rhs_y); // degree 4 y3 += result_is_infinity * lhs_y; // degree 4 - + // internal to the Transcript columns, the point-at-infinity is encoded as `(0, 0)`. + // this is implicit in the subsequent computations: e.g. if `result_is_infinity`, then `(x3, y3) == (0, 0)`, + // or if `q_reset_accumulator == 1`, then `(out_x, out_y) == (0, 0)`. 
auto add_point_x_relation = (x3 - out_x) * any_add_is_active; // degree 5 add_point_x_relation += - propagate_transcript_accumulator * is_not_last_row * (out_x - transcript_accumulator_x); + propagate_transcript_accumulator * is_not_last_row * (out_x - transcript_accumulator_x); // degree 4 // validate out_x = 0 if q_reset_accumulator = 1 add_point_x_relation += (out_x * q_reset_accumulator); auto add_point_y_relation = (y3 - out_y) * any_add_is_active; // degree 5 @@ -351,17 +414,19 @@ void ECCVMTranscriptRelationImpl::accumulate(ContainerOverSubrelations& accu // validate out_y = 0 if q_reset_accumulator = 1 add_point_y_relation += (out_y * q_reset_accumulator); auto opcode_is_zero = - (is_not_first_row) * (-q_add + 1) * (-q_mul + 1) * (-q_reset_accumulator + 1) * (-q_eq + 1); - add_point_x_relation += (out_x * opcode_is_zero); - add_point_y_relation += (out_y * opcode_is_zero); + (is_not_first_row) * (-q_add + 1) * (-q_mul + 1) * (-q_reset_accumulator + 1) * (-q_eq + 1); // degree 5 + add_point_x_relation += (out_x * opcode_is_zero); // degree 6 + add_point_y_relation += (out_y * opcode_is_zero); // degree 6 - std::get<15>(accumulator) += add_point_x_relation * scaling_factor; // degree 5 - std::get<16>(accumulator) += add_point_y_relation * scaling_factor; // degree 5 + std::get<15>(accumulator) += add_point_x_relation * scaling_factor; // degree 6 + std::get<16>(accumulator) += add_point_y_relation * scaling_factor; // degree 6 } - // step 1: subtract offset generator from msm_accumulator - // this might produce a point at infinity + // subtract offset generator from msm_accumulator. this might produce a point at infinity { + // the fundamental relation is: `(transcript_msm_x, transcript_msm_y) - offset == + // `(transcript_msm_intermediate_x, transcript_msm_intermediate_y)`. in other words, `(transcript_msm_x, + // transcript_msm_y)` is the _shifted_ value of the MSM. 
const auto offset = offset_generator(); const auto x1 = offset[0]; const auto y1 = -offset[1]; @@ -373,11 +438,14 @@ void ECCVMTranscriptRelationImpl::accumulate(ContainerOverSubrelations& accu // cases: // x2 == x1, y2 == y1 // x2 != x1 - // (x2 - x1) const auto x_term = (x3 + x2 + x1) * (x2 - x1) * (x2 - x1) - (y2 - y1) * (y2 - y1); // degree 3 const auto y_term = (x1 - x3) * (y2 - y1) - (x2 - x1) * (y1 + y3); // degree 2 - // IF msm_infinity = false, transcript_msm_intermediate_x/y is either the result of subtracting offset - // generator from msm_x/y IF msm_infinity = true, transcript_msm_intermediate_x/y is 0 + // If `transcript_msm_infinity == 0`, then `(transcript_msm_intermediate_x, transcript_msm_intermediate_y)` + // is the result of subtracting offset generator from `(transcript_msm_x, transcript_msm_y)`. If + // `transcript_msm_infinity == 1`, then both `transcript_msm_intermediate_x ==0` and + // `transcript_msm_intermediate_y == 0`. + // + // again, point-at-infinity is represented internally in the Transcript columns by `(0, 0)`. const auto transcript_offset_generator_subtract_x = x_term * (-transcript_msm_infinity + 1) + transcript_msm_infinity * x3; // degree 4 const auto transcript_offset_generator_subtract_y = @@ -387,13 +455,14 @@ void ECCVMTranscriptRelationImpl::accumulate(ContainerOverSubrelations& accu std::get<18>(accumulator) += msm_transition * transcript_offset_generator_subtract_y * scaling_factor; // degree 5 - // validate transcript_msm_infinity is correct - // if transcript_msm_infinity = 1, (x2 == x1) and (y2 + y1 == 0) + // validate `transcript_msm_infinity` is correct + // if `transcript_msm_infinity == 1`, then both `x2 == x1` and `y2 + y1 == 0`. (this is because `(x1, y1)` + // is the negative of the offset.) 
const auto x_diff = x2 - x1; const auto y_sum = y2 + y1; std::get<19>(accumulator) += msm_transition * transcript_msm_infinity * x_diff * scaling_factor; // degree 3 std::get<20>(accumulator) += msm_transition * transcript_msm_infinity * y_sum * scaling_factor; // degree 3 - // if transcript_msm_infinity = 1, then x_diff must have an inverse + // if `transcript_msm_infinity == 0`, then `x_diff` must have an inverse const auto transcript_msm_x_inverse = View(in.transcript_msm_x_inverse); const auto inverse_term = (-transcript_msm_infinity + 1) * (x_diff * transcript_msm_x_inverse - 1); std::get<21>(accumulator) += msm_transition * inverse_term * scaling_factor; // degree 3 @@ -423,7 +492,8 @@ void ECCVMTranscriptRelationImpl::accumulate(ContainerOverSubrelations& accu * If lhs_x == rhs_x, transcript_add_x_equal = 1 * If transcript_add_x_equal = 0, a valid inverse must exist for (lhs_x - rhs_x) */ - auto x_diff = lhs_x - rhs_x; // degree 2 + auto x_diff = lhs_x - rhs_x; // degree 2 + // recall that transcript_Px_inverse is the claimed inverse of `x_diff`. 
auto x_product = transcript_Px_inverse * (-transcript_add_x_equal + 1) + transcript_add_x_equal; // degree 2 auto x_constant = transcript_add_x_equal - 1; // degree 1 auto transcript_add_x_equal_check_relation = (x_diff * x_product + x_constant) * any_add_is_active; // degree 5 diff --git a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_wnaf_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_wnaf_relation.hpp index fba0dae261bb..ade42e0a1621 100644 --- a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_wnaf_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_wnaf_relation.hpp @@ -18,19 +18,22 @@ namespace bb { * | point_transition | round | slices | skew | scalar_sum | * | ---------------- | ----- | --------------- | ------ | ------------------------------- | * | 0 | 0 | s0,s1,s2,s3 | 0 | 0 | - * | 0 | 1 | s4,s5,s6,s7 | 0 | \sum_{i=0}^4 16^i * s_{31 - i} | - * | 0 | 2 | s8,s9,s10,s11 | 0 | \sum_{i=0}^8 16^i * s_{31 - i} | - * | 0 | 3 | s12,s13,s14,s14 | 0 | \sum_{i=0}^12 16^i * s_{31 - i} | - * | 0 | 4 | s16,s17,s18,s19 | 0 | \sum_{i=0}^16 16^i * s_{31 - i} | - * | 0 | 5 | s20,s21,s22,s23 | 0 | \sum_{i=0}^20 16^i * s_{31 - i} | - * | 0 | 6 | s24,s25,s26,s27 | 0 | \sum_{i=0}^24 16^i * s_{31 - i} | - * | 1 | 7 | s28,s29,s30,s31 | s_skew | \sum_{i=0}^28 16^i * s_{31 - i} | + * | 0 | 1 | s4,s5,s6,s7 | 0 | \sum_{i=0}^4 16^i * s_{3 - i} | + * | 0 | 2 | s8,s9,s10,s11 | 0 | \sum_{i=0}^8 16^i * s_{7 - i} | + * | 0 | 3 | s12,s13,s14,s14 | 0 | \sum_{i=0}^12 16^i * s_{11 - i} | + * | 0 | 4 | s16,s17,s18,s19 | 0 | \sum_{i=0}^16 16^i * s_{15 - i} | + * | 0 | 5 | s20,s21,s22,s23 | 0 | \sum_{i=0}^20 16^i * s_{19 - i} | + * | 0 | 6 | s24,s25,s26,s27 | 0 | \sum_{i=0}^24 16^i * s_{23 - i} | + * | 1 | 7 | s28,s29,s30,s31 | s_skew | \sum_{i=0}^28 16^i * s_{27 - i} | * * The value of the input scalar is equal to the following: * - * scalar = 2^16 * scalar_sum + 2^12 * s31 + 2^8 * s30 + 2^4 * s29 + s28 - s_skew - * We use a set 
equality check in `ecc_set_relation.hpp` to validate the above value maps to the correct input - * scalar for a given value of `pc`. + * scalar = 2^16 * scalar_sum + 2^12 * s28 + 2^8 * s29 + 2^4 * s30 + s31 - s_skew + * + * We use a multiset equality check in `ecc_set_relation.hpp` to validate the above value maps to the correct input + * scalar for a given value of `pc` (i.e., for a given non-trivial EC point). In other words, this constrains that the + * wNAF expansion is correct. Note that, from the perpsective of the Precomputed table, we only add the tuple (pc, + * round, slice) to the multiset when point_transition == 1. * * The column `point_transition` is committed to by the Prover, we must constrain it is correctly computed (see * `ecc_point_table_relation.cpp` for details) @@ -54,4 +57,4 @@ template class ECCVMWnafRelationImpl { template using ECCVMWnafRelation = Relation>; -} // namespace bb \ No newline at end of file +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_wnaf_relation_impl.hpp b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_wnaf_relation_impl.hpp index 81a931360ccb..226373d50a8b 100644 --- a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_wnaf_relation_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_wnaf_relation_impl.hpp @@ -18,22 +18,25 @@ namespace bb { * | point_transition | round | slices | skew | scalar_sum | * | ---------------- | ----- | --------------- | ------ | ------------------------------- | * | 0 | 0 | s0,s1,s2,s3 | 0 | 0 | - * | 0 | 1 | s4,s5,s6,s7 | 0 | \sum_{i=0}^4 16^i * s_{31 - i} | - * | 0 | 2 | s8,s9,s10,s11 | 0 | \sum_{i=0}^8 16^i * s_{31 - i} | - * | 0 | 3 | s12,s13,s14,s14 | 0 | \sum_{i=0}^12 16^i * s_{31 - i} | - * | 0 | 4 | s16,s17,s18,s19 | 0 | \sum_{i=0}^16 16^i * s_{31 - i} | - * | 0 | 5 | s20,s21,s22,s23 | 0 | \sum_{i=0}^20 16^i * s_{31 - i} | - * | 0 | 6 | s24,s25,s26,s27 | 0 | \sum_{i=0}^24 16^i * s_{31 - i} | - * | 1 | 7 | 
s28,s29,s30,s31 | s_skew | \sum_{i=0}^28 16^i * s_{31 - i} | + * | 0 | 1 | s4,s5,s6,s7 | 0 | \sum_{i=0}^4 16^i * s_{3 - i} | + * | 0 | 2 | s8,s9,s10,s11 | 0 | \sum_{i=0}^8 16^i * s_{7 - i} | + * | 0 | 3 | s12,s13,s14,s14 | 0 | \sum_{i=0}^12 16^i * s_{11 - i} | + * | 0 | 4 | s16,s17,s18,s19 | 0 | \sum_{i=0}^16 16^i * s_{15 - i} | + * | 0 | 5 | s20,s21,s22,s23 | 0 | \sum_{i=0}^20 16^i * s_{19 - i} | + * | 0 | 6 | s24,s25,s26,s27 | 0 | \sum_{i=0}^24 16^i * s_{23 - i} | + * | 1 | 7 | s28,s29,s30,s31 | s_skew | \sum_{i=0}^28 16^i * s_{27 - i} | * * The value of the input scalar is equal to the following: * - * scalar = 2^16 * scalar_sum + 2^12 * s31 + 2^8 * s30 + 2^4 * s29 + s28 - s_skew - * We use a set equality check in `ecc_set_relation.hpp` to validate the above value maps to the correct input - * scalar for a given value of `pc`. + * scalar = 2^16 * scalar_sum + 2^12 * s28 + 2^8 * s29 + 2^4 * s30 + s31 - s_skew * - * The column `point_transition` is committed to by the Prover, we must constrain it is correctly computed (see - * `ecc_point_table_relation.cpp` for details) + * We use a multiset equality check in `ecc_set_relation.hpp` to validate the above value maps to the correct input + * scalar for a given value of `pc` (i.e., for a given non-trivial EC point). In other words, this constrains that the + * wNAF expansion is correct. Note that, from the perpsective of the Precomputed table, we only add the tuple (pc, + * round, slice) to the multiset when point_transition == 1. + * + * Furthermore, as the column `point_transition` is committed to by the Prover, we must constrain it is correctly + * computed (see also `ecc_point_table_relation.cpp` for a description of what the table looks like.) 
* * @tparam FF * @tparam AccumulatorTypes @@ -49,11 +52,11 @@ void ECCVMWnafRelationImpl::accumulate(ContainerOverSubrelations& accumulato using View = typename Accumulator::View; auto scalar_sum = View(in.precompute_scalar_sum); - auto scalar_sum_new = View(in.precompute_scalar_sum_shift); + auto scalar_sum_shift = View(in.precompute_scalar_sum_shift); auto q_transition = View(in.precompute_point_transition); auto round = View(in.precompute_round); auto round_shift = View(in.precompute_round_shift); - auto pc = View(in.precompute_pc); + auto pc = View(in.precompute_pc); // note that this is a _point-counter_. auto pc_shift = View(in.precompute_pc_shift); // precompute_select is a boolean column. We only evaluate the ecc_wnaf_relation and the ecc_point_table_relation if // `precompute_select=1` @@ -71,6 +74,9 @@ void ECCVMWnafRelationImpl::accumulate(ContainerOverSubrelations& accumulato acc += ((s - 1).sqr() - 1) * ((s - 2).sqr() - 1) * scaling_factor; }; + // given two 2-bit numbers `s0, `s1`, convert to a wNAF digit (in {-15, -13, ..., 13, 15}) via the formula: + // `2(4s0 + s1) - 15`. (Here, `4s0 + s1` represents the 4-bit number corresponding to the concatenation of `s0` and + // `s1`.) const auto convert_to_wnaf = [](const View& s0, const View& s1) { auto t = s0 + s0; t += t; @@ -80,7 +86,9 @@ void ECCVMWnafRelationImpl::accumulate(ContainerOverSubrelations& accumulato }; const auto scaled_transition = q_transition * scaling_factor; - const auto scaled_transition_is_zero = -scaled_transition + scaling_factor; + const auto scaled_transition_is_zero = + -scaled_transition + scaling_factor; // `scaling_factor * (1 - q_transition)`, i.e., is the scaling_factor if we + // are _not_ at a transition, else 0. /** * @brief Constrain each of our scalar slice chunks (s1, ..., s8) to be 2 bits. 
* Doing range checks this way vs permutation-based range check removes need to create sorted list + grand product @@ -125,9 +133,11 @@ void ECCVMWnafRelationImpl::accumulate(ContainerOverSubrelations& accumulato * i.e. next_scalar_sum - 2^{16} * current_scalar_sum - 2^12 * w_0 - 2^8 * w_1 - 2^4 * w_2 - w_3 = 0 * @note We only perform slice_consistency check when next row is processing the same scalar as the current row! * i.e. when q_transition = 0 - * TODO(@zac-williamson) Optimize WNAF use (#2224) + * Note(@zac-williamson): improve WNAF use (#2224) */ - auto row_slice = w0; + auto row_slice = w0; // row_slice will eventually contain the truncated scalar corresponding to the current row, + // which is 2^12 * w_0 + 2^8 * w_1 + 2^4 * w_2 + w_3. (If one just looks at the wNAF digits in + // this row, this is the resulting odd number. Note that it is not necessarily positive.) row_slice += row_slice; row_slice += row_slice; row_slice += row_slice; @@ -144,46 +154,70 @@ void ECCVMWnafRelationImpl::accumulate(ContainerOverSubrelations& accumulato row_slice += row_slice; row_slice += w3; auto sum_delta = scalar_sum * FF(1ULL << 16) + row_slice; - const auto check_sum = scalar_sum_new - sum_delta; + const auto check_sum = scalar_sum_shift - sum_delta; std::get<8>(accumulator) += precompute_select * check_sum * scaled_transition_is_zero; /** - * @brief Round transition logic. + * @brief Transition logic with `round` and `q_transition`. * Goal: `round` is an integer in [0, ... 7] that tracks how many slices we have processed for a given scalar. - * i.e. number of 4-bit WNAF slices processed = round * 4. - * We apply the following constraints: - * If q_transition = 0, round increments by 1 between rows. - * If q_transition = 1, round value at current row = 7 - * If q_transition = 1, round value at next row = 0 - * Question: is this sufficient? We don't actually range constrain `round` (expensive if we don't need to!). - * Let us analyze... - * 1. 
When `q_transition = 1`, we use a set membership check to map the tuple of (pc, scalar_sum) into a set. - * We compare this set with an equivalent set generated from the transcript columns. The sets must match. - * 2. Only case where, at row `i`, a Prover can set `round` to value > 7 is if `q_transition = 0` for all j > i. - * `precompute_pc` decrements by 1 when `q_transition` = 1 - * We can infer from 1, 2, that if `round > 7`, the resulting wnafs will map into a set at a value of `pc` that is - * greater than all valid msm pc values (assuming the set equivalence check on the scalar sums is satisfied). - * The resulting msm output of such a computation cannot be mapped into the set of msm outputs in - * the transcript columns (see relations in ecc_msm_relation.cpp). - * Conclusion: not applying a strict range-check on `round` does not affect soundness (TODO(@zac-williamson) - * validate this! #2225) + * i.e., the number of 4-bit WNAF slices processed = round * 4. + * We must ensure that `q_transition` is well-formed and that `round` is correctly constrained. Recall that `pc` + * stands for point-counter. + * + * For the former, we force the following: + * 1. When `q_transition == 1`, then `scalar_sum_shift == 0`, `round_shift == 0`, `round == 7`, and `pc_shift + * == pc - 1`. + * 2. When `q_transition == 0`, then `round_shift - round == 1` and `pc_shift == pc` + * + * For the latter: note that we don't actually range-constrain `round` (expensive if we don't need to!). We + * nonetheless can correctly constrain `round`, because of the multiset checks. There are two multiset equality + * checks that we perform that implicate the wNAF relation: + * 1. (pc, msm_round, wnaf_slice) + * 2. (pc, P.x, P.y, scalar-multiplier) + * The first is used to communicate with the MSM table, to validate that the slice * point values the MSM tables use + * are indeed what we have precomputed. 
The second facilitates communication with the Transcript table, to ensure + * that the wNAF expansion of the scalar is indeed correct. Moreover, the second is only "sent" to the multiset when + * `q_transition == 1`. (It is helpful to recall that `pc` is monotonic: one per each point involved in a + * non-trivial scalar multiplication.) + * + * Here is the logic. We must ensure that `round` can never be set to a value > 7. If this were possible at row `i`, + * then `q_transition == 0` for all subsequent rows by the incrementing logic. There are (at least) two problems. + * + * 1. The implicit MSM round (accounted for in (1)) is between `4 * round` and `4 * round + 3` (in fact `4 * + * round + 4` iff we are at a skew). As the `round` must increment, this means that the `msm_round` will be + * larger than 32, which can't happen due to the internal constraints in the MSM table. In particular, the multiset + * equality check will fail, as the MSM tables can never send an entry with a round larger than 32. + * + * 2. This forces `precompute_pc` to be constant from here on out. This will violate the multiset equalities both + * of terms (1) _and_ (2). For the former, we will write too many entries with the given `pc`. (However, we've + * already shown how this multiset equality fails due to `round`.) More importantly, for the latter, we will _never_ + * "send" the tuple (pc, P.x, P.y, scalar-multiplier) to the multiset, for this value of `pc` and all potentially + * subsequent values. We explicate this latter failure. The transcript table will certainly fill _some_ values in + * for (pc, P.x, P.y, scalar-multiplier) (at least with correct pc and scalar-multiplier values), which will cause + * the multiset equality check to fail.
+ * */ - // We combine checks 0, 1 into a single relation + + // We combine two checks into a single relation // q_transition * (round - 7) + (-q_transition + 1) * (round_shift - round - 1) // => q_transition * (round - 7 - round_shift + round + 1) + (round_shift - round - 1) // => q_transition * (2 * round - round_shift - 6) + (round_shift - round - 1) const auto round_check = round_shift - round - 1; - std::get<9>(accumulator) += precompute_select * scaled_transition * ((round - round_check - 7) + round_check); - std::get<10>(accumulator) += precompute_select * scaled_transition * round_shift; + std::get<9>(accumulator) += + precompute_select * (scaled_transition * (round - round_check - 7) + scaling_factor * round_check); + std::get<10>(accumulator) += + precompute_select * scaled_transition * round_shift; // at a transition, next round == 0 /** - * @brief Scalar transition checks. + * @brief Scalar transition/PC checks. * 1: if q_transition = 1, scalar_sum_new = 0 * 2: if q_transition = 0, pc at next row = pc at current row * 3: if q_transition = 1, pc at next row = pc at current row - 1 (decrements by 1) * (we combine 2 and 3 into a single relation) */ - std::get<11>(accumulator) += precompute_select * scalar_sum_new * scaled_transition; + std::get<11>(accumulator) += precompute_select * scaled_transition * scalar_sum_shift; // (2, 3 combined): q_transition * (pc - pc_shift - 1) + (-q_transition + 1) * (pc_shift - pc) // => q_transition * (-2 * (pc_shift - pc) - 1) + (pc_shift - pc) const auto pc_delta = pc_shift - pc; @@ -201,6 +235,8 @@ void ECCVMWnafRelationImpl::accumulate(ContainerOverSubrelations& accumulato */ std::get<13>(accumulator) += precompute_select * (precompute_skew * (precompute_skew - 7)) * scaling_factor; + // Set slices (a.k.a. compressed digits), pc, and round all to zero when `precompute_select == 0`. + // (this is for one of the multiset equality checks.) 
const auto precompute_select_zero = (-precompute_select + 1) * scaling_factor; std::get<14>(accumulator) += precompute_select_zero * (w0 + 15); std::get<15>(accumulator) += precompute_select_zero * (w1 + 15); @@ -210,7 +246,7 @@ void ECCVMWnafRelationImpl::accumulate(ContainerOverSubrelations& accumulato std::get<18>(accumulator) += precompute_select_zero * round; std::get<19>(accumulator) += precompute_select_zero * pc; - // TODO(@zac-williamson #2226) + // Note(@zac-williamson #2226) // if precompute_select = 0, validate pc, round, slice values are all zero // If we do this we can reduce the degree of the set equivalence relations // (currently when checking pc/round/wnaf tuples from WNAF columns match those from MSM columns, diff --git a/barretenberg/cpp/src/barretenberg/relations/elliptic_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/elliptic_relation.hpp index 299eba0b7dbb..4fcf166d36d7 100644 --- a/barretenberg/cpp/src/barretenberg/relations/elliptic_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/elliptic_relation.hpp @@ -54,8 +54,6 @@ template class EllipticRelationImpl { const Parameters&, const FF& scaling_factor) { - PROFILE_THIS_NAME("Elliptic::accumulate"); - using Accumulator = typename std::tuple_element_t<0, ContainerOverSubrelations>; using CoefficientAccumulator = typename Accumulator::CoefficientAccumulator; auto x_3_m = CoefficientAccumulator(in.w_r_shift); @@ -126,4 +124,4 @@ template class EllipticRelationImpl { }; template using EllipticRelation = Relation>; -} // namespace bb \ No newline at end of file +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/relations/logderiv_lookup_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/logderiv_lookup_relation.hpp index 197391461460..04126f02ccf8 100644 --- a/barretenberg/cpp/src/barretenberg/relations/logderiv_lookup_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/logderiv_lookup_relation.hpp @@ -173,7 +173,7 @@ template class 
LogDerivLookupRelationImpl { auto& relation_parameters, const size_t circuit_size) { - PROFILE_THIS_NAME("Lookup::compute_logderivative_inverse"); + BB_BENCH_NAME("Lookup::compute_logderivative_inverse"); auto& inverse_polynomial = get_inverse_polynomial(polynomials); size_t min_iterations_per_thread = 1 << 6; // min number of iterations for which we'll spin up a unique thread @@ -253,7 +253,6 @@ template class LogDerivLookupRelationImpl { const Parameters& params, const FF& scaling_factor) { - PROFILE_THIS_NAME("Lookup::accumulate"); // declare the accumulator of the maximum length, in non-ZK Flavors, they are of the same length, // whereas in ZK Flavors, the accumulator corresponding log derivative lookup argument sub-relation is the // longest diff --git a/barretenberg/cpp/src/barretenberg/relations/memory_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/memory_relation.hpp index 7fb6074a6522..b94e255468a2 100644 --- a/barretenberg/cpp/src/barretenberg/relations/memory_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/memory_relation.hpp @@ -70,7 +70,6 @@ template class MemoryRelationImpl { const Parameters& params, const FF& scaling_factor) { - PROFILE_THIS_NAME("Memory::accumulate"); // all accumulators are of the same length, so we set our accumulator type to (arbitrarily) be the first one. // if there were one that were shorter, we could also profitably use a `ShortAccumulator` type. however, // that is not the case here. 
diff --git a/barretenberg/cpp/src/barretenberg/relations/non_native_field_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/non_native_field_relation.hpp index d67f4ef56ca5..9e0ca675e8cf 100644 --- a/barretenberg/cpp/src/barretenberg/relations/non_native_field_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/non_native_field_relation.hpp @@ -51,7 +51,6 @@ template class NonNativeFieldRelationImpl { [[maybe_unused]] const Parameters& params, const FF& scaling_factor) { - PROFILE_THIS_NAME("NonNativeField::accumulate"); // all accumulators are of the same length, so we set our accumulator type to (arbitrarily) be the first one. // if there were one that were shorter, we could also profitably use a `ShortAccumulator` type. however, // that is not the case here. diff --git a/barretenberg/cpp/src/barretenberg/relations/permutation_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/permutation_relation.hpp index 51992d63a4f5..6626b3961d5b 100644 --- a/barretenberg/cpp/src/barretenberg/relations/permutation_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/permutation_relation.hpp @@ -124,7 +124,6 @@ template class UltraPermutationRelationImpl { const Parameters& params, const FF& scaling_factor) { - PROFILE_THIS_NAME("Permutation::accumulate"); // Contribution (1) using Accumulator = std::tuple_element_t<0, ContainerOverSubrelations>; using View = typename Accumulator::View; @@ -206,4 +205,4 @@ template class UltraPermutationRelationImpl { template using UltraPermutationRelation = Relation>; -} // namespace bb \ No newline at end of file +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/relations/poseidon2_external_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/poseidon2_external_relation.hpp index 47bde947a153..7512579fd841 100644 --- a/barretenberg/cpp/src/barretenberg/relations/poseidon2_external_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/poseidon2_external_relation.hpp @@ 
-29,28 +29,60 @@ template class Poseidon2ExternalRelationImpl { } /** - * @brief Expression for the poseidon2 external round relation, based on E_i in Section 6 of + * @brief Expression for the poseidon2 external round relation, based on \f$ E_i \f$ in Section 6 of * https://eprint.iacr.org/2023/323.pdf. - * @details This relation is defined as C(in(X)...) := - * q_poseidon2_external * ( (v1 - w_1_shift) + \alpha * (v2 - w_2_shift) + - * \alpha^2 * (v3 - w_3_shift) + \alpha^3 * (v4 - w_4_shift) ) = 0 where: - * u1 := (w_1 + q_1)^5 - * u2 := (w_2 + q_2)^5 - * u3 := (w_3 + q_3)^5 - * u4 := (w_4 + q_4)^5 - * t0 := u1 + u2 (1, 1, 0, 0) - * t1 := u3 + u4 (0, 0, 1, 1) - * t2 := 2 * u2 + t1 = 2 * u2 + u3 + u4 (0, 2, 1, 1) - * t3 := 2 * u4 + t0 = u1 + u2 + 2 * u4 (1, 1, 0, 2) - * v4 := 4 * t1 + t3 = u1 + u2 + 4 * u3 + 6 * u4 (1, 1, 4, 6) - * v2 := 4 * t0 + t2 = 4 * u1 + 6 * u2 + u3 + u4 (4, 6, 1, 1) - * v1 := t3 + v2 = 5 * u1 + 7 * u2 + 1 * u3 + 3 * u4 (5, 7, 1, 3) - * v3 := t2 + v4 (1, 3, 5, 7) + * @details For state \f$ \mathbf{u} = (u_1, u_2, u_3, u_4)\f$ with \f$ u_i = \big(w_i + c_i^{(i)}\big)^5 \f$, the + * external round computes \f$ \mathbf{v} = M_E \cdot \mathbf{u}^{\top}\f$, where \f$M_E\f$ is the external round + * matrix defined as follows: + * + * \f[ + * M_E = + * \begin{bmatrix} + * 5 & 7 & 1 & 3 \\ + * 4 & 6 & 1 & 1 \\ + * 1 & 3 & 5 & 7 \\ + * 1 & 1 & 4 & 6 + * \end{bmatrix} + * \f] + * + * i.e. + * \f{align}{ + * v_1 &= 5u_1 + 7u_2 + u_3 + 3u_4 \\ + * v_2 &= 4u_1 + 6u_2 + u_3 + u_4 \\ + * v_3 &= u_1 + 3u_2 + 5u_3 + 7u_4 \\ + * v_4 &= u_1 + u_2 + 4u_3 + 6u_4 + * \f} + * + * The relation enforces \f$ v_k = w_{k,shift}\f$ for \f$ k \in \{1,2,3,4\}\f$. + * Concretely, the relation is encoded as four independent constraints multiplied by the + * \f$\text{q_poseidon2_external}\f$ selector and the scaling factor \f$\hat{g}\f$ arising from the + * `GateSeparatorPolynomial`. 
These contributions are added to the corresponding univariate accumulator \f$ A_i + * \f$: + * \f{align}{ + * A_1 &\;\mathrel{+}= \text{q_poseidon2_external}\cdot \big(v_1 - w_{1,\text{shift}}\big) \cdot \hat{g} \\ + * A_2 &\;\mathrel{+}= \text{q_poseidon2_external}\cdot \big(v_2 - w_{2,\text{shift}}\big) \cdot \hat{g} \\ + * A_3 &\;\mathrel{+}= \text{q_poseidon2_external}\cdot \big(v_3 - w_{3,\text{shift}}\big) \cdot \hat{g} \\ + * A_4 &\;\mathrel{+}= \text{q_poseidon2_external}\cdot \big(v_4 - w_{4,\text{shift}}\big) \cdot \hat{g} + * \f} + * At the end of each Sumcheck Round, the subrelation accumulators are aggregated with independent challenges + * \f$\alpha_{i} = \alpha_{i, \text{Poseidon2Ext}}\f$ taken from the array of `SubrelationSeparators` + * \f[ + * \alpha_{0} A_1 + + * \alpha_{1} A_2 + + * \alpha_{2} A_3 + + * \alpha_{3} A_4 + * \f] + * and multiplied by the linear factor of the `GateSeparatorPolynomial`. + * + * @param evals a tuple of tuples of univariate accumulators, the subtuple corresponding to this relation consists + * of \f$ [A_1, A_2, A_3, A_4]\f$ , such that + * \f$ \deg(A_i) = \text{SUBRELATION_PARTIAL_LENGTHS}[i] - 1 \f$. + * @param in In round \f$ k \f$ of Sumcheck at the point \f$i_{>k} = (i_{k+1}, \ldots, i_{d-1})\f$ on the + * \f$d-k-1\f$-dimensional hypercube, given by an array containing the restrictions of the prover polynomials + * \f$ P_i(u_{k}) \f$. + * @param parameters Not used in this relation + * @param scaling_factor scaling term coming from `GateSeparatorPolynomial`. * - * @param evals transformed to `evals + C(in(X)...)*scaling_factor` - * @param in an std::array containing the fully extended Univariate edges. - * @param parameters contains beta, gamma, and public_input_delta, .... - * @param scaling_factor optional term to scale the evaluation before adding to evals.
*/ template void static accumulate(ContainerOverSubrelations& evals, @@ -58,73 +90,75 @@ template class Poseidon2ExternalRelationImpl { const Parameters&, const FF& scaling_factor) { - PROFILE_THIS_NAME("PoseidonExt::accumulate"); + // Univariates of degree 6 represented in Lagrange basis using Accumulator = std::tuple_element_t<0, ContainerOverSubrelations>; + // Low-degree univariates represented in monomial basis using CoefficientAccumulator = typename Accumulator::CoefficientAccumulator; - auto w_l = CoefficientAccumulator(in.w_l); - auto w_r = CoefficientAccumulator(in.w_r); - auto w_o = CoefficientAccumulator(in.w_o); - auto w_4 = CoefficientAccumulator(in.w_4); - auto w_l_shift = CoefficientAccumulator(in.w_l_shift); - auto w_r_shift = CoefficientAccumulator(in.w_r_shift); - auto w_o_shift = CoefficientAccumulator(in.w_o_shift); - auto w_4_shift = CoefficientAccumulator(in.w_4_shift); - auto q_l = CoefficientAccumulator(in.q_l); - auto q_r = CoefficientAccumulator(in.q_r); - auto q_o = CoefficientAccumulator(in.q_o); - auto q_4 = CoefficientAccumulator(in.q_4); - auto q_poseidon2_external = CoefficientAccumulator(in.q_poseidon2_external); + + // Current state + const auto w_1 = CoefficientAccumulator(in.w_l); + const auto w_2 = CoefficientAccumulator(in.w_r); + const auto w_3 = CoefficientAccumulator(in.w_o); + const auto w_4 = CoefficientAccumulator(in.w_4); + // Expected state, contained in the next row + const auto w_1_shift = CoefficientAccumulator(in.w_l_shift); + const auto w_2_shift = CoefficientAccumulator(in.w_r_shift); + const auto w_3_shift = CoefficientAccumulator(in.w_o_shift); + const auto w_4_shift = CoefficientAccumulator(in.w_4_shift); + // i-th external round constants + const auto c_1 = CoefficientAccumulator(in.q_l); + const auto c_2 = CoefficientAccumulator(in.q_r); + const auto c_3 = CoefficientAccumulator(in.q_o); + const auto c_4 = CoefficientAccumulator(in.q_4); + // Poseidon2 external relation selector + const auto 
q_poseidon2_external = CoefficientAccumulator(in.q_poseidon2_external); // add round constants which are loaded in selectors - auto s1 = Accumulator(w_l + q_l); - auto s2 = Accumulator(w_r + q_r); - auto s3 = Accumulator(w_o + q_o); - auto s4 = Accumulator(w_4 + q_4); + auto sbox = [](const Accumulator& x) { + auto t2 = x.sqr(); // x^2 + auto t4 = t2.sqr(); // x^4 + return t4 * x; // x^5 + }; // apply s-box round - auto u1 = s1.sqr(); - u1 = u1.sqr(); - u1 *= s1; - auto u2 = s2.sqr(); - u2 = u2.sqr(); - u2 *= s2; - auto u3 = s3.sqr(); - u3 = u3.sqr(); - u3 *= s3; - auto u4 = s4.sqr(); - u4 = u4.sqr(); - u4 *= s4; - - // matrix mul v = M_E * u with 14 additions + auto u1 = sbox(Accumulator(w_1 + c_1)); + auto u2 = sbox(Accumulator(w_2 + c_2)); + auto u3 = sbox(Accumulator(w_3 + c_3)); + auto u4 = sbox(Accumulator(w_4 + c_4)); + // Matrix mul v = M_E * u with 14 additions. + // Precompute common summands. auto t0 = u1 + u2; // u_1 + u_2 auto t1 = u3 + u4; // u_3 + u_4 auto t2 = u2 + u2; // 2u_2 t2 += t1; // 2u_2 + u_3 + u_4 auto t3 = u4 + u4; // 2u_4 t3 += t0; // u_1 + u_2 + 2u_4 + + // Row 4: u_1 + u_2 + 4u_3 + 6u_4 auto v4 = t1 + t1; v4 += v4; - v4 += t3; // u_1 + u_2 + 4u_3 + 6u_4 + v4 += t3; + + // Row 2: 4u_1 + 6u_2 + u_3 + u_4 auto v2 = t0 + t0; v2 += v2; - v2 += t2; // 4u_1 + 6u_2 + u_3 + u_4 - auto v1 = t3 + v2; // 5u_1 + 7u_2 + u_3 + 3u_4 - auto v3 = t2 + v4; // u_1 + 3u_2 + 5u_3 + 7u_4 + v2 += t2; + // Row 1: 5u_1 + 7u_2 + u_3 + 3u_4 + auto v1 = t3 + v2; + + // Row 3: u_1 + 3u_2 + 5u_3 + 7u_4 + auto v3 = t2 + v4; auto q_pos_by_scaling = Accumulator(q_poseidon2_external * scaling_factor); - auto tmp = q_pos_by_scaling * (v1 - Accumulator(w_l_shift)); - std::get<0>(evals) += tmp; + std::get<0>(evals) += q_pos_by_scaling * (v1 - Accumulator(w_1_shift)); - tmp = q_pos_by_scaling * (v2 - Accumulator(w_r_shift)); - std::get<1>(evals) += tmp; + std::get<1>(evals) += q_pos_by_scaling * (v2 - Accumulator(w_2_shift)); - tmp = q_pos_by_scaling * (v3 - 
Accumulator(w_o_shift)); - std::get<2>(evals) += tmp; + std::get<2>(evals) += q_pos_by_scaling * (v3 - Accumulator(w_3_shift)); - tmp = q_pos_by_scaling * (v4 - Accumulator(w_4_shift)); - std::get<3>(evals) += tmp; + std::get<3>(evals) += q_pos_by_scaling * (v4 - Accumulator(w_4_shift)); }; }; template using Poseidon2ExternalRelation = Relation>; -} // namespace bb \ No newline at end of file +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/relations/poseidon2_internal_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/poseidon2_internal_relation.hpp index fc5ff917fbfa..06ec09b41afb 100644 --- a/barretenberg/cpp/src/barretenberg/relations/poseidon2_internal_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/poseidon2_internal_relation.hpp @@ -21,33 +21,78 @@ template class Poseidon2InternalRelationImpl { 7, // internal poseidon2 round sub-relation for fourth value }; + static constexpr fr D1 = crypto::Poseidon2Bn254ScalarFieldParams::internal_matrix_diagonal[0]; // decremented by 1 + static constexpr fr D2 = crypto::Poseidon2Bn254ScalarFieldParams::internal_matrix_diagonal[1]; // decremented by 1 + static constexpr fr D3 = crypto::Poseidon2Bn254ScalarFieldParams::internal_matrix_diagonal[2]; // decremented by 1 + static constexpr fr D4 = crypto::Poseidon2Bn254ScalarFieldParams::internal_matrix_diagonal[3]; // decremented by 1 + static constexpr fr D1_plus_1 = fr{ 1 } + D1; /** * @brief Returns true if the contribution from all subrelations for the provided inputs is identically zero * */ template inline static bool skip(const AllEntities& in) { - return in.q_poseidon2_internal.is_zero(); + return (in.q_poseidon2_internal.is_zero()); } /** - * @brief Expression for the poseidon2 internal round relation, based on I_i in Section 6 of + * @brief Expression for the Poseidon2 internal round relation, based on I_i in Section 6 of * https://eprint.iacr.org/2023/323.pdf. - * @details This relation is defined as C(in(X)...) 
:= - * q_poseidon2_internal * ( (v1 - w_1_shift) + \alpha * (v2 - w_2_shift) + - * \alpha^2 * (v3 - w_3_shift) + \alpha^3 * (v4 - w_4_shift) ) = 0 where: - * u1 := (w_1 + q_1)^5 - * sum := u1 + w_2 + w_3 + w_4 - * v1 := u1 * D1 + sum - * v2 := w_2 * D2 + sum - * v3 := w_3 * D3 + sum - * v4 := w_4 * D4 + sum - * Di is the ith internal diagonal value - 1 of the internal matrix M_I * - * @param evals transformed to `evals + C(in(X)...)*scaling_factor` - * @param in an std::array containing the fully extended Univariate edges. - * @param parameters contains beta, gamma, and public_input_delta, .... - * @param scaling_factor optional term to scale the evaluation before adding to evals. + * @details Let the internal round matrix M_I be the 4×4 matrix + * \f[ + * M_I = + * \begin{bmatrix} + * D_1 + 1 & 1 & 1 & 1 \\ + * 1 & D_2 + 1 & 1 & 1 \\ + * 1 & 1 & D_3 + 1 & 1 \\ + * 1 & 1 & 1 & D_4 + 1 + * \end{bmatrix}, + * \quad + * \text{where } D_i + 1 \text{ are the diagonal entries of } M_I. + * \f] + * + * Define the state + * \f[ + * u_1 = \big(w_1 + \hat{c}_0^{(i)}\big)^{5},\qquad + * u_2 = w_2,\quad + * u_3 = w_3,\quad + * u_4 = w_4,\qquad + * \mathbf{u} = (u_1,u_2,u_3,u_4). + * \f] + * The internal round computes \f$ \mathbf{v} = M_I \cdot \mathbf{u}^{\top} \f$ and the relation enforces + * \f$ v_k = w_{k,\mathrm{shift}} \f$ for \f$ k \in \{1,2,3,4\} \f$: + * \f{align*} + * v_1 &= (D_1 + 1)\,u_1 + u_2 + u_3 + u_4,\\ + * v_2 &= u_1 + (D_2 + 1)\,u_2 + u_3 + u_4,\\ + * v_3 &= u_1 + u_2 + (D_3 + 1)\,u_3 + u_4,\\ + * v_4 &= u_1 + u_2 + u_3 + (D_4 + 1)\,u_4, + * \f} + * where \f$ \hat{c}_0^{(i)} \f$ is the internal round constant (provided via the \f$ q_l \f$ selector). + * + * Concretely, the relation is encoded as four independent constraints multiplied by the + * \f$\text{q_poseidon2_internal}\f$ selector and the scaling factor \f$\hat{g}\f$ arising from the + * `GateSeparatorPolynomial`.
These contributions are added to the corresponding univariate accumulators + * \f$ A_k \f$ (one per subrelation): + * \f{align*} + * A_1 &\;\mathrel{+}= q_{\mathrm{poseidon2\_internal}}\cdot\big(v_1 - w_{1,\mathrm{shift}}\big)\cdot \hat{g},\\ + * A_2 &\;\mathrel{+}= q_{\mathrm{poseidon2\_internal}}\cdot\big(v_2 - w_{2,\mathrm{shift}}\big)\cdot \hat{g},\\ + * A_3 &\;\mathrel{+}= q_{\mathrm{poseidon2\_internal}}\cdot\big(v_3 - w_{3,\mathrm{shift}}\big)\cdot \hat{g},\\ + * A_4 &\;\mathrel{+}= q_{\mathrm{poseidon2\_internal}}\cdot\big(v_4 - w_{4,\mathrm{shift}}\big)\cdot \hat{g}. + * \f} + * At the end of each Sumcheck round, the subrelation accumulators are aggregated with independent challenges + * \f$ \alpha_i = \alpha_{i,\mathrm{Poseidon2Int}} \f$ (from the `SubrelationSeparators`) + * \f[ + * \alpha_{0}A_1 + \alpha_{1}A_2 + \alpha_{2}A_3 + \alpha_{3}A_4 + * \f] + * and multiplied by the linear factor of the `GateSeparatorPolynomial`. + * @param evals A tuple of tuples of univariate accumulators; the subtuple for this relation is + * \f$[A_1,A_2,A_3,A_4]\f$, with \f$ \deg(A_k) = \text{SUBRELATION_PARTIAL_LENGTHS}[k] - 1 \f$. + * @param in In round \f$ k \f$ of Sumcheck at the point \f$ i_{>k} = (i_{k+1},\ldots,i_{d-1}) \f$ on the + * \f$ d-k-1 \f$ dimensional hypercube, an array of restrictions of the prover polynomials + * \f$ P_i(u_{k}) \f$. + * @param parameters Not used in this relation. + * @param scaling_factor Scaling term \f$ \hat{g} \f$ from the GateSeparatorPolynomial. 
*/ template void static accumulate(ContainerOverSubrelations& evals, @@ -55,60 +100,62 @@ template class Poseidon2InternalRelationImpl { const Parameters&, const FF& scaling_factor) { - PROFILE_THIS_NAME("PoseidonInt::accumulate"); + // Univariates of degree 6 represented in Lagrange basis using Accumulator = std::tuple_element_t<0, ContainerOverSubrelations>; + // Low-degree univariates represented in monomial basis using CoefficientAccumulator = typename Accumulator::CoefficientAccumulator; - auto w_l_m = CoefficientAccumulator(in.w_l); - auto w_l_shift_m = CoefficientAccumulator(in.w_l_shift); - auto w_r_shift_m = CoefficientAccumulator(in.w_r_shift); - auto w_o_shift_m = CoefficientAccumulator(in.w_o_shift); - auto w_4_shift_m = CoefficientAccumulator(in.w_4_shift); - auto q_l_m = CoefficientAccumulator(in.q_l); - auto q_poseidon2_internal_m = CoefficientAccumulator(in.q_poseidon2_internal); - - // add round constants - auto s1 = Accumulator(w_l_m + q_l_m); - - // apply s-box round + // Current state + const auto w_1 = CoefficientAccumulator(in.w_l); + const auto w_2 = CoefficientAccumulator(in.w_r); + const auto w_3 = CoefficientAccumulator(in.w_o); + const auto w_4 = CoefficientAccumulator(in.w_4); + // Expected state, contained in the next row + const auto w_1_shift = CoefficientAccumulator(in.w_l_shift); + const auto w_2_shift = CoefficientAccumulator(in.w_r_shift); + const auto w_3_shift = CoefficientAccumulator(in.w_o_shift); + const auto w_4_shift = CoefficientAccumulator(in.w_4_shift); + // Poseidon2 internal relation selector + const auto q_poseidon2_internal_m = CoefficientAccumulator(in.q_poseidon2_internal); + // ĉ₀⁽ⁱ⁾ - the round constant in `i`-th internal round + const auto c_0_int = CoefficientAccumulator(in.q_l); + + Accumulator barycentric_term; + + // Add ĉ₀⁽ⁱ⁾ stored in the selector and convert to Lagrange basis + auto s1 = Accumulator(w_1 + c_0_int); + + // Apply S-box. 
Note that the multiplication is performed point-wise auto u1 = s1.sqr(); u1 = u1.sqr(); u1 *= s1; - auto u2_m = CoefficientAccumulator(in.w_r); - auto u3_m = CoefficientAccumulator(in.w_o); - auto u4_m = CoefficientAccumulator(in.w_4); - - auto q_pos_by_scaling_m = (q_poseidon2_internal_m * scaling_factor); - auto q_pos_by_scaling = Accumulator(q_pos_by_scaling_m); - // matrix mul with v = M_I * u 4 muls and 7 additions - auto partial_sum = u2_m + u3_m + u4_m; - auto scaled_u1 = u1 * q_pos_by_scaling; - - static const auto diagonal_term = FF(1) + crypto::Poseidon2Bn254ScalarFieldParams::internal_matrix_diagonal[0]; - auto barycentric_term = scaled_u1 * (diagonal_term); - auto monomial_term = partial_sum; - monomial_term -= w_l_shift_m; + + const auto q_pos_by_scaling_m = (q_poseidon2_internal_m * scaling_factor); + const auto q_pos_by_scaling = Accumulator(q_pos_by_scaling_m); + // Common terms + const auto partial_sum = w_2 + w_3 + w_4; + const auto scaled_u1 = u1 * q_pos_by_scaling; + + // Row 1: + barycentric_term = scaled_u1 * D1_plus_1; + auto monomial_term = partial_sum - w_1_shift; barycentric_term += Accumulator(monomial_term * q_pos_by_scaling_m); std::get<0>(evals) += barycentric_term; - auto v2_m = u2_m * crypto::Poseidon2Bn254ScalarFieldParams::internal_matrix_diagonal[1]; - v2_m += partial_sum; - v2_m -= w_r_shift_m; + // Row 2: + auto v2_m = w_2 * D2 + partial_sum - w_2_shift; barycentric_term = Accumulator(v2_m * q_pos_by_scaling_m); barycentric_term += scaled_u1; std::get<1>(evals) += barycentric_term; - auto v3_m = u3_m * crypto::Poseidon2Bn254ScalarFieldParams::internal_matrix_diagonal[2]; - v3_m += partial_sum; - v3_m -= w_o_shift_m; + // Row 3: + auto v3_m = w_3 * D3 + partial_sum - w_3_shift; barycentric_term = Accumulator(v3_m * q_pos_by_scaling_m); barycentric_term += scaled_u1; std::get<2>(evals) += barycentric_term; - auto v4_m = u4_m * crypto::Poseidon2Bn254ScalarFieldParams::internal_matrix_diagonal[3]; - v4_m += partial_sum; - v4_m -= 
w_4_shift_m; - + // Row 4: + auto v4_m = w_4 * D4 + partial_sum - w_4_shift; barycentric_term = Accumulator(v4_m * q_pos_by_scaling_m); barycentric_term += scaled_u1; std::get<3>(evals) += barycentric_term; @@ -116,4 +163,4 @@ template class Poseidon2InternalRelationImpl { }; // namespace bb template using Poseidon2InternalRelation = Relation>; -} // namespace bb \ No newline at end of file +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/relations/relation_parameters.hpp b/barretenberg/cpp/src/barretenberg/relations/relation_parameters.hpp index 0f976f19fde3..3aa1f15bfe68 100644 --- a/barretenberg/cpp/src/barretenberg/relations/relation_parameters.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/relation_parameters.hpp @@ -21,19 +21,23 @@ template struct RelationParameters { static constexpr int NUM_BINARY_LIMBS_IN_GOBLIN_TRANSLATOR = 4; static constexpr int NUM_NATIVE_LIMBS_IN_GOBLIN_TRANSLATOR = 1; static constexpr int NUM_CHALLENGE_POWERS_IN_GOBLIN_TRANSLATOR = 4; - static constexpr int NUM_TO_FOLD = 7; + static constexpr int NUM_TO_FOLD = 6; - T eta{ 0 }; // Lookup + Aux Memory - T eta_two{ 0 }; // Lookup + Aux Memory - T eta_three{ 0 }; // Lookup + Aux Memory - T beta{ 0 }; // Permutation + Lookup - T gamma{ 0 }; // Permutation + Lookup - T public_input_delta{ 0 }; // Permutation - T lookup_grand_product_delta{ 0 }; // Lookup + T eta{ 0 }; // Lookup + Aux Memory + T eta_two{ 0 }; // Lookup + Aux Memory + T eta_three{ 0 }; // Lookup + Aux Memory + T beta{ 0 }; // Permutation + Lookup + T gamma{ 0 }; // Permutation + Lookup + T public_input_delta{ 0 }; // Permutation T beta_sqr{ 0 }; T beta_cube{ 0 }; - // eccvm_set_permutation_delta is used in the set membership gadget in eccvm/ecc_set_relation.hpp - // We can remove this by modifying the relation, but increases complexity + // `eccvm_set_permutation_delta` is used in the set membership gadget in eccvm/ecc_set_relation.hpp, specifically to + // constrain (pc, round, wnaf_slice) to match 
between the MSM table and the Precomputed table. The number of rows we + // add per short scalar `mul` is slightly less in the Precomputed table as in the MSM table, so to get the + // permutation argument to work out, when `precompute_select == 0`, we must implicitly add (0, 0, 0) as a tuple on + // the wNAF side. This corresponds to multiplying by (γ)·(γ + β²)·(γ + 2β²)·(γ + 3β²). + // + // We can remove this by modifying the relation, but this would increase the complexity. T eccvm_set_permutation_delta = T(0); std::array accumulated_result = { T(0), T(0), T(0), T(0) }; // Translator std::array evaluation_input_x = { @@ -48,12 +52,12 @@ template struct RelationParameters { RefArray get_to_fold() { - return RefArray{ eta, eta_two, eta_three, beta, gamma, public_input_delta, lookup_grand_product_delta }; + return RefArray{ eta, eta_two, eta_three, beta, gamma, public_input_delta }; } RefArray get_to_fold() const { - return RefArray{ eta, eta_two, eta_three, beta, gamma, public_input_delta, lookup_grand_product_delta }; + return RefArray{ eta, eta_two, eta_three, beta, gamma, public_input_delta }; } static RelationParameters get_random() @@ -67,7 +71,6 @@ template struct RelationParameters { result.beta_cube = result.beta_sqr * result.beta; result.gamma = T::random_element(); result.public_input_delta = T::random_element(); - result.lookup_grand_product_delta = T::random_element(); result.eccvm_set_permutation_delta = result.gamma * (result.gamma + result.beta_sqr) * (result.gamma + result.beta_sqr + result.beta_sqr) * (result.gamma + result.beta_sqr + result.beta_sqr + result.beta_sqr); @@ -92,6 +95,6 @@ template struct RelationParameters { return result; } - MSGPACK_FIELDS(eta, eta_two, eta_three, beta, gamma, public_input_delta, lookup_grand_product_delta); + MSGPACK_FIELDS(eta, eta_two, eta_three, beta, gamma, public_input_delta); }; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/relations/relation_types.hpp 
b/barretenberg/cpp/src/barretenberg/relations/relation_types.hpp index ef830a063290..0053c13510da 100644 --- a/barretenberg/cpp/src/barretenberg/relations/relation_types.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/relation_types.hpp @@ -29,9 +29,7 @@ using GetParameterView = std::conditional_t, template concept HasSubrelationLinearlyIndependentMember = requires(T) { - { - std::get<0>(T::SUBRELATION_LINEARLY_INDEPENDENT) - } -> std::convertible_to; + { std::get<0>(T::SUBRELATION_LINEARLY_INDEPENDENT) } -> std::convertible_to; }; template @@ -76,22 +74,22 @@ consteval std::array c /** * @brief Get the subrelation accumulators for the Protogalaxy combiner calculation. * @details A subrelation of degree D, when evaluated on polynomials of degree N, gives a polynomial of degree D - * * N. In the context of Protogalaxy, N = NUM_KEYS-1. Hence, given a subrelation of length x, its - * evaluation on such polynomials will have degree (x-1) * (NUM_KEYS-1), and the length of this evaluation + * * N. In the context of Protogalaxy, N = NUM_INSTANCES-1. Hence, given a subrelation of length x, its + * evaluation on such polynomials will have degree (x-1) * (NUM_INSTANCES-1), and the length of this evaluation * will be one greater than this. - * @tparam NUM_KEYS + * @tparam NUM_INSTANCES * @tparam NUM_SUBRELATIONS * @param SUBRELATION_PARTIAL_LENGTHS The array of subrelation lengths supplied by a relation. 
* @return The transformed subrelation lenths */ -template +template consteval std::array compute_composed_subrelation_partial_lengths( std::array SUBRELATION_PARTIAL_LENGTHS) { std::transform(SUBRELATION_PARTIAL_LENGTHS.begin(), SUBRELATION_PARTIAL_LENGTHS.end(), SUBRELATION_PARTIAL_LENGTHS.begin(), - [](const size_t x) { return (x - 1) * (NUM_KEYS - 1) + 1; }); + [](const size_t x) { return (x - 1) * (NUM_INSTANCES - 1) + 1; }); return SUBRELATION_PARTIAL_LENGTHS; }; @@ -119,7 +117,7 @@ consteval std::array compute_composed_subrelation_part /** * @brief Check if the relation has a static skip method to determine if accumulation of its result can be - * optimised away based on a single check + * optimized away based on a single check * * @details The skip function should return true if relation can be skipped and false if it can't * @tparam Relation The relation type @@ -127,9 +125,7 @@ consteval std::array compute_composed_subrelation_part */ template concept isSkippable = requires(const AllEntities& input) { - { - Relation::skip(input) - } -> std::same_as; + { Relation::skip(input) } -> std::same_as; }; /** @@ -144,9 +140,7 @@ concept isSkippable = requires(const AllEntities& input) { template concept isRowSkippable = requires(const ProverPolynomialsOrPartiallyEvaluatedMultivariates& input, const EdgeType edge_idx) { - { - Flavor::skip_entire_row(input, edge_idx) - } -> std::same_as; + { Flavor::skip_entire_row(input, edge_idx) } -> std::same_as; }; /** @@ -169,15 +163,15 @@ template class Relation : public RelationImpl { static constexpr size_t TOTAL_RELATION_LENGTH = *std::max_element(SUBRELATION_TOTAL_LENGTHS.begin(), SUBRELATION_TOTAL_LENGTHS.end()); - template + template using ProtogalaxyTupleOfUnivariatesOverSubrelationsNoOptimisticSkipping = - TupleOfUnivariates(SUBRELATION_TOTAL_LENGTHS)>; - template + TupleOfUnivariates(SUBRELATION_TOTAL_LENGTHS)>; + template using ProtogalaxyTupleOfUnivariatesOverSubrelations = TupleOfUnivariatesWithOptimisticSkipping( 
+ compute_composed_subrelation_partial_lengths( SUBRELATION_TOTAL_LENGTHS), - NUM_KEYS - 1>; + NUM_INSTANCES - 1>; using SumcheckTupleOfUnivariatesOverSubrelations = TupleOfUnivariates; diff --git a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_decomposition_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_decomposition_relation.hpp index 6fba92a17fa6..00af4d0bf823 100644 --- a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_decomposition_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_decomposition_relation.hpp @@ -16,12 +16,12 @@ template class TranslatorDecompositionRelationImpl { // 1 + polynomial degree of this relation static constexpr size_t RELATION_LENGTH = - 3; // degree(lagrange_even_in_minicircuit_in_minicircuit(a - a_0 - a_1*2¹⁴ ... - a_l⋅2¹⁴ˡ )) = 2 + 4; // degree(lagrange_even_in_minicircuit_in_minicircuit(a - a_0 - a_1*2¹⁴ ... - a_l⋅2¹⁴ˡ )op) = 3 static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ - 3, // decomposition of P.x limb 0 into microlimbs subrelation - 3, // decomposition of P.x limb 1 into microlimbs subrelation - 3, // decomposition of P.x limb 2 into microlimbs subrelation - 3, // decomposition of P.x limb 3 into microlimbs subrelation + 4, // decomposition of accumulator limb 0 into microlimbs subrelation + 4, // decomposition of accumulator limb 1 into microlimbs subrelation + 4, // decomposition of accumulator limb 2 into microlimbs subrelation + 4, // decomposition of accumulator limb 3 into microlimbs subrelation 3, // decomposition of P.y limb 0 into microlimbs subrelation 3, // decomposition of P.y limb 1 into microlimbs subrelation 3, // decomposition of P.y limb 2 into microlimbs subrelation @@ -30,10 +30,10 @@ template class TranslatorDecompositionRelationImpl { 3, // decomposition of z2 limb 0 into microlimbs subrelation 3, // decomposition of z1 limb 1 into microlimbs subrelation 3, // decomposition of 
z2 limb 1 into microlimbs subrelation - 3, // decomposition of accumulator limb 0 into microlimbs subrelation - 3, // decomposition of accumulator limb 1 into microlimbs subrelation - 3, // decomposition of accumulator limb 2 into microlimbs subrelation - 3, // decomposition of accumulator limb 3 into microlimbs subrelation + 3, // decomposition of P.x limb 0 into microlimbs subrelation + 3, // decomposition of P.x limb 1 into microlimbs subrelation + 3, // decomposition of P.x limb 2 into microlimbs subrelation + 3, // decomposition of P.x limb 3 into microlimbs subrelation 3, // decomposition of quotient limb 0 into microlimbs subrelation 3, // decomposition of quotient limb 1 into microlimbs subrelation 3, // decomposition of quotient limb 2 into microlimbs subrelation diff --git a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_decomposition_relation_impl.hpp b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_decomposition_relation_impl.hpp index 52a8d47b717d..6ccaa6b737b4 100644 --- a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_decomposition_relation_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_decomposition_relation_impl.hpp @@ -37,588 +37,624 @@ void TranslatorDecompositionRelationImpl::accumulate(ContainerOverSubrelatio static constexpr size_t NUM_LIMB_BITS = 68; // Number of bits in a standard limb used for bigfield operations static constexpr size_t NUM_MICRO_LIMB_BITS = 14; // Number of bits in a standard limb used for bigfield operations - // Value to multiply an element by to perform an appropriate shift - static auto LIMB_SHIFT = FF(uint256_t(1) << NUM_LIMB_BITS); - - // Values to multiply an element by to perform an appropriate shift - static auto MICRO_LIMB_SHIFT = FF(uint256_t(1) << NUM_MICRO_LIMB_BITS); - static auto MICRO_LIMB_SHIFTx2 = MICRO_LIMB_SHIFT * MICRO_LIMB_SHIFT; - static auto MICRO_LIMB_SHIFTx3 = MICRO_LIMB_SHIFTx2 * 
MICRO_LIMB_SHIFT; - static auto MICRO_LIMB_SHIFTx4 = MICRO_LIMB_SHIFTx3 * MICRO_LIMB_SHIFT; - static auto MICRO_LIMB_SHIFTx5 = MICRO_LIMB_SHIFTx4 * MICRO_LIMB_SHIFT; - - // Shifts used to constrain ranges further - static auto SHIFT_12_TO_14 = - FF(4); // Shift used to range constrain the last microlimb of 68-bit limbs (standard limbs) - static auto SHIFT_10_TO_14 = - FF(16); // Shift used to range constrain the last microlimb of 52-bit limb (top quotient limb) - static auto SHIFT_8_TO_14 = FF(64); // Shift used to range constrain the last microlimb of 50-bit - // limbs (top limb of standard 254-bit value) - static auto SHIFT_4_TO_14 = - FF(1024); // Shift used to range constrain the last mircrolimb of 60-bit limbs from z scalars - - using Accumulator = std::tuple_element_t<0, ContainerOverSubrelations>; - using View = typename Accumulator::View; - - auto p_x_low_limbs = View(in.p_x_low_limbs); - auto p_x_low_limbs_range_constraint_0 = View(in.p_x_low_limbs_range_constraint_0); - auto p_x_low_limbs_range_constraint_1 = View(in.p_x_low_limbs_range_constraint_1); - auto p_x_low_limbs_range_constraint_2 = View(in.p_x_low_limbs_range_constraint_2); - auto p_x_low_limbs_range_constraint_3 = View(in.p_x_low_limbs_range_constraint_3); - auto p_x_low_limbs_range_constraint_4 = View(in.p_x_low_limbs_range_constraint_4); - auto p_x_low_limbs_shift = View(in.p_x_low_limbs_shift); - auto p_x_low_limbs_range_constraint_0_shift = View(in.p_x_low_limbs_range_constraint_0_shift); - auto p_x_low_limbs_range_constraint_1_shift = View(in.p_x_low_limbs_range_constraint_1_shift); - auto p_x_low_limbs_range_constraint_2_shift = View(in.p_x_low_limbs_range_constraint_2_shift); - auto p_x_low_limbs_range_constraint_3_shift = View(in.p_x_low_limbs_range_constraint_3_shift); - auto p_x_low_limbs_range_constraint_4_shift = View(in.p_x_low_limbs_range_constraint_4_shift); - auto p_x_high_limbs = View(in.p_x_high_limbs); - auto p_x_high_limbs_range_constraint_0 = 
View(in.p_x_high_limbs_range_constraint_0); - auto p_x_high_limbs_range_constraint_1 = View(in.p_x_high_limbs_range_constraint_1); - auto p_x_high_limbs_range_constraint_2 = View(in.p_x_high_limbs_range_constraint_2); - auto p_x_high_limbs_range_constraint_3 = View(in.p_x_high_limbs_range_constraint_3); - auto p_x_high_limbs_range_constraint_4 = View(in.p_x_high_limbs_range_constraint_4); - auto p_x_high_limbs_shift = View(in.p_x_high_limbs_shift); - auto p_x_high_limbs_range_constraint_0_shift = View(in.p_x_high_limbs_range_constraint_0_shift); - auto p_x_high_limbs_range_constraint_1_shift = View(in.p_x_high_limbs_range_constraint_1_shift); - auto p_x_high_limbs_range_constraint_2_shift = View(in.p_x_high_limbs_range_constraint_2_shift); - auto p_x_high_limbs_range_constraint_3_shift = View(in.p_x_high_limbs_range_constraint_3_shift); - auto p_y_low_limbs = View(in.p_y_low_limbs); - auto p_y_low_limbs_range_constraint_0 = View(in.p_y_low_limbs_range_constraint_0); - auto p_y_low_limbs_range_constraint_1 = View(in.p_y_low_limbs_range_constraint_1); - auto p_y_low_limbs_range_constraint_2 = View(in.p_y_low_limbs_range_constraint_2); - auto p_y_low_limbs_range_constraint_3 = View(in.p_y_low_limbs_range_constraint_3); - auto p_y_low_limbs_range_constraint_4 = View(in.p_y_low_limbs_range_constraint_4); - auto p_y_low_limbs_shift = View(in.p_y_low_limbs_shift); - auto p_y_low_limbs_range_constraint_0_shift = View(in.p_y_low_limbs_range_constraint_0_shift); - auto p_y_low_limbs_range_constraint_1_shift = View(in.p_y_low_limbs_range_constraint_1_shift); - auto p_y_low_limbs_range_constraint_2_shift = View(in.p_y_low_limbs_range_constraint_2_shift); - auto p_y_low_limbs_range_constraint_3_shift = View(in.p_y_low_limbs_range_constraint_3_shift); - auto p_y_low_limbs_range_constraint_4_shift = View(in.p_y_low_limbs_range_constraint_4_shift); - auto p_y_high_limbs = View(in.p_y_high_limbs); - auto p_y_high_limbs_range_constraint_0 = 
View(in.p_y_high_limbs_range_constraint_0); - auto p_y_high_limbs_range_constraint_1 = View(in.p_y_high_limbs_range_constraint_1); - auto p_y_high_limbs_range_constraint_2 = View(in.p_y_high_limbs_range_constraint_2); - auto p_y_high_limbs_range_constraint_3 = View(in.p_y_high_limbs_range_constraint_3); - auto p_y_high_limbs_range_constraint_4 = View(in.p_y_high_limbs_range_constraint_4); - auto p_y_high_limbs_shift = View(in.p_y_high_limbs_shift); - auto p_y_high_limbs_range_constraint_0_shift = View(in.p_y_high_limbs_range_constraint_0_shift); - auto p_y_high_limbs_range_constraint_1_shift = View(in.p_y_high_limbs_range_constraint_1_shift); - auto p_y_high_limbs_range_constraint_2_shift = View(in.p_y_high_limbs_range_constraint_2_shift); - auto p_y_high_limbs_range_constraint_3_shift = View(in.p_y_high_limbs_range_constraint_3_shift); - auto z_low_limbs = View(in.z_low_limbs); - auto z_low_limbs_range_constraint_0 = View(in.z_low_limbs_range_constraint_0); - auto z_low_limbs_range_constraint_1 = View(in.z_low_limbs_range_constraint_1); - auto z_low_limbs_range_constraint_2 = View(in.z_low_limbs_range_constraint_2); - auto z_low_limbs_range_constraint_3 = View(in.z_low_limbs_range_constraint_3); - auto z_low_limbs_range_constraint_4 = View(in.z_low_limbs_range_constraint_4); - auto z_low_limbs_shift = View(in.z_low_limbs_shift); - auto z_low_limbs_range_constraint_0_shift = View(in.z_low_limbs_range_constraint_0_shift); - auto z_low_limbs_range_constraint_1_shift = View(in.z_low_limbs_range_constraint_1_shift); - auto z_low_limbs_range_constraint_2_shift = View(in.z_low_limbs_range_constraint_2_shift); - auto z_low_limbs_range_constraint_3_shift = View(in.z_low_limbs_range_constraint_3_shift); - auto z_low_limbs_range_constraint_4_shift = View(in.z_low_limbs_range_constraint_4_shift); - auto z_high_limbs = View(in.z_high_limbs); - auto z_high_limbs_range_constraint_0 = View(in.z_high_limbs_range_constraint_0); - auto z_high_limbs_range_constraint_1 = 
View(in.z_high_limbs_range_constraint_1); - auto z_high_limbs_range_constraint_2 = View(in.z_high_limbs_range_constraint_2); - auto z_high_limbs_range_constraint_3 = View(in.z_high_limbs_range_constraint_3); - auto z_high_limbs_range_constraint_4 = View(in.z_high_limbs_range_constraint_4); - auto z_high_limbs_shift = View(in.z_high_limbs_shift); - auto z_high_limbs_range_constraint_0_shift = View(in.z_high_limbs_range_constraint_0_shift); - auto z_high_limbs_range_constraint_1_shift = View(in.z_high_limbs_range_constraint_1_shift); - auto z_high_limbs_range_constraint_2_shift = View(in.z_high_limbs_range_constraint_2_shift); - auto z_high_limbs_range_constraint_3_shift = View(in.z_high_limbs_range_constraint_3_shift); - auto z_high_limbs_range_constraint_4_shift = View(in.z_high_limbs_range_constraint_4_shift); - auto accumulators_binary_limbs_0 = View(in.accumulators_binary_limbs_0); - auto accumulators_binary_limbs_1 = View(in.accumulators_binary_limbs_1); - auto accumulators_binary_limbs_2 = View(in.accumulators_binary_limbs_2); - auto accumulators_binary_limbs_3 = View(in.accumulators_binary_limbs_3); - auto accumulator_low_limbs_range_constraint_0 = View(in.accumulator_low_limbs_range_constraint_0); - auto accumulator_low_limbs_range_constraint_1 = View(in.accumulator_low_limbs_range_constraint_1); - auto accumulator_low_limbs_range_constraint_2 = View(in.accumulator_low_limbs_range_constraint_2); - auto accumulator_low_limbs_range_constraint_3 = View(in.accumulator_low_limbs_range_constraint_3); - auto accumulator_low_limbs_range_constraint_4 = View(in.accumulator_low_limbs_range_constraint_4); - auto accumulator_low_limbs_range_constraint_0_shift = View(in.accumulator_low_limbs_range_constraint_0_shift); - auto accumulator_low_limbs_range_constraint_1_shift = View(in.accumulator_low_limbs_range_constraint_1_shift); - auto accumulator_low_limbs_range_constraint_2_shift = View(in.accumulator_low_limbs_range_constraint_2_shift); - auto 
accumulator_low_limbs_range_constraint_3_shift = View(in.accumulator_low_limbs_range_constraint_3_shift); - auto accumulator_low_limbs_range_constraint_4_shift = View(in.accumulator_low_limbs_range_constraint_4_shift); - auto accumulator_high_limbs_range_constraint_0 = View(in.accumulator_high_limbs_range_constraint_0); - auto accumulator_high_limbs_range_constraint_1 = View(in.accumulator_high_limbs_range_constraint_1); - auto accumulator_high_limbs_range_constraint_2 = View(in.accumulator_high_limbs_range_constraint_2); - auto accumulator_high_limbs_range_constraint_3 = View(in.accumulator_high_limbs_range_constraint_3); - auto accumulator_high_limbs_range_constraint_4 = View(in.accumulator_high_limbs_range_constraint_4); - auto accumulator_high_limbs_range_constraint_0_shift = View(in.accumulator_high_limbs_range_constraint_0_shift); - auto accumulator_high_limbs_range_constraint_1_shift = View(in.accumulator_high_limbs_range_constraint_1_shift); - auto accumulator_high_limbs_range_constraint_2_shift = View(in.accumulator_high_limbs_range_constraint_2_shift); - auto accumulator_high_limbs_range_constraint_3_shift = View(in.accumulator_high_limbs_range_constraint_3_shift); - auto quotient_low_binary_limbs = View(in.quotient_low_binary_limbs); - auto quotient_low_limbs_range_constraint_0 = View(in.quotient_low_limbs_range_constraint_0); - auto quotient_low_limbs_range_constraint_1 = View(in.quotient_low_limbs_range_constraint_1); - auto quotient_low_limbs_range_constraint_2 = View(in.quotient_low_limbs_range_constraint_2); - auto quotient_low_limbs_range_constraint_3 = View(in.quotient_low_limbs_range_constraint_3); - auto quotient_low_limbs_range_constraint_4 = View(in.quotient_low_limbs_range_constraint_4); - auto quotient_low_binary_limbs_shift = View(in.quotient_low_binary_limbs_shift); - auto quotient_low_limbs_range_constraint_0_shift = View(in.quotient_low_limbs_range_constraint_0_shift); - auto quotient_low_limbs_range_constraint_1_shift = 
View(in.quotient_low_limbs_range_constraint_1_shift); - auto quotient_low_limbs_range_constraint_2_shift = View(in.quotient_low_limbs_range_constraint_2_shift); - auto quotient_low_limbs_range_constraint_3_shift = View(in.quotient_low_limbs_range_constraint_3_shift); - auto quotient_low_limbs_range_constraint_4_shift = View(in.quotient_low_limbs_range_constraint_4_shift); - auto quotient_high_binary_limbs = View(in.quotient_high_binary_limbs); - auto quotient_high_limbs_range_constraint_0 = View(in.quotient_high_limbs_range_constraint_0); - auto quotient_high_limbs_range_constraint_1 = View(in.quotient_high_limbs_range_constraint_1); - auto quotient_high_limbs_range_constraint_2 = View(in.quotient_high_limbs_range_constraint_2); - auto quotient_high_limbs_range_constraint_3 = View(in.quotient_high_limbs_range_constraint_3); - auto quotient_high_limbs_range_constraint_4 = View(in.quotient_high_limbs_range_constraint_4); - auto quotient_high_binary_limbs_shift = View(in.quotient_high_binary_limbs_shift); - auto quotient_high_limbs_range_constraint_0_shift = View(in.quotient_high_limbs_range_constraint_0_shift); - auto quotient_high_limbs_range_constraint_1_shift = View(in.quotient_high_limbs_range_constraint_1_shift); - auto quotient_high_limbs_range_constraint_2_shift = View(in.quotient_high_limbs_range_constraint_2_shift); - auto quotient_high_limbs_range_constraint_3_shift = View(in.quotient_high_limbs_range_constraint_3_shift); - auto relation_wide_limbs = View(in.relation_wide_limbs); - auto relation_wide_limbs_range_constraint_0 = View(in.relation_wide_limbs_range_constraint_0); - auto relation_wide_limbs_range_constraint_1 = View(in.relation_wide_limbs_range_constraint_1); - auto relation_wide_limbs_range_constraint_2 = View(in.relation_wide_limbs_range_constraint_2); - auto relation_wide_limbs_range_constraint_3 = View(in.relation_wide_limbs_range_constraint_3); - auto p_x_high_limbs_range_constraint_tail_shift = 
View(in.p_x_high_limbs_range_constraint_tail_shift); - auto accumulator_high_limbs_range_constraint_tail_shift = - View(in.accumulator_high_limbs_range_constraint_tail_shift); - auto relation_wide_limbs_shift = View(in.relation_wide_limbs_shift); - auto relation_wide_limbs_range_constraint_0_shift = View(in.relation_wide_limbs_range_constraint_0_shift); - auto relation_wide_limbs_range_constraint_1_shift = View(in.relation_wide_limbs_range_constraint_1_shift); - auto relation_wide_limbs_range_constraint_2_shift = View(in.relation_wide_limbs_range_constraint_2_shift); - auto relation_wide_limbs_range_constraint_3_shift = View(in.relation_wide_limbs_range_constraint_3_shift); - auto p_y_high_limbs_range_constraint_tail_shift = View(in.p_y_high_limbs_range_constraint_tail_shift); - auto quotient_high_limbs_range_constraint_tail_shift = View(in.quotient_high_limbs_range_constraint_tail_shift); - auto p_x_low_limbs_range_constraint_tail = View(in.p_x_low_limbs_range_constraint_tail); - auto p_x_low_limbs_range_constraint_tail_shift = View(in.p_x_low_limbs_range_constraint_tail_shift); - auto p_x_high_limbs_range_constraint_tail = View(in.p_x_high_limbs_range_constraint_tail); - auto p_x_high_limbs_range_constraint_4_shift = View(in.p_x_high_limbs_range_constraint_4_shift); - auto p_y_low_limbs_range_constraint_tail = View(in.p_y_low_limbs_range_constraint_tail); - auto p_y_low_limbs_range_constraint_tail_shift = View(in.p_y_low_limbs_range_constraint_tail_shift); - auto p_y_high_limbs_range_constraint_tail = View(in.p_y_high_limbs_range_constraint_tail); - auto p_y_high_limbs_range_constraint_4_shift = View(in.p_y_high_limbs_range_constraint_4_shift); - auto z_low_limbs_range_constraint_tail = View(in.z_low_limbs_range_constraint_tail); - auto z_low_limbs_range_constraint_tail_shift = View(in.z_low_limbs_range_constraint_tail_shift); - auto z_high_limbs_range_constraint_tail = View(in.z_high_limbs_range_constraint_tail); - auto z_high_limbs_range_constraint_tail_shift = 
View(in.z_high_limbs_range_constraint_tail_shift); - auto accumulator_low_limbs_range_constraint_tail = View(in.accumulator_low_limbs_range_constraint_tail); - auto accumulator_low_limbs_range_constraint_tail_shift = View(in.accumulator_low_limbs_range_constraint_tail_shift); - auto accumulator_high_limbs_range_constraint_tail = View(in.accumulator_high_limbs_range_constraint_tail); - auto accumulator_high_limbs_range_constraint_4_shift = View(in.accumulator_high_limbs_range_constraint_4_shift); - auto quotient_low_limbs_range_constraint_tail = View(in.quotient_low_limbs_range_constraint_tail); - auto quotient_low_limbs_range_constraint_tail_shift = View(in.quotient_low_limbs_range_constraint_tail_shift); - auto quotient_high_limbs_range_constraint_tail = View(in.quotient_high_limbs_range_constraint_tail); - auto quotient_high_limbs_range_constraint_4_shift = View(in.quotient_high_limbs_range_constraint_4_shift); - auto x_lo_y_hi = View(in.x_lo_y_hi); - auto x_hi_z_1 = View(in.x_hi_z_1); - auto y_lo_z_2 = View(in.y_lo_z_2); - auto x_lo_y_hi_shift = View(in.x_lo_y_hi_shift); - auto x_hi_z_1_shift = View(in.x_hi_z_1_shift); - auto y_lo_z_2_shift = View(in.y_lo_z_2_shift); - auto lagrange_even_in_minicircuit = View(in.lagrange_even_in_minicircuit); - - // Contributions that decompose 50, 52, 68 or 84 bit limbs used for computation into range-constrained chunks - // Contribution 1 , P_x lowest limb decomposition - auto tmp_1 = ((p_x_low_limbs_range_constraint_0 + p_x_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - p_x_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + - p_x_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - p_x_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - p_x_low_limbs); - tmp_1 *= lagrange_even_in_minicircuit; - tmp_1 *= scaling_factor; - std::get<0>(accumulators) += tmp_1; - - // Contribution 2 , P_x second lowest limb decomposition - auto tmp_2 = ((p_x_low_limbs_range_constraint_0_shift + p_x_low_limbs_range_constraint_1_shift * 
MICRO_LIMB_SHIFT + - p_x_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - p_x_low_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + - p_x_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - - p_x_low_limbs_shift); - tmp_2 *= lagrange_even_in_minicircuit; - tmp_2 *= scaling_factor; - std::get<1>(accumulators) += tmp_2; - - // Contribution 3 , P_x third limb decomposition - auto tmp_3 = ((p_x_high_limbs_range_constraint_0 + p_x_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - p_x_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + - p_x_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - p_x_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - p_x_high_limbs); - tmp_3 *= lagrange_even_in_minicircuit; - tmp_3 *= scaling_factor; - std::get<2>(accumulators) += tmp_3; - - // Contribution 4 , P_x highest limb decomposition - auto tmp_4 = - ((p_x_high_limbs_range_constraint_0_shift + p_x_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - p_x_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - p_x_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3) - - p_x_high_limbs_shift); - tmp_4 *= lagrange_even_in_minicircuit; - tmp_4 *= scaling_factor; - std::get<3>(accumulators) += tmp_4; - - // Contribution 5 , P_y lowest limb decomposition - auto tmp_5 = ((p_y_low_limbs_range_constraint_0 + p_y_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - p_y_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + - p_y_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - p_y_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - p_y_low_limbs); - tmp_5 *= lagrange_even_in_minicircuit; - tmp_5 *= scaling_factor; - std::get<4>(accumulators) += tmp_5; - - // Contribution 6 , P_y second lowest limb decomposition - auto tmp_6 = ((p_y_low_limbs_range_constraint_0_shift + p_y_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - p_y_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - p_y_low_limbs_range_constraint_3_shift * 
MICRO_LIMB_SHIFTx3 + - p_y_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - - p_y_low_limbs_shift); - tmp_6 *= lagrange_even_in_minicircuit; - tmp_6 *= scaling_factor; - std::get<5>(accumulators) += tmp_6; - - // Contribution 7 , P_y third limb decomposition - auto tmp_7 = ((p_y_high_limbs_range_constraint_0 + p_y_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - p_y_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + - p_y_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - p_y_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - p_y_high_limbs); - tmp_7 *= lagrange_even_in_minicircuit; - tmp_7 *= scaling_factor; - std::get<6>(accumulators) += tmp_7; - - // Contribution 8 , P_y highest limb decomposition - auto tmp_8 = - ((p_y_high_limbs_range_constraint_0_shift + p_y_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - p_y_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - p_y_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3) - - p_y_high_limbs_shift); - tmp_8 *= lagrange_even_in_minicircuit; - tmp_8 *= scaling_factor; - std::get<7>(accumulators) += tmp_8; - - // Contribution 9 , z_1 low limb decomposition - auto tmp_9 = - ((z_low_limbs_range_constraint_0 + z_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - z_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + z_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - z_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - z_low_limbs); - tmp_9 *= lagrange_even_in_minicircuit; - tmp_9 *= scaling_factor; - std::get<8>(accumulators) += tmp_9; - - // Contribution 10 , z_2 low limb decomposition - auto tmp_10 = ((z_low_limbs_range_constraint_0_shift + z_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - z_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - z_low_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + - z_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - - z_low_limbs_shift); - tmp_10 *= lagrange_even_in_minicircuit; - tmp_10 *= 
scaling_factor; - std::get<9>(accumulators) += tmp_10; - - // Contribution 11 , z_1 high limb decomposition - auto tmp_11 = - ((z_high_limbs_range_constraint_0 + z_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - z_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + z_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - z_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - z_high_limbs); - tmp_11 *= lagrange_even_in_minicircuit; - tmp_11 *= scaling_factor; - std::get<10>(accumulators) += tmp_11; - - // Contribution 12 , z_2 high limb decomposition - auto tmp_12 = ((z_high_limbs_range_constraint_0_shift + z_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - z_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - z_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + - z_high_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - - z_high_limbs_shift); - tmp_12 *= lagrange_even_in_minicircuit; - tmp_12 *= scaling_factor; - std::get<11>(accumulators) += tmp_12; - - // Contribution 13 , accumulator lowest limb decomposition - auto tmp_13 = - ((accumulator_low_limbs_range_constraint_0 + accumulator_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - accumulator_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + - accumulator_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - accumulator_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - accumulators_binary_limbs_0); - tmp_13 *= lagrange_even_in_minicircuit; - tmp_13 *= scaling_factor; - std::get<12>(accumulators) += tmp_13; - // Contribution 14 , accumulator second limb decomposition - auto tmp_14 = ((accumulator_low_limbs_range_constraint_0_shift + - accumulator_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - accumulator_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - accumulator_low_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + - accumulator_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - - accumulators_binary_limbs_1); - tmp_14 *= 
lagrange_even_in_minicircuit; - tmp_14 *= scaling_factor; - std::get<13>(accumulators) += tmp_14; - - // Contribution 15 , accumulator second highest limb decomposition - auto tmp_15 = - ((accumulator_high_limbs_range_constraint_0 + accumulator_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - accumulator_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + - accumulator_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - accumulator_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - accumulators_binary_limbs_2); - tmp_15 *= lagrange_even_in_minicircuit; - tmp_15 *= scaling_factor; - std::get<14>(accumulators) += tmp_15; - // Contribution 16 , accumulator highest limb decomposition - auto tmp_16 = ((accumulator_high_limbs_range_constraint_0_shift + - accumulator_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - accumulator_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - accumulator_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3) - - accumulators_binary_limbs_3); - tmp_16 *= lagrange_even_in_minicircuit; - tmp_16 *= scaling_factor; - std::get<15>(accumulators) += tmp_16; - - // Contribution 15 , quotient lowest limb decomposition - auto tmp_17 = ((quotient_low_limbs_range_constraint_0 + quotient_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - quotient_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + - quotient_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - quotient_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - quotient_low_binary_limbs); - tmp_17 *= lagrange_even_in_minicircuit; - tmp_17 *= scaling_factor; - std::get<16>(accumulators) += tmp_17; - // Contribution 16 , quotient second lowest limb decomposition - auto tmp_18 = - ((quotient_low_limbs_range_constraint_0_shift + quotient_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - quotient_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - quotient_low_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + - 
quotient_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - - quotient_low_binary_limbs_shift); - tmp_18 *= lagrange_even_in_minicircuit; - tmp_18 *= scaling_factor; - std::get<17>(accumulators) += tmp_18; - - // Contribution 19 , quotient second highest limb decomposition - auto tmp_19 = ((quotient_high_limbs_range_constraint_0 + quotient_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - quotient_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + - quotient_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - quotient_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - quotient_high_binary_limbs); - tmp_19 *= lagrange_even_in_minicircuit; - tmp_19 *= scaling_factor; - std::get<18>(accumulators) += tmp_19; - // Contribution 20 , quotient highest limb decomposition - auto tmp_20 = ((quotient_high_limbs_range_constraint_0_shift + - quotient_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - quotient_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - quotient_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3) - - quotient_high_binary_limbs_shift); - tmp_20 *= lagrange_even_in_minicircuit; - tmp_20 *= scaling_factor; - std::get<19>(accumulators) += tmp_20; - - // Contribution 21 , decomposition of the low wide relation limb used for the bigfield relation. - // N.B. top microlimbs of relation wide limbs are stored in microlimbs for range constraints of P_x, P_y, - // accumulator and quotient. 
This is to save space and because these microlimbs are not used by their namesakes, - // since top limbs in 254/6-bit values use one less microlimb for the top 50/52-bit limb - auto tmp_21 = ((relation_wide_limbs_range_constraint_0 + relation_wide_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - relation_wide_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + - relation_wide_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - p_x_high_limbs_range_constraint_tail_shift * MICRO_LIMB_SHIFTx4 + - accumulator_high_limbs_range_constraint_tail_shift * MICRO_LIMB_SHIFTx5) - - relation_wide_limbs); - tmp_21 *= lagrange_even_in_minicircuit; - tmp_21 *= scaling_factor; - std::get<20>(accumulators) += tmp_21; - - // Contribution 22 , decomposition of high relation limb - auto tmp_22 = ((relation_wide_limbs_range_constraint_0_shift + - relation_wide_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - relation_wide_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - relation_wide_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + - p_y_high_limbs_range_constraint_tail_shift * MICRO_LIMB_SHIFTx4 + - quotient_high_limbs_range_constraint_tail_shift * MICRO_LIMB_SHIFTx5) - - relation_wide_limbs_shift); - tmp_22 *= lagrange_even_in_minicircuit; - tmp_22 *= scaling_factor; - std::get<21>(accumulators) += tmp_22; - - // Contributions enfocing a reduced range constraint on high limbs (these relation force the last microlimb in - // each limb to be more severely range constrained) - - // Contribution 23, range constrain the highest microlimb of lowest P.x limb to be 12 bits (68 % 14 = 12) - auto tmp_23 = p_x_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - p_x_low_limbs_range_constraint_tail; - tmp_23 *= lagrange_even_in_minicircuit; - tmp_23 *= scaling_factor; - std::get<22>(accumulators) += tmp_23; - - // Contribution 24, range constrain the highest microlimb of second lowest P.x limb to be 12 bits - auto tmp_24 = p_x_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - 
p_x_low_limbs_range_constraint_tail_shift; - tmp_24 *= lagrange_even_in_minicircuit; - tmp_24 *= scaling_factor; - std::get<23>(accumulators) += tmp_24; - - // Contribution 25, range constrain the highest microlimb of second highest P.x limb to be 12 bits - auto tmp_25 = p_x_high_limbs_range_constraint_4 * SHIFT_12_TO_14 - p_x_high_limbs_range_constraint_tail; - tmp_25 *= lagrange_even_in_minicircuit; - tmp_25 *= scaling_factor; - std::get<24>(accumulators) += tmp_25; - - // Contribution 26, range constrain the highest microilmb of highest P.x limb to be 8 bits (50 % 14 = 8) - auto tmp_26 = (p_x_high_limbs_range_constraint_3_shift * SHIFT_8_TO_14 - p_x_high_limbs_range_constraint_4_shift); - - tmp_26 *= lagrange_even_in_minicircuit; - tmp_26 *= scaling_factor; - std::get<25>(accumulators) += tmp_26; - - // Contribution 27, range constrain the highest microlimb of lowest P.y limb to be 12 bits (68 % 14 = 12) - auto tmp_27 = p_y_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - p_y_low_limbs_range_constraint_tail; - tmp_27 *= lagrange_even_in_minicircuit; - tmp_27 *= scaling_factor; - std::get<26>(accumulators) += tmp_27; - - // Contribution 28, range constrain the highest microlimb of second lowest P.y limb to be 12 bits (68 % 14 = 12) - auto tmp_28 = p_y_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - p_y_low_limbs_range_constraint_tail_shift; - tmp_28 *= lagrange_even_in_minicircuit; - tmp_28 *= scaling_factor; - std::get<27>(accumulators) += tmp_28; - - // Contribution 29, range constrain the highest microlimb of second highest P.y limb to be 12 bits (68 % 14 = - // 12) - auto tmp_29 = p_y_high_limbs_range_constraint_4 * SHIFT_12_TO_14 - p_y_high_limbs_range_constraint_tail; - tmp_29 *= lagrange_even_in_minicircuit; - tmp_29 *= scaling_factor; - std::get<28>(accumulators) += tmp_29; - - // Contribution 30, range constrain the highest microlimb of highest P.y limb to be 8 bits (50 % 14 = 8) - auto tmp_30 = (p_y_high_limbs_range_constraint_3_shift * 
SHIFT_8_TO_14 - p_y_high_limbs_range_constraint_4_shift); - - tmp_30 *= lagrange_even_in_minicircuit; - tmp_30 *= scaling_factor; - std::get<29>(accumulators) += tmp_30; - - // Contribution 31, range constrain the highest microlimb of low z1 limb to be 12 bits (68 % 14 = 12) - auto tmp_31 = (z_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - z_low_limbs_range_constraint_tail); - tmp_31 *= lagrange_even_in_minicircuit; - tmp_31 *= scaling_factor; - std::get<30>(accumulators) += tmp_31; - - // Contribution 32, range constrain the highest microlimb of low z2 limb to be 12 bits (68 % 14 = 12) - auto tmp_32 = (z_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - z_low_limbs_range_constraint_tail_shift); - tmp_32 *= lagrange_even_in_minicircuit; - tmp_32 *= scaling_factor; - std::get<31>(accumulators) += tmp_32; - - // Contribution 33, range constrain the highest microlimb of high z1 limb to be 4 bits (60 % 14 = 12) - auto tmp_33 = (z_high_limbs_range_constraint_4 * SHIFT_4_TO_14 - z_high_limbs_range_constraint_tail); - tmp_33 *= lagrange_even_in_minicircuit; - tmp_33 *= scaling_factor; - std::get<32>(accumulators) += tmp_33; - - // Contribution 34, range constrain the highest microlimb of high z2 limb to be 4 bits (60 % 14 = 12) - auto tmp_34 = (z_high_limbs_range_constraint_4_shift * SHIFT_4_TO_14 - z_high_limbs_range_constraint_tail_shift); - tmp_34 *= lagrange_even_in_minicircuit; - tmp_34 *= scaling_factor; - std::get<33>(accumulators) += tmp_34; - - // Contribution 35, range constrain the highest microlimb of lowest current accumulator limb to be 12 bits (68 % - // 14 = 12) - auto tmp_35 = - (accumulator_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - accumulator_low_limbs_range_constraint_tail); - tmp_35 *= lagrange_even_in_minicircuit; - tmp_35 *= scaling_factor; - std::get<34>(accumulators) += tmp_35; - - // Contribution 36, range constrain the highest microlimb of second lowest current accumulator limb to be 12 - // bits (68 % 14 = 12) - auto tmp_36 = 
(accumulator_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - - accumulator_low_limbs_range_constraint_tail_shift); - tmp_36 *= lagrange_even_in_minicircuit; - tmp_36 *= scaling_factor; - std::get<35>(accumulators) += tmp_36; - - // Contribution 37, range constrain the highest microlimb of second highest current accumulator limb to be 12 - // bits (68 % 14 = 12) - auto tmp_37 = - (accumulator_high_limbs_range_constraint_4 * SHIFT_12_TO_14 - accumulator_high_limbs_range_constraint_tail); - tmp_37 *= lagrange_even_in_minicircuit; - tmp_37 *= scaling_factor; - std::get<36>(accumulators) += tmp_37; - - // Contribution 38, range constrain the highest microlimb of highest current accumulator limb to be 8 bits (50 % - // 14 = 12) - auto tmp_38 = (accumulator_high_limbs_range_constraint_3_shift * SHIFT_8_TO_14 - - accumulator_high_limbs_range_constraint_4_shift); - tmp_38 *= lagrange_even_in_minicircuit; - tmp_38 *= scaling_factor; - std::get<37>(accumulators) += tmp_38; - - // Contribution 39, range constrain the highest microlimb of lowest quotient limb to be 12 bits (68 % 14 = 12) - auto tmp_39 = (quotient_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - quotient_low_limbs_range_constraint_tail); - tmp_39 *= lagrange_even_in_minicircuit; - tmp_39 *= scaling_factor; - std::get<38>(accumulators) += tmp_39; - - // Contribution 40, range constrain the highest microlimb of second lowest quotient limb to be 12 bits (68 % 14 - // = 12) - auto tmp_40 = - (quotient_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - quotient_low_limbs_range_constraint_tail_shift); - tmp_40 *= lagrange_even_in_minicircuit; - tmp_40 *= scaling_factor; - std::get<39>(accumulators) += tmp_40; - - // Contribution 41, range constrain the highest microlimb of second highest quotient limb to be 12 bits (68 % 14 - // = 12) - auto tmp_41 = (quotient_high_limbs_range_constraint_4 * SHIFT_12_TO_14 - quotient_high_limbs_range_constraint_tail); - tmp_41 *= lagrange_even_in_minicircuit; - tmp_41 *= 
scaling_factor; - std::get<40>(accumulators) += tmp_41; - - // Contribution 42, range constrain the highest microlimb of highest quotient limb to be 10 bits (52 % 14 = 12) - auto tmp_42 = - (quotient_high_limbs_range_constraint_3_shift * SHIFT_10_TO_14 - quotient_high_limbs_range_constraint_4_shift); - tmp_42 *= lagrange_even_in_minicircuit; - tmp_42 *= scaling_factor; - std::get<41>(accumulators) += tmp_42; - - // Contributions where we decompose initial EccOpQueue values into 68-bit limbs - - // Contribution 43, decompose x_lo - auto tmp_43 = (p_x_low_limbs + p_x_low_limbs_shift * LIMB_SHIFT) - x_lo_y_hi; - tmp_43 *= lagrange_even_in_minicircuit; - tmp_43 *= scaling_factor; - std::get<42>(accumulators) += tmp_43; - - // Contribution 44, decompose x_hi - auto tmp_44 = (p_x_high_limbs + p_x_high_limbs_shift * LIMB_SHIFT) - x_hi_z_1; - tmp_44 *= lagrange_even_in_minicircuit; - tmp_44 *= scaling_factor; - std::get<43>(accumulators) += tmp_44; - // Contribution 45, decompose y_lo - auto tmp_45 = (p_y_low_limbs + p_y_low_limbs_shift * LIMB_SHIFT) - y_lo_z_2; - tmp_45 *= lagrange_even_in_minicircuit; - tmp_45 *= scaling_factor; - std::get<44>(accumulators) += tmp_45; - - // Contribution 46, decompose y_hi - auto tmp_46 = (p_y_high_limbs + p_y_high_limbs_shift * LIMB_SHIFT) - x_lo_y_hi_shift; - tmp_46 *= lagrange_even_in_minicircuit; - tmp_46 *= scaling_factor; - std::get<45>(accumulators) += tmp_46; - - // Contribution 47, decompose z1 - auto tmp_47 = (z_low_limbs + z_high_limbs * LIMB_SHIFT) - x_hi_z_1_shift; - tmp_47 *= lagrange_even_in_minicircuit; - tmp_47 *= scaling_factor; - std::get<46>(accumulators) += tmp_47; - - // Contribution 48, decompose z2 - auto tmp_48 = (z_low_limbs_shift + z_high_limbs_shift * LIMB_SHIFT) - y_lo_z_2_shift; - tmp_48 *= lagrange_even_in_minicircuit; - tmp_48 *= scaling_factor; - std::get<47>(accumulators) += tmp_48; + [&]() { + // Within the no-op range i.e. 
when the op polynomial is 0 at even index the 2 Translator trace rows are empty + // except for the accumulator binary limbs which get transferred across the no-op range + using Accumulator = std::tuple_element_t<0, ContainerOverSubrelations>; + using View = typename Accumulator::View; + + // Values to multiply an element by to perform an appropriate shift + static auto MICRO_LIMB_SHIFT = FF(uint256_t(1) << NUM_MICRO_LIMB_BITS); + static auto MICRO_LIMB_SHIFTx2 = MICRO_LIMB_SHIFT * MICRO_LIMB_SHIFT; + static auto MICRO_LIMB_SHIFTx3 = MICRO_LIMB_SHIFTx2 * MICRO_LIMB_SHIFT; + static auto MICRO_LIMB_SHIFTx4 = MICRO_LIMB_SHIFTx3 * MICRO_LIMB_SHIFT; + + auto accumulators_binary_limbs_0 = View(in.accumulators_binary_limbs_0); + auto accumulators_binary_limbs_1 = View(in.accumulators_binary_limbs_1); + auto accumulators_binary_limbs_2 = View(in.accumulators_binary_limbs_2); + auto accumulators_binary_limbs_3 = View(in.accumulators_binary_limbs_3); + auto accumulator_low_limbs_range_constraint_0 = View(in.accumulator_low_limbs_range_constraint_0); + auto accumulator_low_limbs_range_constraint_1 = View(in.accumulator_low_limbs_range_constraint_1); + auto accumulator_low_limbs_range_constraint_2 = View(in.accumulator_low_limbs_range_constraint_2); + auto accumulator_low_limbs_range_constraint_3 = View(in.accumulator_low_limbs_range_constraint_3); + auto accumulator_low_limbs_range_constraint_4 = View(in.accumulator_low_limbs_range_constraint_4); + auto accumulator_low_limbs_range_constraint_0_shift = View(in.accumulator_low_limbs_range_constraint_0_shift); + auto accumulator_low_limbs_range_constraint_1_shift = View(in.accumulator_low_limbs_range_constraint_1_shift); + auto accumulator_low_limbs_range_constraint_2_shift = View(in.accumulator_low_limbs_range_constraint_2_shift); + auto accumulator_low_limbs_range_constraint_3_shift = View(in.accumulator_low_limbs_range_constraint_3_shift); + auto accumulator_low_limbs_range_constraint_4_shift = 
View(in.accumulator_low_limbs_range_constraint_4_shift); + auto accumulator_high_limbs_range_constraint_0 = View(in.accumulator_high_limbs_range_constraint_0); + auto accumulator_high_limbs_range_constraint_1 = View(in.accumulator_high_limbs_range_constraint_1); + auto accumulator_high_limbs_range_constraint_2 = View(in.accumulator_high_limbs_range_constraint_2); + auto accumulator_high_limbs_range_constraint_3 = View(in.accumulator_high_limbs_range_constraint_3); + auto accumulator_high_limbs_range_constraint_4 = View(in.accumulator_high_limbs_range_constraint_4); + auto accumulator_high_limbs_range_constraint_0_shift = View(in.accumulator_high_limbs_range_constraint_0_shift); + auto accumulator_high_limbs_range_constraint_1_shift = View(in.accumulator_high_limbs_range_constraint_1_shift); + auto accumulator_high_limbs_range_constraint_2_shift = View(in.accumulator_high_limbs_range_constraint_2_shift); + auto accumulator_high_limbs_range_constraint_3_shift = View(in.accumulator_high_limbs_range_constraint_3_shift); + auto op = View(in.op); + auto lagrange_even_in_minicircuit = View(in.lagrange_even_in_minicircuit); + auto not_even_or_no_op_scaled = lagrange_even_in_minicircuit * op * scaling_factor; + + // Contribution 1, accumulator lowest limb decomposition + auto tmp_1 = + ((accumulator_low_limbs_range_constraint_0 + accumulator_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + accumulator_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + accumulator_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + accumulator_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + accumulators_binary_limbs_0); + tmp_1 *= not_even_or_no_op_scaled; + std::get<0>(accumulators) += tmp_1; + + // Contribution 2, accumulator second limb decomposition + auto tmp_2 = ((accumulator_low_limbs_range_constraint_0_shift + + accumulator_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + accumulator_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + 
accumulator_low_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + + accumulator_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - + accumulators_binary_limbs_1); + tmp_2 *= not_even_or_no_op_scaled; + std::get<1>(accumulators) += tmp_2; + + // Contribution 3, accumulator second highest limb decomposition + auto tmp_3 = + ((accumulator_high_limbs_range_constraint_0 + accumulator_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + accumulator_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + accumulator_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + accumulator_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + accumulators_binary_limbs_2); + tmp_3 *= not_even_or_no_op_scaled; + std::get<2>(accumulators) += tmp_3; + + // Contribution 4, accumulator highest limb decomposition + auto tmp_4 = ((accumulator_high_limbs_range_constraint_0_shift + + accumulator_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + accumulator_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + accumulator_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3) - + accumulators_binary_limbs_3); + tmp_4 *= not_even_or_no_op_scaled; + std::get<3>(accumulators) += tmp_4; + }(); + + [&]() { + using Accumulator = std::tuple_element_t<4, ContainerOverSubrelations>; + using View = typename Accumulator::View; + + // Value to multiply an element by to perform an appropriate shift + static auto LIMB_SHIFT = FF(uint256_t(1) << NUM_LIMB_BITS); + + // Values to multiply an element by to perform an appropriate shift + static auto MICRO_LIMB_SHIFT = FF(uint256_t(1) << NUM_MICRO_LIMB_BITS); + static auto MICRO_LIMB_SHIFTx2 = MICRO_LIMB_SHIFT * MICRO_LIMB_SHIFT; + static auto MICRO_LIMB_SHIFTx3 = MICRO_LIMB_SHIFTx2 * MICRO_LIMB_SHIFT; + static auto MICRO_LIMB_SHIFTx4 = MICRO_LIMB_SHIFTx3 * MICRO_LIMB_SHIFT; + static auto MICRO_LIMB_SHIFTx5 = MICRO_LIMB_SHIFTx4 * MICRO_LIMB_SHIFT; + + auto accumulator_low_limbs_range_constraint_4 = 
View(in.accumulator_low_limbs_range_constraint_4); + auto accumulator_low_limbs_range_constraint_4_shift = View(in.accumulator_low_limbs_range_constraint_4_shift); + auto accumulator_high_limbs_range_constraint_4 = View(in.accumulator_high_limbs_range_constraint_4); + + // Shifts used to constrain ranges further + static auto SHIFT_12_TO_14 = + FF(4); // Shift used to range constrain the last microlimb of 68-bit limbs (standard limbs) + static auto SHIFT_10_TO_14 = + FF(16); // Shift used to range constrain the last microlimb of 52-bit limb (top quotient limb) + static auto SHIFT_8_TO_14 = FF(64); // Shift used to range constrain the last microlimb of 50-bit + // limbs (top limb of standard 254-bit value) + static auto SHIFT_4_TO_14 = + FF(1024); // Shift used to range constrain the last microlimb of 60-bit limbs from z scalars + + auto p_x_low_limbs = View(in.p_x_low_limbs); + auto p_x_low_limbs_range_constraint_0 = View(in.p_x_low_limbs_range_constraint_0); + auto p_x_low_limbs_range_constraint_1 = View(in.p_x_low_limbs_range_constraint_1); + auto p_x_low_limbs_range_constraint_2 = View(in.p_x_low_limbs_range_constraint_2); + auto p_x_low_limbs_range_constraint_3 = View(in.p_x_low_limbs_range_constraint_3); + auto p_x_low_limbs_range_constraint_4 = View(in.p_x_low_limbs_range_constraint_4); + auto p_x_low_limbs_shift = View(in.p_x_low_limbs_shift); + auto p_x_low_limbs_range_constraint_0_shift = View(in.p_x_low_limbs_range_constraint_0_shift); + auto p_x_low_limbs_range_constraint_1_shift = View(in.p_x_low_limbs_range_constraint_1_shift); + auto p_x_low_limbs_range_constraint_2_shift = View(in.p_x_low_limbs_range_constraint_2_shift); + auto p_x_low_limbs_range_constraint_3_shift = View(in.p_x_low_limbs_range_constraint_3_shift); + auto p_x_low_limbs_range_constraint_4_shift = View(in.p_x_low_limbs_range_constraint_4_shift); + auto p_x_high_limbs = View(in.p_x_high_limbs); + auto p_x_high_limbs_range_constraint_0 = View(in.p_x_high_limbs_range_constraint_0); + 
auto p_x_high_limbs_range_constraint_1 = View(in.p_x_high_limbs_range_constraint_1); + auto p_x_high_limbs_range_constraint_2 = View(in.p_x_high_limbs_range_constraint_2); + auto p_x_high_limbs_range_constraint_3 = View(in.p_x_high_limbs_range_constraint_3); + auto p_x_high_limbs_range_constraint_4 = View(in.p_x_high_limbs_range_constraint_4); + auto p_x_high_limbs_shift = View(in.p_x_high_limbs_shift); + auto p_x_high_limbs_range_constraint_0_shift = View(in.p_x_high_limbs_range_constraint_0_shift); + auto p_x_high_limbs_range_constraint_1_shift = View(in.p_x_high_limbs_range_constraint_1_shift); + auto p_x_high_limbs_range_constraint_2_shift = View(in.p_x_high_limbs_range_constraint_2_shift); + auto p_x_high_limbs_range_constraint_3_shift = View(in.p_x_high_limbs_range_constraint_3_shift); + auto p_y_low_limbs = View(in.p_y_low_limbs); + auto p_y_low_limbs_range_constraint_0 = View(in.p_y_low_limbs_range_constraint_0); + auto p_y_low_limbs_range_constraint_1 = View(in.p_y_low_limbs_range_constraint_1); + auto p_y_low_limbs_range_constraint_2 = View(in.p_y_low_limbs_range_constraint_2); + auto p_y_low_limbs_range_constraint_3 = View(in.p_y_low_limbs_range_constraint_3); + auto p_y_low_limbs_range_constraint_4 = View(in.p_y_low_limbs_range_constraint_4); + auto p_y_low_limbs_shift = View(in.p_y_low_limbs_shift); + auto p_y_low_limbs_range_constraint_0_shift = View(in.p_y_low_limbs_range_constraint_0_shift); + auto p_y_low_limbs_range_constraint_1_shift = View(in.p_y_low_limbs_range_constraint_1_shift); + auto p_y_low_limbs_range_constraint_2_shift = View(in.p_y_low_limbs_range_constraint_2_shift); + auto p_y_low_limbs_range_constraint_3_shift = View(in.p_y_low_limbs_range_constraint_3_shift); + auto p_y_low_limbs_range_constraint_4_shift = View(in.p_y_low_limbs_range_constraint_4_shift); + auto p_y_high_limbs = View(in.p_y_high_limbs); + auto p_y_high_limbs_range_constraint_0 = View(in.p_y_high_limbs_range_constraint_0); + auto p_y_high_limbs_range_constraint_1 = 
View(in.p_y_high_limbs_range_constraint_1); + auto p_y_high_limbs_range_constraint_2 = View(in.p_y_high_limbs_range_constraint_2); + auto p_y_high_limbs_range_constraint_3 = View(in.p_y_high_limbs_range_constraint_3); + auto p_y_high_limbs_range_constraint_4 = View(in.p_y_high_limbs_range_constraint_4); + auto p_y_high_limbs_shift = View(in.p_y_high_limbs_shift); + auto p_y_high_limbs_range_constraint_0_shift = View(in.p_y_high_limbs_range_constraint_0_shift); + auto p_y_high_limbs_range_constraint_1_shift = View(in.p_y_high_limbs_range_constraint_1_shift); + auto p_y_high_limbs_range_constraint_2_shift = View(in.p_y_high_limbs_range_constraint_2_shift); + auto p_y_high_limbs_range_constraint_3_shift = View(in.p_y_high_limbs_range_constraint_3_shift); + auto z_low_limbs = View(in.z_low_limbs); + auto z_low_limbs_range_constraint_0 = View(in.z_low_limbs_range_constraint_0); + auto z_low_limbs_range_constraint_1 = View(in.z_low_limbs_range_constraint_1); + auto z_low_limbs_range_constraint_2 = View(in.z_low_limbs_range_constraint_2); + auto z_low_limbs_range_constraint_3 = View(in.z_low_limbs_range_constraint_3); + auto z_low_limbs_range_constraint_4 = View(in.z_low_limbs_range_constraint_4); + auto z_low_limbs_shift = View(in.z_low_limbs_shift); + auto z_low_limbs_range_constraint_0_shift = View(in.z_low_limbs_range_constraint_0_shift); + auto z_low_limbs_range_constraint_1_shift = View(in.z_low_limbs_range_constraint_1_shift); + auto z_low_limbs_range_constraint_2_shift = View(in.z_low_limbs_range_constraint_2_shift); + auto z_low_limbs_range_constraint_3_shift = View(in.z_low_limbs_range_constraint_3_shift); + auto z_low_limbs_range_constraint_4_shift = View(in.z_low_limbs_range_constraint_4_shift); + auto z_high_limbs = View(in.z_high_limbs); + auto z_high_limbs_range_constraint_0 = View(in.z_high_limbs_range_constraint_0); + auto z_high_limbs_range_constraint_1 = View(in.z_high_limbs_range_constraint_1); + auto z_high_limbs_range_constraint_2 = 
View(in.z_high_limbs_range_constraint_2); + auto z_high_limbs_range_constraint_3 = View(in.z_high_limbs_range_constraint_3); + auto z_high_limbs_range_constraint_4 = View(in.z_high_limbs_range_constraint_4); + auto z_high_limbs_shift = View(in.z_high_limbs_shift); + auto z_high_limbs_range_constraint_0_shift = View(in.z_high_limbs_range_constraint_0_shift); + auto z_high_limbs_range_constraint_1_shift = View(in.z_high_limbs_range_constraint_1_shift); + auto z_high_limbs_range_constraint_2_shift = View(in.z_high_limbs_range_constraint_2_shift); + auto z_high_limbs_range_constraint_3_shift = View(in.z_high_limbs_range_constraint_3_shift); + auto z_high_limbs_range_constraint_4_shift = View(in.z_high_limbs_range_constraint_4_shift); + auto quotient_low_binary_limbs = View(in.quotient_low_binary_limbs); + auto quotient_low_limbs_range_constraint_0 = View(in.quotient_low_limbs_range_constraint_0); + auto quotient_low_limbs_range_constraint_1 = View(in.quotient_low_limbs_range_constraint_1); + auto quotient_low_limbs_range_constraint_2 = View(in.quotient_low_limbs_range_constraint_2); + auto quotient_low_limbs_range_constraint_3 = View(in.quotient_low_limbs_range_constraint_3); + auto quotient_low_limbs_range_constraint_4 = View(in.quotient_low_limbs_range_constraint_4); + auto quotient_low_binary_limbs_shift = View(in.quotient_low_binary_limbs_shift); + auto quotient_low_limbs_range_constraint_0_shift = View(in.quotient_low_limbs_range_constraint_0_shift); + auto quotient_low_limbs_range_constraint_1_shift = View(in.quotient_low_limbs_range_constraint_1_shift); + auto quotient_low_limbs_range_constraint_2_shift = View(in.quotient_low_limbs_range_constraint_2_shift); + auto quotient_low_limbs_range_constraint_3_shift = View(in.quotient_low_limbs_range_constraint_3_shift); + auto quotient_low_limbs_range_constraint_4_shift = View(in.quotient_low_limbs_range_constraint_4_shift); + auto quotient_high_binary_limbs = View(in.quotient_high_binary_limbs); + auto 
quotient_high_limbs_range_constraint_0 = View(in.quotient_high_limbs_range_constraint_0); + auto quotient_high_limbs_range_constraint_1 = View(in.quotient_high_limbs_range_constraint_1); + auto quotient_high_limbs_range_constraint_2 = View(in.quotient_high_limbs_range_constraint_2); + auto quotient_high_limbs_range_constraint_3 = View(in.quotient_high_limbs_range_constraint_3); + auto quotient_high_limbs_range_constraint_4 = View(in.quotient_high_limbs_range_constraint_4); + auto quotient_high_binary_limbs_shift = View(in.quotient_high_binary_limbs_shift); + auto quotient_high_limbs_range_constraint_0_shift = View(in.quotient_high_limbs_range_constraint_0_shift); + auto quotient_high_limbs_range_constraint_1_shift = View(in.quotient_high_limbs_range_constraint_1_shift); + auto quotient_high_limbs_range_constraint_2_shift = View(in.quotient_high_limbs_range_constraint_2_shift); + auto quotient_high_limbs_range_constraint_3_shift = View(in.quotient_high_limbs_range_constraint_3_shift); + auto relation_wide_limbs = View(in.relation_wide_limbs); + auto relation_wide_limbs_range_constraint_0 = View(in.relation_wide_limbs_range_constraint_0); + auto relation_wide_limbs_range_constraint_1 = View(in.relation_wide_limbs_range_constraint_1); + auto relation_wide_limbs_range_constraint_2 = View(in.relation_wide_limbs_range_constraint_2); + auto relation_wide_limbs_range_constraint_3 = View(in.relation_wide_limbs_range_constraint_3); + auto p_x_high_limbs_range_constraint_tail_shift = View(in.p_x_high_limbs_range_constraint_tail_shift); + auto accumulator_high_limbs_range_constraint_tail_shift = + View(in.accumulator_high_limbs_range_constraint_tail_shift); + auto relation_wide_limbs_shift = View(in.relation_wide_limbs_shift); + auto relation_wide_limbs_range_constraint_0_shift = View(in.relation_wide_limbs_range_constraint_0_shift); + auto relation_wide_limbs_range_constraint_1_shift = View(in.relation_wide_limbs_range_constraint_1_shift); + auto 
relation_wide_limbs_range_constraint_2_shift = View(in.relation_wide_limbs_range_constraint_2_shift); + auto relation_wide_limbs_range_constraint_3_shift = View(in.relation_wide_limbs_range_constraint_3_shift); + auto p_y_high_limbs_range_constraint_tail_shift = View(in.p_y_high_limbs_range_constraint_tail_shift); + auto quotient_high_limbs_range_constraint_tail_shift = View(in.quotient_high_limbs_range_constraint_tail_shift); + auto p_x_low_limbs_range_constraint_tail = View(in.p_x_low_limbs_range_constraint_tail); + auto p_x_low_limbs_range_constraint_tail_shift = View(in.p_x_low_limbs_range_constraint_tail_shift); + auto p_x_high_limbs_range_constraint_tail = View(in.p_x_high_limbs_range_constraint_tail); + auto p_x_high_limbs_range_constraint_4_shift = View(in.p_x_high_limbs_range_constraint_4_shift); + auto p_y_low_limbs_range_constraint_tail = View(in.p_y_low_limbs_range_constraint_tail); + auto p_y_low_limbs_range_constraint_tail_shift = View(in.p_y_low_limbs_range_constraint_tail_shift); + auto p_y_high_limbs_range_constraint_tail = View(in.p_y_high_limbs_range_constraint_tail); + auto p_y_high_limbs_range_constraint_4_shift = View(in.p_y_high_limbs_range_constraint_4_shift); + auto z_low_limbs_range_constraint_tail = View(in.z_low_limbs_range_constraint_tail); + auto z_low_limbs_range_constraint_tail_shift = View(in.z_low_limbs_range_constraint_tail_shift); + auto z_high_limbs_range_constraint_tail = View(in.z_high_limbs_range_constraint_tail); + auto z_high_limbs_range_constraint_tail_shift = View(in.z_high_limbs_range_constraint_tail_shift); + auto accumulator_low_limbs_range_constraint_tail = View(in.accumulator_low_limbs_range_constraint_tail); + auto accumulator_low_limbs_range_constraint_tail_shift = + View(in.accumulator_low_limbs_range_constraint_tail_shift); + auto accumulator_high_limbs_range_constraint_tail = View(in.accumulator_high_limbs_range_constraint_tail); + auto accumulator_high_limbs_range_constraint_3_shift = 
View(in.accumulator_high_limbs_range_constraint_3_shift); + auto accumulator_high_limbs_range_constraint_4_shift = View(in.accumulator_high_limbs_range_constraint_4_shift); + auto quotient_low_limbs_range_constraint_tail = View(in.quotient_low_limbs_range_constraint_tail); + auto quotient_low_limbs_range_constraint_tail_shift = View(in.quotient_low_limbs_range_constraint_tail_shift); + auto quotient_high_limbs_range_constraint_tail = View(in.quotient_high_limbs_range_constraint_tail); + auto quotient_high_limbs_range_constraint_4_shift = View(in.quotient_high_limbs_range_constraint_4_shift); + auto x_lo_y_hi = View(in.x_lo_y_hi); + auto x_hi_z_1 = View(in.x_hi_z_1); + auto y_lo_z_2 = View(in.y_lo_z_2); + auto x_lo_y_hi_shift = View(in.x_lo_y_hi_shift); + auto x_hi_z_1_shift = View(in.x_hi_z_1_shift); + auto y_lo_z_2_shift = View(in.y_lo_z_2_shift); + auto lagrange_even_in_minicircuit = View(in.lagrange_even_in_minicircuit); + + // Contribution 5 , P_y lowest limb decomposition + auto tmp_5 = ((p_y_low_limbs_range_constraint_0 + p_y_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + p_y_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + p_y_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + p_y_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + p_y_low_limbs); + tmp_5 *= lagrange_even_in_minicircuit; + tmp_5 *= scaling_factor; + std::get<4>(accumulators) += tmp_5; + + // Contribution 6 , P_y second lowest limb decomposition + auto tmp_6 = + ((p_y_low_limbs_range_constraint_0_shift + p_y_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + p_y_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + p_y_low_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + + p_y_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - + p_y_low_limbs_shift); + tmp_6 *= lagrange_even_in_minicircuit; + tmp_6 *= scaling_factor; + std::get<5>(accumulators) += tmp_6; + + // Contribution 7 , P_y third limb decomposition + auto tmp_7 = 
((p_y_high_limbs_range_constraint_0 + p_y_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + p_y_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + p_y_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + p_y_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + p_y_high_limbs); + tmp_7 *= lagrange_even_in_minicircuit; + tmp_7 *= scaling_factor; + std::get<6>(accumulators) += tmp_7; + + // Contribution 8 , P_y highest limb decomposition + auto tmp_8 = + ((p_y_high_limbs_range_constraint_0_shift + p_y_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + p_y_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + p_y_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3) - + p_y_high_limbs_shift); + tmp_8 *= lagrange_even_in_minicircuit; + tmp_8 *= scaling_factor; + std::get<7>(accumulators) += tmp_8; + + // Contribution 9 , z_1 low limb decomposition + auto tmp_9 = ((z_low_limbs_range_constraint_0 + z_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + z_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + z_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + z_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + z_low_limbs); + tmp_9 *= lagrange_even_in_minicircuit; + tmp_9 *= scaling_factor; + std::get<8>(accumulators) += tmp_9; + + // Contribution 10 , z_2 low limb decomposition + auto tmp_10 = ((z_low_limbs_range_constraint_0_shift + z_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + z_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + z_low_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + + z_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - + z_low_limbs_shift); + tmp_10 *= lagrange_even_in_minicircuit; + tmp_10 *= scaling_factor; + std::get<9>(accumulators) += tmp_10; + + // Contribution 11 , z_1 high limb decomposition + auto tmp_11 = ((z_high_limbs_range_constraint_0 + z_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + z_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + 
z_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + z_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + z_high_limbs); + tmp_11 *= lagrange_even_in_minicircuit; + tmp_11 *= scaling_factor; + std::get<10>(accumulators) += tmp_11; + + // Contribution 12 , z_2 high limb decomposition + auto tmp_12 = + ((z_high_limbs_range_constraint_0_shift + z_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + z_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + z_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + + z_high_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - + z_high_limbs_shift); + tmp_12 *= lagrange_even_in_minicircuit; + tmp_12 *= scaling_factor; + std::get<11>(accumulators) += tmp_12; + + // Contributions that decompose 50, 52, 68 or 84 bit limbs used for computation into range-constrained chunks + // Contribution 13, P_x lowest limb decomposition + auto tmp_13 = ((p_x_low_limbs_range_constraint_0 + p_x_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + p_x_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + p_x_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + p_x_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + p_x_low_limbs); + tmp_13 *= lagrange_even_in_minicircuit; + tmp_13 *= scaling_factor; + std::get<12>(accumulators) += tmp_13; + + // Contribution 14 , P_x second lowest limb decomposition + auto tmp_14 = + ((p_x_low_limbs_range_constraint_0_shift + p_x_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + p_x_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + p_x_low_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + + p_x_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - + p_x_low_limbs_shift); + tmp_14 *= lagrange_even_in_minicircuit; + tmp_14 *= scaling_factor; + std::get<13>(accumulators) += tmp_14; + + // Contribution 15 , P_x third limb decomposition + auto tmp_15 = ((p_x_high_limbs_range_constraint_0 + p_x_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + 
p_x_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + p_x_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + p_x_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + p_x_high_limbs); + tmp_15 *= lagrange_even_in_minicircuit; + tmp_15 *= scaling_factor; + std::get<14>(accumulators) += tmp_15; + + // Contribution 16 , P_x highest limb decomposition + auto tmp_16 = + ((p_x_high_limbs_range_constraint_0_shift + p_x_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + p_x_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + p_x_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3) - + p_x_high_limbs_shift); + tmp_16 *= lagrange_even_in_minicircuit; + tmp_16 *= scaling_factor; + std::get<15>(accumulators) += tmp_16; + + // Contribution 17 , quotient lowest limb decomposition + auto tmp_17 = + ((quotient_low_limbs_range_constraint_0 + quotient_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + quotient_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + quotient_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + quotient_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + quotient_low_binary_limbs); + tmp_17 *= lagrange_even_in_minicircuit; + tmp_17 *= scaling_factor; + std::get<16>(accumulators) += tmp_17; + // Contribution 18 , quotient second lowest limb decomposition + auto tmp_18 = ((quotient_low_limbs_range_constraint_0_shift + + quotient_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + quotient_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + quotient_low_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + + quotient_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - + quotient_low_binary_limbs_shift); + tmp_18 *= lagrange_even_in_minicircuit; + tmp_18 *= scaling_factor; + std::get<17>(accumulators) += tmp_18; + + // Contribution 19 , quotient second highest limb decomposition + auto tmp_19 = + ((quotient_high_limbs_range_constraint_0 + quotient_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + 
quotient_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + quotient_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + quotient_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + quotient_high_binary_limbs); + tmp_19 *= lagrange_even_in_minicircuit; + tmp_19 *= scaling_factor; + std::get<18>(accumulators) += tmp_19; + // Contribution 20 , quotient highest limb decomposition + auto tmp_20 = ((quotient_high_limbs_range_constraint_0_shift + + quotient_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + quotient_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + quotient_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3) - + quotient_high_binary_limbs_shift); + tmp_20 *= lagrange_even_in_minicircuit; + tmp_20 *= scaling_factor; + std::get<19>(accumulators) += tmp_20; + + // Contribution 21 , decomposition of the low wide relation limb used for the bigfield relation. + // N.B. top microlimbs of relation wide limbs are stored in microlimbs for range constraints of P_x, P_y, + // accumulator and quotient. 
This is to save space and because these microlimbs are not used by their namesakes, + // since top limbs in 254/6-bit values use one less microlimb for the top 50/52-bit limb + auto tmp_21 = + ((relation_wide_limbs_range_constraint_0 + relation_wide_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + relation_wide_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + relation_wide_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + p_x_high_limbs_range_constraint_tail_shift * MICRO_LIMB_SHIFTx4 + + accumulator_high_limbs_range_constraint_tail_shift * MICRO_LIMB_SHIFTx5) - + relation_wide_limbs); + tmp_21 *= lagrange_even_in_minicircuit; + tmp_21 *= scaling_factor; + std::get<20>(accumulators) += tmp_21; + + // Contribution 22 , decomposition of high relation limb + auto tmp_22 = ((relation_wide_limbs_range_constraint_0_shift + + relation_wide_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + relation_wide_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + relation_wide_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + + p_y_high_limbs_range_constraint_tail_shift * MICRO_LIMB_SHIFTx4 + + quotient_high_limbs_range_constraint_tail_shift * MICRO_LIMB_SHIFTx5) - + relation_wide_limbs_shift); + tmp_22 *= lagrange_even_in_minicircuit; + tmp_22 *= scaling_factor; + std::get<21>(accumulators) += tmp_22; + + // Contributions enforcing a reduced range constraint on high limbs (these relations force the last microlimb in + // each limb to be more severely range constrained) + + // Contribution 23, range constrain the highest microlimb of lowest P.x limb to be 12 bits (68 % 14 = 12) + auto tmp_23 = p_x_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - p_x_low_limbs_range_constraint_tail; + tmp_23 *= lagrange_even_in_minicircuit; + tmp_23 *= scaling_factor; + std::get<22>(accumulators) += tmp_23; + + // Contribution 24, range constrain the highest microlimb of second lowest P.x limb to be 12 bits + auto tmp_24 = + p_x_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - 
p_x_low_limbs_range_constraint_tail_shift; + tmp_24 *= lagrange_even_in_minicircuit; + tmp_24 *= scaling_factor; + std::get<23>(accumulators) += tmp_24; + + // Contribution 25, range constrain the highest microlimb of second highest P.x limb to be 12 bits + auto tmp_25 = p_x_high_limbs_range_constraint_4 * SHIFT_12_TO_14 - p_x_high_limbs_range_constraint_tail; + tmp_25 *= lagrange_even_in_minicircuit; + tmp_25 *= scaling_factor; + std::get<24>(accumulators) += tmp_25; + + // Contribution 26, range constrain the highest microlimb of highest P.x limb to be 8 bits (50 % 14 = 8) + auto tmp_26 = + (p_x_high_limbs_range_constraint_3_shift * SHIFT_8_TO_14 - p_x_high_limbs_range_constraint_4_shift); + + tmp_26 *= lagrange_even_in_minicircuit; + tmp_26 *= scaling_factor; + std::get<25>(accumulators) += tmp_26; + + // Contribution 27, range constrain the highest microlimb of lowest P.y limb to be 12 bits (68 % 14 = 12) + auto tmp_27 = p_y_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - p_y_low_limbs_range_constraint_tail; + tmp_27 *= lagrange_even_in_minicircuit; + tmp_27 *= scaling_factor; + std::get<26>(accumulators) += tmp_27; + + // Contribution 28, range constrain the highest microlimb of second lowest P.y limb to be 12 bits (68 % 14 = 12) + auto tmp_28 = + p_y_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - p_y_low_limbs_range_constraint_tail_shift; + tmp_28 *= lagrange_even_in_minicircuit; + tmp_28 *= scaling_factor; + std::get<27>(accumulators) += tmp_28; + + // Contribution 29, range constrain the highest microlimb of second highest P.y limb to be 12 bits (68 % 14 = + // 12) + auto tmp_29 = p_y_high_limbs_range_constraint_4 * SHIFT_12_TO_14 - p_y_high_limbs_range_constraint_tail; + tmp_29 *= lagrange_even_in_minicircuit; + tmp_29 *= scaling_factor; + std::get<28>(accumulators) += tmp_29; + + // Contribution 30, range constrain the highest microlimb of highest P.y limb to be 8 bits (50 % 14 = 8) + auto tmp_30 = + (p_y_high_limbs_range_constraint_3_shift * 
SHIFT_8_TO_14 - p_y_high_limbs_range_constraint_4_shift); + + tmp_30 *= lagrange_even_in_minicircuit; + tmp_30 *= scaling_factor; + std::get<29>(accumulators) += tmp_30; + + // Contribution 31, range constrain the highest microlimb of low z1 limb to be 12 bits (68 % 14 = 12) + auto tmp_31 = (z_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - z_low_limbs_range_constraint_tail); + tmp_31 *= lagrange_even_in_minicircuit; + tmp_31 *= scaling_factor; + std::get<30>(accumulators) += tmp_31; + + // Contribution 32, range constrain the highest microlimb of low z2 limb to be 12 bits (68 % 14 = 12) + auto tmp_32 = (z_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - z_low_limbs_range_constraint_tail_shift); + tmp_32 *= lagrange_even_in_minicircuit; + tmp_32 *= scaling_factor; + std::get<31>(accumulators) += tmp_32; + + // Contribution 33, range constrain the highest microlimb of high z1 limb to be 4 bits (60 % 14 = 4) + auto tmp_33 = (z_high_limbs_range_constraint_4 * SHIFT_4_TO_14 - z_high_limbs_range_constraint_tail); + tmp_33 *= lagrange_even_in_minicircuit; + tmp_33 *= scaling_factor; + std::get<32>(accumulators) += tmp_33; + + // Contribution 34, range constrain the highest microlimb of high z2 limb to be 4 bits (60 % 14 = 4) + auto tmp_34 = + (z_high_limbs_range_constraint_4_shift * SHIFT_4_TO_14 - z_high_limbs_range_constraint_tail_shift); + tmp_34 *= lagrange_even_in_minicircuit; + tmp_34 *= scaling_factor; + std::get<33>(accumulators) += tmp_34; + + // Contribution 35, range constrain the highest microlimb of lowest current accumulator limb to be 12 bits (68 % + // 14 = 12) + auto tmp_35 = + (accumulator_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - accumulator_low_limbs_range_constraint_tail); + tmp_35 *= lagrange_even_in_minicircuit; + tmp_35 *= scaling_factor; + std::get<34>(accumulators) += tmp_35; + + // Contribution 36, range constrain the highest microlimb of second lowest current accumulator limb to be 12 + // bits (68 % 14 = 12) + auto tmp_36 = 
(accumulator_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - + accumulator_low_limbs_range_constraint_tail_shift); + tmp_36 *= lagrange_even_in_minicircuit; + tmp_36 *= scaling_factor; + std::get<35>(accumulators) += tmp_36; + + // Contribution 37, range constrain the highest microlimb of second highest current accumulator limb to be 12 + // bits (68 % 14 = 12) + auto tmp_37 = + (accumulator_high_limbs_range_constraint_4 * SHIFT_12_TO_14 - accumulator_high_limbs_range_constraint_tail); + tmp_37 *= lagrange_even_in_minicircuit; + tmp_37 *= scaling_factor; + std::get<36>(accumulators) += tmp_37; + + // Contribution 38, range constrain the highest microlimb of highest current accumulator limb to be 8 bits (50 % + // 14 = 8) + auto tmp_38 = (accumulator_high_limbs_range_constraint_3_shift * SHIFT_8_TO_14 - + accumulator_high_limbs_range_constraint_4_shift); + tmp_38 *= lagrange_even_in_minicircuit; + tmp_38 *= scaling_factor; + std::get<37>(accumulators) += tmp_38; + + // Contribution 39, range constrain the highest microlimb of lowest quotient limb to be 12 bits (68 % 14 = 12) + auto tmp_39 = + (quotient_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - quotient_low_limbs_range_constraint_tail); + tmp_39 *= lagrange_even_in_minicircuit; + tmp_39 *= scaling_factor; + std::get<38>(accumulators) += tmp_39; + + // Contribution 40, range constrain the highest microlimb of second lowest quotient limb to be 12 bits (68 % 14 + // = 12) + auto tmp_40 = (quotient_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - + quotient_low_limbs_range_constraint_tail_shift); + tmp_40 *= lagrange_even_in_minicircuit; + tmp_40 *= scaling_factor; + std::get<39>(accumulators) += tmp_40; + + // Contribution 41, range constrain the highest microlimb of second highest quotient limb to be 12 bits (68 % 14 + // = 12) + auto tmp_41 = + (quotient_high_limbs_range_constraint_4 * SHIFT_12_TO_14 - quotient_high_limbs_range_constraint_tail); + tmp_41 *= lagrange_even_in_minicircuit; + tmp_41 *= 
scaling_factor; + std::get<40>(accumulators) += tmp_41; + + // Contribution 42, range constrain the highest microlimb of highest quotient limb to be 10 bits (52 % 14 = 10) + auto tmp_42 = (quotient_high_limbs_range_constraint_3_shift * SHIFT_10_TO_14 - + quotient_high_limbs_range_constraint_4_shift); + tmp_42 *= lagrange_even_in_minicircuit; + tmp_42 *= scaling_factor; + std::get<41>(accumulators) += tmp_42; + + // Contributions where we decompose initial EccOpQueue values into 68-bit limbs + + // Contribution 43, decompose x_lo + auto tmp_43 = (p_x_low_limbs + p_x_low_limbs_shift * LIMB_SHIFT) - x_lo_y_hi; + tmp_43 *= lagrange_even_in_minicircuit; + tmp_43 *= scaling_factor; + std::get<42>(accumulators) += tmp_43; + + // Contribution 44, decompose x_hi + auto tmp_44 = (p_x_high_limbs + p_x_high_limbs_shift * LIMB_SHIFT) - x_hi_z_1; + tmp_44 *= lagrange_even_in_minicircuit; + tmp_44 *= scaling_factor; + std::get<43>(accumulators) += tmp_44; + // Contribution 45, decompose y_lo + auto tmp_45 = (p_y_low_limbs + p_y_low_limbs_shift * LIMB_SHIFT) - y_lo_z_2; + tmp_45 *= lagrange_even_in_minicircuit; + tmp_45 *= scaling_factor; + std::get<44>(accumulators) += tmp_45; + + // Contribution 46, decompose y_hi + auto tmp_46 = (p_y_high_limbs + p_y_high_limbs_shift * LIMB_SHIFT) - x_lo_y_hi_shift; + tmp_46 *= lagrange_even_in_minicircuit; + tmp_46 *= scaling_factor; + std::get<45>(accumulators) += tmp_46; + + // Contribution 47, decompose z1 + auto tmp_47 = (z_low_limbs + z_high_limbs * LIMB_SHIFT) - x_hi_z_1_shift; + tmp_47 *= lagrange_even_in_minicircuit; + tmp_47 *= scaling_factor; + std::get<46>(accumulators) += tmp_47; + + // Contribution 48, decompose z2 + auto tmp_48 = (z_low_limbs_shift + z_high_limbs_shift * LIMB_SHIFT) - y_lo_z_2_shift; + tmp_48 *= lagrange_even_in_minicircuit; + tmp_48 *= scaling_factor; + std::get<47>(accumulators) += tmp_48; + }(); }; } // namespace bb diff --git 
a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_extra_relations.hpp b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_extra_relations.hpp index 344988a16082..060839382f9e 100644 --- a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_extra_relations.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_extra_relations.hpp @@ -14,16 +14,24 @@ template class TranslatorOpcodeConstraintRelationImpl { using FF = FF_; // 1 + polynomial degree of this relation - static constexpr size_t RELATION_LENGTH = 6; // degree((lagrange_masking - 1)⋅op ⋅(op - 3)⋅(op - 4)⋅(op - 8)) = 5 - static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ - 6 // opcode constraint relation + static constexpr size_t RELATION_LENGTH = 6; + static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ + 6, // opcode constraint relation + 6, // opcode constraint relation + 6, // opcode constraint relation + 6, // opcode constraint relation + 6 // opcode constraint relation }; /** * @brief Returns true if the contribution from all subrelations for the provided inputs is identically zero * */ - template inline static bool skip(const AllEntities& in) { return in.op.is_zero(); } + template static bool skip(const AllEntities& in) + { + // All contributions are zero outside the minicircuit or at odd indices not masked + return (in.lagrange_even_in_minicircuit + in.lagrange_mini_masking).is_zero(); + } /** * @brief Expression for enforcing the value of the Opcode to be {0,3,4,8} * @details This relation enforces the opcode to be one of described values. Since we don't care about even @@ -72,9 +80,13 @@ template class TranslatorAccumulatorTransferRelationImpl { * slower. 
* */ - template inline static bool skip(const AllEntities& in) + template static bool skip(const AllEntities& in) { - return (in.lagrange_odd_in_minicircuit + in.lagrange_last_in_minicircuit + in.lagrange_result_row).is_zero(); + // All contributions are zero outside the minicircuit or at even indices within the minicircuit excluding + // masked areas (except from the last and result row in minicircuit) + return (in.lagrange_odd_in_minicircuit + in.lagrange_last_in_minicircuit + in.lagrange_result_row + + in.lagrange_mini_masking) + .is_zero(); } /** * @brief Relation enforcing non-arithmetic transitions of accumulator (value that is tracking the batched @@ -103,7 +115,7 @@ template class TranslatorZeroConstraintsRelationImpl { // 1 + polynomial degree of this relation static constexpr size_t RELATION_LENGTH = 4; // degree((some lagrange)(A)) = 2 - static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ + static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ 4, // p_x_low_limbs_range_constraint_0 is zero outside of the minicircuit 4, // p_x_low_limbs_range_constraint_1 is zero outside of the minicircuit 4, // p_x_low_limbs_range_constraint_2 is zero outside of the minicircuit @@ -168,18 +180,25 @@ template class TranslatorZeroConstraintsRelationImpl { 4, // accumulator_high_limbs_range_constraint_tail is zero outside of the minicircuit 4, // quotient_low_limbs_range_constraint_tail is zero outside of the minicircuit 4, // quotient_high_limbs_range_constraint_tail is zero outside of the minicircuit + 4, // op is zero outside of the minicircuit + 4, // x_lo_y_hi is zero outside of the minicircuit + 4, // x_hi_z_1 is zero outside of the minicircuit + 4, // y_lo_z_2 is zero outside of the minicircuit }; /** - * @brief Might return true if the contribution from all subrelations for the provided inputs is identically zero + * @brief Returns true if the contribution from all subrelations for the provided inputs is identically zero * * */ - template inline static bool 
skip(const AllEntities& in) + template static bool skip(const AllEntities& in) { + // All contributions are identically zero if outside the minicircuit and masked area or when we have a + // no-op (i.e. op is zero at an even index) static constexpr auto minus_one = -FF(1); - return (in.lagrange_even_in_minicircuit + in.lagrange_last_in_minicircuit + minus_one).is_zero(); + return (in.lagrange_even_in_minicircuit + in.op + minus_one).is_zero() || + (in.lagrange_odd_in_minicircuit + in.lagrange_even_in_minicircuit + in.lagrange_mini_masking).is_zero(); } /** * @brief Relation enforcing all the range-constraint polynomials to be zero after the minicircuit diff --git a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_extra_relations_impl.hpp b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_extra_relations_impl.hpp index 860946261690..b370beaff391 100644 --- a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_extra_relations_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_extra_relations_impl.hpp @@ -45,6 +45,54 @@ void TranslatorOpcodeConstraintRelationImpl::accumulate(ContainerOverSubrela tmp_1 *= (lagrange_mini_masking + minus_one); tmp_1 *= scaling_factor; std::get<0>(accumulators) += tmp_1; + + auto lagrange_even_in_minicircuit = View(in.lagrange_even_in_minicircuit); + + auto accumulators_binary_limbs_0 = View(in.accumulators_binary_limbs_0); + auto accumulators_binary_limbs_1 = View(in.accumulators_binary_limbs_1); + auto accumulators_binary_limbs_2 = View(in.accumulators_binary_limbs_2); + auto accumulators_binary_limbs_3 = View(in.accumulators_binary_limbs_3); + auto accumulators_binary_limbs_0_shift = View(in.accumulators_binary_limbs_0_shift); + auto accumulators_binary_limbs_1_shift = View(in.accumulators_binary_limbs_1_shift); + auto accumulators_binary_limbs_2_shift = View(in.accumulators_binary_limbs_2_shift); + auto accumulators_binary_limbs_3_shift = 
View(in.accumulators_binary_limbs_3_shift); + + // Contribution (2) (2-5 ensure that the accumulator stays the same at even indices within the no-op range if + // one exists) + auto tmp_2 = (accumulators_binary_limbs_0 - accumulators_binary_limbs_0_shift); + tmp_2 *= (op + minus_three); + tmp_2 *= (op + minus_four); + tmp_2 *= (op + minus_eight); + tmp_2 *= lagrange_even_in_minicircuit; + tmp_2 *= scaling_factor; + std::get<1>(accumulators) += tmp_2; + + // Contribution (3) + auto tmp_3 = (accumulators_binary_limbs_1 - accumulators_binary_limbs_1_shift); + tmp_3 *= (op + minus_three); + tmp_3 *= (op + minus_four); + tmp_3 *= (op + minus_eight); + tmp_3 *= lagrange_even_in_minicircuit; + tmp_3 *= scaling_factor; + std::get<2>(accumulators) += tmp_3; + + // Contribution (4) + auto tmp_4 = (accumulators_binary_limbs_2 - accumulators_binary_limbs_2_shift); + tmp_4 *= (op + minus_three); + tmp_4 *= (op + minus_four); + tmp_4 *= (op + minus_eight); + tmp_4 *= lagrange_even_in_minicircuit; + tmp_4 *= scaling_factor; + std::get<3>(accumulators) += tmp_4; + + // Contribution (5) + auto tmp_5 = (accumulators_binary_limbs_3 - accumulators_binary_limbs_3_shift); + tmp_5 *= (op + minus_three); + tmp_5 *= (op + minus_four); + tmp_5 *= (op + minus_eight); + tmp_5 *= lagrange_even_in_minicircuit; + tmp_5 *= scaling_factor; + std::get<4>(accumulators) += tmp_5; }; /** @@ -75,8 +123,8 @@ void TranslatorAccumulatorTransferRelationImpl::accumulate(ContainerOverSubr // Lagrange ensuring the accumulator result is validated at the correct row auto lagrange_result_row = View(in.lagrange_result_row); - // Lagrange at index (size of minicircuit - 1) is used to enforce that the accumulator is initialized to zero in the - // circuit + // Lagrange at index (size of minicircuit without masking - 1) is used to enforce that the accumulator starts from + // zero auto lagrange_last_in_minicircuit = View(in.lagrange_last_in_minicircuit); // Locations of randomness in the minicircuit @@ -159,9 +207,8 
@@ void TranslatorAccumulatorTransferRelationImpl::accumulate(ContainerOverSubr }; /** - * @brief Relation enforcing all the range-constraint polynomials to be zero after the minicircuit - * @details This relation ensures that while we are out of the minicircuit the range constraint polynomials are zero - * + * @brief Relation enforcing all the range-constraint and op queue polynomials to be zero after the minicircuit + * @param evals transformed to `evals + C(in(X)...)*scaling_factor` * @param in an std::array containing the fully extended Univariate edges. * @param parameters contains beta, gamma, and public_input_delta, .... @@ -247,6 +294,10 @@ void TranslatorZeroConstraintsRelationImpl::accumulate(ContainerOverSubrelat auto accumulator_high_limbs_range_constraint_tail = View(in.accumulator_high_limbs_range_constraint_tail); auto quotient_low_limbs_range_constraint_tail = View(in.quotient_low_limbs_range_constraint_tail); auto quotient_high_limbs_range_constraint_tail = View(in.quotient_high_limbs_range_constraint_tail); + auto op = View(in.op); + auto x_lo_y_hi = View(in.x_lo_y_hi); + auto x_hi_z_1 = View(in.x_hi_z_1); + auto y_lo_z_2 = View(in.y_lo_z_2); auto lagrange_mini_masking = View(in.lagrange_mini_masking); // 0 in the minicircuit, -1 outside @@ -444,5 +495,17 @@ void TranslatorZeroConstraintsRelationImpl::accumulate(ContainerOverSubrelat // Contribution 63, ensure quotient_high_limbs_range_constraint_tail is 0 outside of minicircuit std::get<63>(accumulators) += quotient_high_limbs_range_constraint_tail * not_in_mininicircuit_or_masked; + + // Contribution 64, ensure op is 0 outside of minicircuit + std::get<64>(accumulators) += op * not_in_mininicircuit_or_masked; + + // Contribution 65, ensure x_lo_y_hi is 0 outside of minicircuit + std::get<65>(accumulators) += x_lo_y_hi * not_in_mininicircuit_or_masked; + + // Contribution 66, ensure x_hi_z_1 is 0 outside of minicircuit + std::get<66>(accumulators) += x_hi_z_1 * not_in_mininicircuit_or_masked; + + 
// Contribution 67, ensure y_lo_z_2 is 0 outside of minicircuit + std::get<67>(accumulators) += y_lo_z_2 * not_in_mininicircuit_or_masked; }; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_non_native_field_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_non_native_field_relation.hpp index 219a208a957e..b3b4839dccd9 100644 --- a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_non_native_field_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_non_native_field_relation.hpp @@ -16,9 +16,9 @@ template class TranslatorNonNativeFieldRelationImpl { // 1 + polynomial degree of this relation static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ - 3, // Lower wide limb subrelation (checks result is 0 mod 2¹³⁶) - 3, // Higher wide limb subrelation (checks result is 0 in higher mod 2¹³⁶), - 3 // Prime subrelation (checks result in native field) + 4, // Lower wide limb subrelation (checks result is 0 mod 2¹³⁶) + 4, // Higher wide limb subrelation (checks result is 0 in higher mod 2¹³⁶), + 4 // Prime subrelation (checks result in native field) }; /** diff --git a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_non_native_field_relation_impl.hpp b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_non_native_field_relation_impl.hpp index 6e5ed427eee4..10d8da453e2e 100644 --- a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_non_native_field_relation_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_non_native_field_relation_impl.hpp @@ -63,7 +63,9 @@ namespace bb { * which we need to calculate non-permutation relations). All other indices are set to zero. Each EccOpQueue entry * (operation) occupies 2 rows in bn254 transcripts. 
So the Translator VM has a 2-row cycle and we need to * switch the checks being performed depending on which row we are at right now. We have half a cycle of - * accumulation, where we perform this computation, and half a cycle where we just copy accumulator data. + * accumulation, where we perform this computation, and half a cycle where we just copy accumulator data. They also get + * multiplied by the op because the no-op range within the trace (if one exists) should imply the accumulator doesn't + * change (fully enforced by the AccumulatorTransferRelation and OpcodeRelation) * * @param evals transformed to `evals + C(in(X)...)*scaling_factor` * @param in an std::array containing the fully extended Univariate edges. @@ -183,7 +185,7 @@ void TranslatorNonNativeFieldRelationImpl::accumulate(ContainerOverSubrelati // clang-format on // subtract large value; vanishing shows the desired relation holds on low 136-bit limb tmp -= relation_wide_limbs * shiftx2; - tmp *= lagrange_even_in_minicircuit; + tmp *= lagrange_even_in_minicircuit * op; tmp *= scaling_factor; std::get<0>(accumulators) += tmp; @@ -236,7 +238,7 @@ void TranslatorNonNativeFieldRelationImpl::accumulate(ContainerOverSubrelati // clang-format on // subtract large value; vanishing shows the desired relation holds on high 136-bit limb tmp -= relation_wide_limbs_shift * shiftx2; - tmp *= lagrange_even_in_minicircuit; + tmp *= lagrange_even_in_minicircuit * op; tmp *= scaling_factor; std::get<1>(accumulators) += tmp; @@ -278,7 +280,7 @@ void TranslatorNonNativeFieldRelationImpl::accumulate(ContainerOverSubrelati + reconstructed_quotient * NEGATIVE_MODULUS_LIMBS[4] - reconstructed_current_accumulator; // clang-format on - tmp *= lagrange_even_in_minicircuit; + tmp *= lagrange_even_in_minicircuit * op; tmp *= scaling_factor; std::get<2>(accumulators) += tmp; }; diff --git a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_relation_consistency.test.cpp 
b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_relation_consistency.test.cpp index cf6baecc73b8..27cb69a35ead 100644 --- a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_relation_consistency.test.cpp +++ b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_relation_consistency.test.cpp @@ -382,6 +382,7 @@ TEST_F(TranslatorRelationConsistency, DecompositionRelation) const auto& x_lo_y_hi_shift = input_elements.x_lo_y_hi_shift; const auto& x_hi_z_1_shift = input_elements.x_hi_z_1_shift; const auto& y_lo_z_2_shift = input_elements.y_lo_z_2_shift; + const auto& op = input_elements.op; const auto& lagrange_even_in_minicircuit = input_elements.lagrange_even_in_minicircuit; @@ -509,29 +510,29 @@ TEST_F(TranslatorRelationConsistency, DecompositionRelation) }; // Check decomposition 50-72 bit limbs into microlimbs - expected_values[0] = check_standard_limb_decomposition(p_x_low_limbs_range_constraint_0, - p_x_low_limbs_range_constraint_1, - p_x_low_limbs_range_constraint_2, - p_x_low_limbs_range_constraint_3, - p_x_low_limbs_range_constraint_4, - p_x_low_limbs); - expected_values[1] = check_standard_limb_decomposition(p_x_low_limbs_range_constraint_0_shift, - p_x_low_limbs_range_constraint_1_shift, - p_x_low_limbs_range_constraint_2_shift, - p_x_low_limbs_range_constraint_3_shift, - p_x_low_limbs_range_constraint_4_shift, - p_x_low_limbs_shift); - expected_values[2] = check_standard_limb_decomposition(p_x_high_limbs_range_constraint_0, - p_x_high_limbs_range_constraint_1, - p_x_high_limbs_range_constraint_2, - p_x_high_limbs_range_constraint_3, - p_x_high_limbs_range_constraint_4, - p_x_high_limbs); - expected_values[3] = check_standard_top_limb_decomposition(p_x_high_limbs_range_constraint_0_shift, - p_x_high_limbs_range_constraint_1_shift, - p_x_high_limbs_range_constraint_2_shift, - p_x_high_limbs_range_constraint_3_shift, - p_x_high_limbs_shift); + expected_values[0] = op * 
check_standard_limb_decomposition(accumulator_low_limbs_range_constraint_0, + accumulator_low_limbs_range_constraint_1, + accumulator_low_limbs_range_constraint_2, + accumulator_low_limbs_range_constraint_3, + accumulator_low_limbs_range_constraint_4, + accumulators_binary_limbs_0); + expected_values[1] = op * check_standard_limb_decomposition(accumulator_low_limbs_range_constraint_0_shift, + accumulator_low_limbs_range_constraint_1_shift, + accumulator_low_limbs_range_constraint_2_shift, + accumulator_low_limbs_range_constraint_3_shift, + accumulator_low_limbs_range_constraint_4_shift, + accumulators_binary_limbs_1); + expected_values[2] = op * check_standard_limb_decomposition(accumulator_high_limbs_range_constraint_0, + accumulator_high_limbs_range_constraint_1, + accumulator_high_limbs_range_constraint_2, + accumulator_high_limbs_range_constraint_3, + accumulator_high_limbs_range_constraint_4, + accumulators_binary_limbs_2); + expected_values[3] = op * check_standard_top_limb_decomposition(accumulator_high_limbs_range_constraint_0_shift, + accumulator_high_limbs_range_constraint_1_shift, + accumulator_high_limbs_range_constraint_2_shift, + accumulator_high_limbs_range_constraint_3_shift, + accumulators_binary_limbs_3); expected_values[4] = check_standard_limb_decomposition(p_y_low_limbs_range_constraint_0, p_y_low_limbs_range_constraint_1, @@ -580,29 +581,30 @@ TEST_F(TranslatorRelationConsistency, DecompositionRelation) z_high_limbs_range_constraint_3_shift, z_high_limbs_range_constraint_4_shift, z_high_limbs_shift); - expected_values[12] = check_standard_limb_decomposition(accumulator_low_limbs_range_constraint_0, - accumulator_low_limbs_range_constraint_1, - accumulator_low_limbs_range_constraint_2, - accumulator_low_limbs_range_constraint_3, - accumulator_low_limbs_range_constraint_4, - accumulators_binary_limbs_0); - expected_values[13] = check_standard_limb_decomposition(accumulator_low_limbs_range_constraint_0_shift, - 
accumulator_low_limbs_range_constraint_1_shift, - accumulator_low_limbs_range_constraint_2_shift, - accumulator_low_limbs_range_constraint_3_shift, - accumulator_low_limbs_range_constraint_4_shift, - accumulators_binary_limbs_1); - expected_values[14] = check_standard_limb_decomposition(accumulator_high_limbs_range_constraint_0, - accumulator_high_limbs_range_constraint_1, - accumulator_high_limbs_range_constraint_2, - accumulator_high_limbs_range_constraint_3, - accumulator_high_limbs_range_constraint_4, - accumulators_binary_limbs_2); - expected_values[15] = check_standard_top_limb_decomposition(accumulator_high_limbs_range_constraint_0_shift, - accumulator_high_limbs_range_constraint_1_shift, - accumulator_high_limbs_range_constraint_2_shift, - accumulator_high_limbs_range_constraint_3_shift, - accumulators_binary_limbs_3); + expected_values[12] = check_standard_limb_decomposition(p_x_low_limbs_range_constraint_0, + p_x_low_limbs_range_constraint_1, + p_x_low_limbs_range_constraint_2, + p_x_low_limbs_range_constraint_3, + p_x_low_limbs_range_constraint_4, + p_x_low_limbs); + expected_values[13] = check_standard_limb_decomposition(p_x_low_limbs_range_constraint_0_shift, + p_x_low_limbs_range_constraint_1_shift, + p_x_low_limbs_range_constraint_2_shift, + p_x_low_limbs_range_constraint_3_shift, + p_x_low_limbs_range_constraint_4_shift, + p_x_low_limbs_shift); + expected_values[14] = check_standard_limb_decomposition(p_x_high_limbs_range_constraint_0, + p_x_high_limbs_range_constraint_1, + p_x_high_limbs_range_constraint_2, + p_x_high_limbs_range_constraint_3, + p_x_high_limbs_range_constraint_4, + p_x_high_limbs); + expected_values[15] = check_standard_top_limb_decomposition(p_x_high_limbs_range_constraint_0_shift, + p_x_high_limbs_range_constraint_1_shift, + p_x_high_limbs_range_constraint_2_shift, + p_x_high_limbs_range_constraint_3_shift, + p_x_high_limbs_shift); + expected_values[16] = check_standard_limb_decomposition(quotient_low_limbs_range_constraint_0, 
quotient_low_limbs_range_constraint_1, quotient_low_limbs_range_constraint_2, @@ -735,15 +737,30 @@ TEST_F(TranslatorRelationConsistency, OpcodeConstraintRelation) const InputElements input_elements = random_inputs ? get_random_input() : get_special_input(); const auto& op = input_elements.op; + const auto& accumulators_binary_limbs_0 = input_elements.accumulators_binary_limbs_0; + const auto& accumulators_binary_limbs_1 = input_elements.accumulators_binary_limbs_1; + const auto& accumulators_binary_limbs_2 = input_elements.accumulators_binary_limbs_2; + const auto& accumulators_binary_limbs_3 = input_elements.accumulators_binary_limbs_3; + const auto& accumulators_binary_limbs_0_shift = input_elements.accumulators_binary_limbs_0_shift; + const auto& accumulators_binary_limbs_1_shift = input_elements.accumulators_binary_limbs_1_shift; + const auto& accumulators_binary_limbs_2_shift = input_elements.accumulators_binary_limbs_2_shift; + const auto& accumulators_binary_limbs_3_shift = input_elements.accumulators_binary_limbs_3_shift; + const auto& lagrange_mini_masking = input_elements.lagrange_mini_masking; + const auto& lagrange_even_in_minicircuit = input_elements.lagrange_even_in_minicircuit; RelationValues expected_values; const auto parameters = RelationParameters::get_random(); - // (Contribution 1) - auto contribution_1 = op * (op - FF(3)) * (op - FF(4)) * (op - FF(8)) * (lagrange_mini_masking - FF(1)); - expected_values[0] = contribution_1; + // Opcode constraints - ensure op is 0, 3, 4, or 8 + expected_values[0] = op * (op - FF(3)) * (op - FF(4)) * (op - FF(8)) * (lagrange_mini_masking - FF(1)); + + auto shared = (op - FF(3)) * (op - FF(4)) * (op - FF(8)) * lagrange_even_in_minicircuit; + expected_values[1] = shared * (accumulators_binary_limbs_0 - accumulators_binary_limbs_0_shift); + expected_values[2] = shared * (accumulators_binary_limbs_1 - accumulators_binary_limbs_1_shift); + expected_values[3] = shared * (accumulators_binary_limbs_2 - 
accumulators_binary_limbs_2_shift); + expected_values[4] = shared * (accumulators_binary_limbs_3 - accumulators_binary_limbs_3_shift); validate_relation_execution(expected_values, input_elements, parameters); }; @@ -895,7 +912,10 @@ TEST_F(TranslatorRelationConsistency, ZeroConstraintsRelation) const auto& relation_wide_limbs_range_constraint_1 = input_elements.relation_wide_limbs_range_constraint_1; const auto& relation_wide_limbs_range_constraint_2 = input_elements.relation_wide_limbs_range_constraint_2; const auto& relation_wide_limbs_range_constraint_3 = input_elements.relation_wide_limbs_range_constraint_3; - + const auto& op = input_elements.op; + const auto& x_lo_y_hi = input_elements.x_lo_y_hi; + const auto& x_hi_z_1 = input_elements.x_hi_z_1; + const auto& y_lo_z_2 = input_elements.y_lo_z_2; const auto& lagrange_odd_in_minicircuit = input_elements.lagrange_odd_in_minicircuit; const auto& lagrange_even_in_minicircuit = input_elements.lagrange_even_in_minicircuit; const auto& lagrange_mini_masking = input_elements.lagrange_mini_masking; @@ -1032,6 +1052,14 @@ TEST_F(TranslatorRelationConsistency, ZeroConstraintsRelation) (lagrange_mini_masking - FF(1)) * quotient_low_limbs_range_constraint_tail; expected_values[63] = (lagrange_even_in_minicircuit + lagrange_odd_in_minicircuit - 1) * (lagrange_mini_masking - FF(1)) * quotient_high_limbs_range_constraint_tail; + expected_values[64] = + (lagrange_even_in_minicircuit + lagrange_odd_in_minicircuit - 1) * (lagrange_mini_masking - FF(1)) * op; + expected_values[65] = (lagrange_even_in_minicircuit + lagrange_odd_in_minicircuit - 1) * + (lagrange_mini_masking - FF(1)) * x_lo_y_hi; + expected_values[66] = (lagrange_even_in_minicircuit + lagrange_odd_in_minicircuit - 1) * + (lagrange_mini_masking - FF(1)) * x_hi_z_1; + expected_values[67] = (lagrange_even_in_minicircuit + lagrange_odd_in_minicircuit - 1) * + (lagrange_mini_masking - FF(1)) * y_lo_z_2; validate_relation_execution(expected_values, input_elements, 
parameters); }; @@ -1119,7 +1147,7 @@ TEST_F(TranslatorRelationConsistency, NonNativeFieldRelation) quotient_low_binary_limbs_shift * NEGATIVE_MODULUS_LIMBS[0] - accumulators_binary_limbs_1) * shift - relation_wide_limbs * shiftx2) * - lagrange_even_in_minicircuit; + lagrange_even_in_minicircuit * op; // Higher wide limb subrelation expected_values[1] = @@ -1161,7 +1189,7 @@ TEST_F(TranslatorRelationConsistency, NonNativeFieldRelation) quotient_low_binary_limbs * NEGATIVE_MODULUS_LIMBS[3] - accumulators_binary_limbs_3) * shift - relation_wide_limbs_shift * shiftx2) * - lagrange_even_in_minicircuit; + lagrange_even_in_minicircuit * op; auto reconstructed_p_x = (p_x_low_limbs + p_x_low_limbs_shift * shift + p_x_high_limbs * shiftx2 + p_x_high_limbs_shift * shiftx3); auto reconstructed_p_y = @@ -1185,7 +1213,7 @@ TEST_F(TranslatorRelationConsistency, NonNativeFieldRelation) reconstructed_z1 * parameters.batching_challenge_v[2][4] + reconstructed_z2 * parameters.batching_challenge_v[3][4] + reconstructed_quotient * NEGATIVE_MODULUS_LIMBS[4] - reconstructed_current_accumulator) * - lagrange_even_in_minicircuit; + lagrange_even_in_minicircuit * op; validate_relation_execution(expected_values, input_elements, parameters); }; diff --git a/barretenberg/cpp/src/barretenberg/relations/ultra_arithmetic_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/ultra_arithmetic_relation.hpp index 8b320762796e..95e1ed14f5c2 100644 --- a/barretenberg/cpp/src/barretenberg/relations/ultra_arithmetic_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/ultra_arithmetic_relation.hpp @@ -81,7 +81,6 @@ template class UltraArithmeticRelationImpl { const Parameters&, const FF& scaling_factor) { - PROFILE_THIS_NAME("Arithmetic::accumulate"); using Accumulator = std::tuple_element_t<0, ContainerOverSubrelations>; using CoefficientAccumulator = typename Accumulator::CoefficientAccumulator; diff --git a/barretenberg/cpp/src/barretenberg/relations/utils.hpp 
b/barretenberg/cpp/src/barretenberg/relations/utils.hpp index 615e06f87bc8..1c478ff8f949 100644 --- a/barretenberg/cpp/src/barretenberg/relations/utils.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/utils.hpp @@ -15,6 +15,7 @@ #include "barretenberg/flavor/flavor.hpp" #include "barretenberg/polynomials/gate_separator.hpp" #include "barretenberg/relations/relation_parameters.hpp" +#include "barretenberg/relations/relation_types.hpp" namespace bb { diff --git a/barretenberg/cpp/src/barretenberg/serialize/msgpack.hpp b/barretenberg/cpp/src/barretenberg/serialize/msgpack.hpp index b1b427ac7062..cf09bc92c195 100644 --- a/barretenberg/cpp/src/barretenberg/serialize/msgpack.hpp +++ b/barretenberg/cpp/src/barretenberg/serialize/msgpack.hpp @@ -110,6 +110,7 @@ to the object itself, do break up the above to keep a reference to the handle, f #include "msgpack_impl/concepts.hpp" #include "msgpack_impl/name_value_pair_macro.hpp" #include + #include // Helper for above documented syntax diff --git a/barretenberg/cpp/src/barretenberg/serialize/msgpack_apply.hpp b/barretenberg/cpp/src/barretenberg/serialize/msgpack_apply.hpp index 7816f8a203d7..95fe8ad76693 100644 --- a/barretenberg/cpp/src/barretenberg/serialize/msgpack_apply.hpp +++ b/barretenberg/cpp/src/barretenberg/serialize/msgpack_apply.hpp @@ -1,5 +1,6 @@ #pragma once +#include "barretenberg/common/try_catch_shim.hpp" #include "msgpack.hpp" #include "msgpack_impl/drop_keys.hpp" diff --git a/barretenberg/cpp/src/barretenberg/serialize/msgpack_impl/func_traits.hpp b/barretenberg/cpp/src/barretenberg/serialize/msgpack_impl/func_traits.hpp index f09f9d214eea..6837af6e85f5 100644 --- a/barretenberg/cpp/src/barretenberg/serialize/msgpack_impl/func_traits.hpp +++ b/barretenberg/cpp/src/barretenberg/serialize/msgpack_impl/func_traits.hpp @@ -32,7 +32,7 @@ struct func_traits : func_traits_base {}; template constexpr auto get_func_traits() { if constexpr (requires { &T::operator(); }) { - return func_traits{}; + return 
func_traits {}; } else { return func_traits{}; } diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/smt_verification/CMakeLists.txt index 8be1ebda22d1..af1ff32d0255 100644 --- a/barretenberg/cpp/src/barretenberg/smt_verification/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/smt_verification/CMakeLists.txt @@ -5,21 +5,39 @@ set(CVC5_PREFIX "${CMAKE_BINARY_DIR}/_deps/cvc5") set(CVC5_BUILD "${CVC5_PREFIX}/src/cvc5-build") set(CVC5_LIB "${CVC5_BUILD}/lib/libcvc5.so") set(CVC5_INCLUDE "${CVC5_BUILD}/include") +set(CVC5_STABLE_COMMIT "5fcdb48eb26dee9385e0d0e6377fcc7e4afee85a") -ExternalProject_Add( - cvc5 - PREFIX ${CVC5_PREFIX} - GIT_REPOSITORY "https://github.com/cvc5/cvc5.git" - GIT_TAG main - BUILD_IN_SOURCE YES - CONFIGURE_COMMAND ${SHELL} ./configure.sh production --gpl --auto-download --cocoa --cryptominisat --kissat -DCMAKE_C_COMPILER=/usr/bin/clang -DCMAKE_CXX_COMPILER=/usr/bin/clang++ --prefix=${CVC5_BUILD} - BUILD_COMMAND make -C build -j8 - INSTALL_COMMAND make -C build install - UPDATE_COMMAND "" # No update step - # needed by ninja - # See https://stackoverflow.com/questions/48142082/cmake-externalproject-add-project-not-building-before-targets-that-depend-on-it - BUILD_BYPRODUCTS ${CVC5_LIB} ${CVC5_INCLUDE} -) +if(ENABLE_ASAN) + ExternalProject_Add( + cvc5 + PREFIX ${CVC5_PREFIX} + GIT_REPOSITORY "https://github.com/cvc5/cvc5.git" + GIT_TAG ${CVC5_STABLE_COMMIT} + BUILD_IN_SOURCE YES + CONFIGURE_COMMAND ${SHELL} ./configure.sh debug --gpl --auto-download --cocoa --cryptominisat --kissat -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} --prefix=${CVC5_BUILD} --asan + BUILD_COMMAND make -C build -j8 + INSTALL_COMMAND make -C build install + UPDATE_COMMAND "" # No update step + # needed by ninja + # See https://stackoverflow.com/questions/48142082/cmake-externalproject-add-project-not-building-before-targets-that-depend-on-it + BUILD_BYPRODUCTS 
${CVC5_LIB} ${CVC5_INCLUDE} + ) +else() + ExternalProject_Add( + cvc5 + PREFIX ${CVC5_PREFIX} + GIT_REPOSITORY "https://github.com/cvc5/cvc5.git" + GIT_TAG ${CVC5_STABLE_COMMIT} + BUILD_IN_SOURCE YES + CONFIGURE_COMMAND ${SHELL} ./configure.sh production --gpl --auto-download --cocoa --cryptominisat --kissat -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} --prefix=${CVC5_BUILD} + BUILD_COMMAND make -C build -j8 + INSTALL_COMMAND make -C build install + UPDATE_COMMAND "" # No update step + # needed by ninja + # See https://stackoverflow.com/questions/48142082/cmake-externalproject-add-project-not-building-before-targets-that-depend-on-it + BUILD_BYPRODUCTS ${CVC5_LIB} ${CVC5_INCLUDE} + ) +endif() add_library(cvc5-lib SHARED IMPORTED) add_dependencies(cvc5-lib cvc5) diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/README.md b/barretenberg/cpp/src/barretenberg/smt_verification/README.md index 1908ea5709db..ec7074ab5936 100644 --- a/barretenberg/cpp/src/barretenberg/smt_verification/README.md +++ b/barretenberg/cpp/src/barretenberg/smt_verification/README.md @@ -8,7 +8,7 @@ Then just build with `smt-verification` preset. ## 1. Setting variable names during circuit creation and exporting the circuit. -### There're four new methods inside Standard and Ultra CircuitBuilders +### There're four new methods inside the CircuitBuilder - ```set_variable_name(u32 index, str name)``` - assignes a name to a variable. Specifically, binds a name with the first index of an equivalence class. @@ -16,8 +16,6 @@ Then just build with `smt-verification` preset. - ```update_variable_names(u32 idx)``` - in case you've called ```assert_equal``` and ```update_real_variable_indices``` somewhere and you know that two or more variables from the equivalence class have separate names, call this method. Idx is the index of one of the variables of this class. The name of the first variable in class will remain. 
-- ```finalize_variable_names()``` - in case you don't want to mess with previous method, this one finds all the collisions and removes them. - - ```export_circuit()``` - exports all variables, gates, and assigned names to an msgpack-compatible buffer namely `msgpack::sbuffer`. To store it on the disk just do the following @@ -118,20 +116,17 @@ To store it on the disk just do the following Now we can create symbolic circuit - - ```smt_circuit::StandardCircuit circuit(CircuitSchema c_info, Solver* s, TermType type, str tag="", bool optimizations=true)``` - ```smt_circuit::UltraCircuit circuit(CircuitSchema c_info, Solver* s, TermType type, str tag="", bool optimizations=true)``` It will generate all the symbolic values of the circuit wires, add all the gate constrains, create a map `term_name->STerm` and the inverse of it. Where `term_name` is the name you provided earlier. In case you want to create two similar circuits with the same `solver` and `schema`, then you should specify the `tag`(name) of a circuit. - **Advanced** If you don't want the circuit optimizations to be applied then you should set `optimizations` to `false`. Optimizations interchange the complex circuits like bitwise XOR with simple XOR operation. More on optimizations can be found [standard_circuit.cpp](circuit/standard_circuit.cpp) + **Advanced** If you don't want the circuit optimizations to be applied then you should set `optimizations` to `false`. Optimizations interchange the complex circuits like bitwise XOR with simple XOR operation. More on optimizations can be found [ultra_circuit.cpp](circuit/ultra_circuit.cpp) After the symbolic circuit is initialized, you can get the previously named variables via `circuit[name]` or any other variable by `circuit[idx]`. - There is a method `Circuit::simulate_circuit_eval(vector w)`. It checks that the evaluation process is correct for this particular witness. (Only in Standard for now). - 4. 
Terms creation ### Arithmetic Variables @@ -287,17 +282,15 @@ You can then import the saved witness using one of the following functions: - `vec import_witness_single(str fname)` ## 4. Automated verification of a unique witness -There's a static member of `StandardCircuit` and `UltraCircuit` +There's a static member in `UltraCircuit` -- `pair StandardCircuit::unique_wintes(CircuitSchema circuit_info, Solver*, TermType type, vector equal, bool optimizations)` - `pair UltraCircuit::unique_wintes(CircuitSchema circuit_info, Solver*, TermType type, vector equal, bool optimizations)` They will create two separate circuits, constrain variables with names from `equal` to be equal acrosss the circuits, and set all the other variables to be not equal at the same time. Another one is -- `pair StandardCircuit::unique_witness_ext(CircuitSchema circuit_info, Solver* s, TermType type, vector equal_variables, vector nequal_variables, vector at_least_one_equal_variable, vector at_least_one_nequal_variable)` that does the same but provides you with more flexible settings. -- Same in `UltraCircuit` +- `pair UltraCircuit::unique_witness_ext(CircuitSchema circuit_info, Solver* s, TermType type, vector equal_variables, vector nequal_variables, vector at_least_one_equal_variable, vector at_least_one_nequal_variable)` that does the same but provides you with more flexible settings. The return circuits can be useful, if you want to define some additional constraints, that are not covered by the above functions. You can call `s.check`, `s.model`, `smt_timer` or `default_model` further. 
@@ -351,9 +344,7 @@ Avalaible test suits in `smt_verification_tests`: - `SymbolicSet*` --- -- `Subcircuits*` -- `Standard_circuit*` -- `Ultra_circuit*` +- `UltraCircuitSMT*` --- - `SMT_Example*` @@ -363,7 +354,7 @@ Avalaible test suits in `smt_verification_tests`: ### Function Equality ```cpp - StandardCircuitBuilder builder = StandardCircuitBuilder(); + UltraCircuitBuilder builder; field_t a(witness_t(&builder, fr::random_element())); field_t b(witness_t(&builder, fr::random_element())); @@ -378,7 +369,7 @@ Avalaible test suits in `smt_verification_tests`: smt_circuit::CircuitSchema circuit_info = smt_circuit::unpack_from_buffer(buf); smt_solver::Solver s(circuit_info.modulus); - smt_circuit::StandardCircuit circuit(circuit_info, &s, smt_terms::TermType::FFTerm); + smt_circuit::UltraCircuit circuit(circuit_info, &s, smt_terms::TermType::FFTerm); smt_terms::STerm a1 = circuit["a"]; smt_terms::STerm b1 = circuit["b"]; smt_terms::STerm c1 = circuit["c"]; @@ -393,7 +384,7 @@ Avalaible test suits in `smt_verification_tests`: ``` ### Function Equality with mistake ```cpp - StandardCircuitBuilder builder = StandardCircuitBuilder(); + UltraCircuitBuilder builder; field_t a(witness_t(&builder, fr::random_element())); field_t b(witness_t(&builder, fr::random_element())); @@ -408,7 +399,7 @@ Avalaible test suits in `smt_verification_tests`: smt_circuit::CircuitSchema circuit_info = smt_circuit::unpack_from_buffer(buf); smt_solver::Solver s(circuit_info.modulus); - smt_circuit::StandardCircuit circuit(circuit_info, &s, smt_terms::TermType::FFTerm); + smt_circuit::UltraCircuit circuit(circuit_info, &s, smt_terms::TermType::FFTerm); smt_terms::STerm a1 = circuit["a"]; smt_terms::STerm b1 = circuit["b"]; @@ -438,7 +429,7 @@ Avalaible test suits in `smt_verification_tests`: // witness using unique_witness_ext function // Find both roots of a quadratic equation x^2 + a * x + b = s - StandardCircuitBuilder builder = StandardCircuitBuilder(); + UltraCircuitBuilder builder; field_t 
a(pub_witness_t(&builder, fr::random_element())); field_t b(pub_witness_t(&builder, fr::random_element())); @@ -455,7 +446,7 @@ Avalaible test suits in `smt_verification_tests`: smt_solver::Solver s(circuit_info.modulus); auto cirs = - smt_circuit::StandardCircuit::unique_witness_ext(circuit_info, &s, smt_terms::TermType::FFTerm, { "ev" }, { "z" }); + smt_circuit::UltraCircuit::unique_witness_ext(circuit_info, &s, smt_terms::TermType::FFTerm, { "ev" }, { "z" }); bool res = s.check(); ASSERT_TRUE(res); @@ -490,6 +481,6 @@ More examples can be found in - [terms/ffterm.test.cpp](terms/ffterm.test.cpp), [terms/ffiterm.test.cpp](terms/ffiterm.test.cpp), [terms/bvterm.test.cpp](terms/bvterm.test.cpp), [terms/iterm.test.cpp](terms/iterm.test.cpp) - [terms/bool.test.cpp](terms/bool.test.cpp) - [terms/data_types.test.cpp] -- [circuit/standard_circuit.test.cpp](circuit/standard_circuit.test.cpp), [circuit/ultra_circuit](circuit/ultra_circuit.test.cpp) +- [circuit/ultra_circuit](circuit/ultra_circuit.test.cpp) - [smt_polynomials.test.cpp](smt_polynomials.test.cpp), [smt_examples.test.cpp](smt_examples.test.cpp) - [bb_tests](bb_tests) diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/circuit/circuit_base.hpp b/barretenberg/cpp/src/barretenberg/smt_verification/circuit/circuit_base.hpp index 8ce86525160d..fc01a3a22179 100644 --- a/barretenberg/cpp/src/barretenberg/smt_verification/circuit/circuit_base.hpp +++ b/barretenberg/cpp/src/barretenberg/smt_verification/circuit/circuit_base.hpp @@ -9,13 +9,12 @@ #include "barretenberg/smt_verification/terms/data_structures.hpp" #include "barretenberg/smt_verification/terms/term.hpp" -#include "subcircuits.hpp" +#include "barretenberg/smt_verification/circuit/circuit_schema.hpp" namespace smt_circuit { using namespace smt_solver; using namespace smt_terms; using namespace smt_circuit_schema; -using namespace smt_subcircuits; enum class SubcircuitType { XOR, AND, RANGE, ROR, SHL, SHR }; @@ -38,9 +37,6 @@ class CircuitBase { 
std::unordered_map optimized; // keeps track of the variables that were excluded from symbolic // circuit during optimizations bool enable_optimizations; // flags to turn on circuit optimizations - std::unordered_map> - cached_subcircuits; // caches subcircuits during optimization - // No need to recompute them each time std::unordered_map> post_process; // Values idxs that should be post processed after the solver returns a witness. // Basically it affects only optimized out variables. diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/circuit/standard_circuit.cpp b/barretenberg/cpp/src/barretenberg/smt_verification/circuit/standard_circuit.cpp deleted file mode 100644 index 41e9f9b484ba..000000000000 --- a/barretenberg/cpp/src/barretenberg/smt_verification/circuit/standard_circuit.cpp +++ /dev/null @@ -1,1064 +0,0 @@ -#include "standard_circuit.hpp" - -namespace smt_circuit { - -/** - * @brief Construct a new StandardCircuit object - * - * @param circuit_info CircuitShema object - * @param solver pointer to the global solver - * @param tag tag of the circuit. Empty by default. - */ -StandardCircuit::StandardCircuit( - CircuitSchema& circuit_info, Solver* solver, TermType type, const std::string& tag, bool enable_optimizations) - : CircuitBase(circuit_info.vars_of_interest, - circuit_info.variables, - circuit_info.public_inps, - circuit_info.real_variable_index, - circuit_info.real_variable_tags, - solver, - type, - tag, - enable_optimizations) - , selectors(circuit_info.selectors[0]) - , wires_idxs(circuit_info.wires[0]) -{ - this->symbolic_vars[this->variable_names_inverse["one"]] == bb::fr::one(); - // Perform all relaxations for gates or - // add gate in its normal state to solver - size_t i = 0; - while (i < this->get_num_gates()) { - i = this->prepare_gates(i); - } -} - -/** - * @brief Adds all the gate constraints to the solver. 
- * Relaxes constraint system for non-ff solver engines - * via removing subcircuits that were already proved being correct. - * - */ -size_t StandardCircuit::prepare_gates(size_t cursor) -{ - if (this->type == TermType::BVTerm && this->enable_optimizations) { - size_t res = handle_logic_constraint(cursor); - if (res != static_cast(-1)) { - return res; - } - } - - if ((this->type == TermType::BVTerm || this->type == TermType::FFITerm) && this->enable_optimizations) { - size_t res = handle_range_constraint(cursor); - if (res != static_cast(-1)) { - return res; - } - } - - if ((this->type == TermType::BVTerm) && this->enable_optimizations) { - size_t res = handle_ror_constraint(cursor); - if (res != static_cast(-1)) { - return res; - } - } - - if ((this->type == TermType::BVTerm) && this->enable_optimizations) { - size_t res = handle_shl_constraint(cursor); - if (res != static_cast(-1)) { - return res; - } - } - if ((this->type == TermType::BVTerm) && this->enable_optimizations) { - size_t res = handle_shr_constraint(cursor); - if (res != static_cast(-1)) { - return res; - } - } - - bb::fr q_m = this->selectors[cursor][0]; - bb::fr q_1 = this->selectors[cursor][1]; - bb::fr q_2 = this->selectors[cursor][2]; - bb::fr q_3 = this->selectors[cursor][3]; - bb::fr q_c = this->selectors[cursor][4]; - uint32_t w_l = this->wires_idxs[cursor][0]; - uint32_t w_r = this->wires_idxs[cursor][1]; - uint32_t w_o = this->wires_idxs[cursor][2]; - optimized[w_l] = false; - optimized[w_r] = false; - optimized[w_o] = false; - - // Handles the case when we have univariate polynomial as constraint - // by simply finding the roots via quadratic formula(or linear) - // There're 7 possibilities of that, which are present below - bool univariate_flag = false; - univariate_flag |= (w_l == w_r) && (w_r == w_o); - univariate_flag |= (w_l == w_r) && (q_3 == 0); - univariate_flag |= (w_l == w_o) && (q_2 == 0) && (q_m == 0); - univariate_flag |= (w_r == w_o) && (q_1 == 0) && (q_m == 0); - 
univariate_flag |= (q_m == 0) && (q_1 == 0) && (q_3 == 0); - univariate_flag |= (q_m == 0) && (q_2 == 0) && (q_3 == 0); - univariate_flag |= (q_m == 0) && (q_1 == 0) && (q_2 == 0); - - // Univariate gate. Relaxes the solver. Or is it? - // TODO(alex): Test the effect of this relaxation after the tests are merged. - if (univariate_flag) { - if ((q_m == 1) && (q_1 == 0) && (q_2 == 0) && (q_3 == -1) && (q_c == 0)) { - (Bool(this->symbolic_vars[w_l]) == - Bool(STerm(0, this->solver, this->type)) | // STerm(0, this->solver, this->type)) | - Bool(this->symbolic_vars[w_l]) == - Bool(STerm(1, this->solver, this->type))) // STerm(1, this->solver, this->type))) - .assert_term(); - } else { - this->handle_univariate_constraint(q_m, q_1, q_2, q_3, q_c, w_l); - } - } else { - STerm eq = this->symbolic_vars[this->variable_names_inverse["zero"]]; - - // mul selector - if (q_m != 0) { - eq += this->symbolic_vars[w_l] * this->symbolic_vars[w_r] * q_m; - } - // left selector - if (q_1 != 0) { - eq += this->symbolic_vars[w_l] * q_1; - } - // right selector - if (q_2 != 0) { - eq += this->symbolic_vars[w_r] * q_2; - } - // out selector - if (q_3 != 0) { - eq += this->symbolic_vars[w_o] * q_3; - } - // constant selector - if (q_c != 0) { - eq += q_c; - } - eq == 0; - } - return cursor + 1; -} - -/** - * @brief Relaxes univariate polynomial constraints. 
- * TODO(alex): probably won't be necessary in the nearest future - * because of new solver - * - * @param q_m multiplication selector - * @param q_1 l selector - * @param q_2 r selector - * @param q_3 o selector - * @param q_c constant - * @param w witness index - */ -void StandardCircuit::handle_univariate_constraint( - bb::fr q_m, bb::fr q_1, bb::fr q_2, bb::fr q_3, bb::fr q_c, uint32_t w) -{ - bb::fr b = q_1 + q_2 + q_3; - - if (q_m == 0) { - this->symbolic_vars[w] == -q_c / b; - return; - } - - std::pair d = (b * b - bb::fr(4) * q_m * q_c).sqrt(); - if (!d.first) { - throw std::invalid_argument("There're no roots of quadratic polynomial"); - } - bb::fr x1 = (-b + d.second) / (bb::fr(2) * q_m); - bb::fr x2 = (-b - d.second) / (bb::fr(2) * q_m); - - if (d.second == 0) { - this->symbolic_vars[w] == STerm(x1, this->solver, type); - } else { - ((Bool(this->symbolic_vars[w]) == Bool(STerm(x1, this->solver, this->type))) | - (Bool(this->symbolic_vars[w]) == Bool(STerm(x2, this->solver, this->type)))) - .assert_term(); - } -} - -// TODO(alex): Optimized out variables should be filled with proper values... -/** - * @brief Relaxes logic constraints(AND/XOR). - * @details This function is needed when we use bitwise compatible - * symbolic terms. - * It compares the chunk of selectors of the current circuit - * with pure create_logic_constraint from circuit_builder. - * It uses binary search to find a bit length of the constraint, - * since we don't know it in general. - * After a match is found, it updates the cursor to skip all the - * redundant constraints and adds a pure a ^ b = c or a & b = c - * constraint to solver. - * If there's no match, it will return -1 - * - * @param cursor current position - * @return next position or -1 - */ -size_t StandardCircuit::handle_logic_constraint(size_t cursor) -{ - // Initialize binary search. 
Logic gate can only accept even bit lengths - // So we need to find a match among [1, 127] and then multiply the result by 2 - size_t beg = 1; - size_t end = 127; - size_t mid = 0; - auto res = static_cast(-1); - - // Indicates that current bit length is a match for XOR - bool xor_flag = true; - // Indicates that current bit length is a match for AND - bool and_flag = true; - // Indicates the logic operation(true - XOR, false - AND) if the match is found. - bool logic_flag = true; - bool stop_flag = false; - - while (beg <= end) { - mid = (end + beg) / 2; - - // Take a pure logic circuit for the current bit length(2 * mid) - // and compare it's selectors to selectors of the global circuit - // at current position(cursor). - // If they are equal, we can apply an optimization - // However, if we have a match at bit length 2, it is possible - // to have a match at higher bit lengths. That's why we store - // the current match as `res` and proceed with ordinary binary search. - // `stop_flag` simply indicates that the first selector doesn't match - // and we can skip this whole section. 
- - if (!this->cached_subcircuits[SubcircuitType::XOR].contains(mid * 2)) { - this->cached_subcircuits[SubcircuitType::XOR].insert( - { mid * 2, get_standard_logic_circuit(mid * 2, true) }); - } - CircuitProps xor_props = this->cached_subcircuits[SubcircuitType::XOR][mid * 2]; - - if (!this->cached_subcircuits[SubcircuitType::AND].contains(mid * 2)) { - this->cached_subcircuits[SubcircuitType::AND].insert( - { mid * 2, get_standard_logic_circuit(mid * 2, false) }); - } - CircuitProps and_props = this->cached_subcircuits[SubcircuitType::AND][mid * 2]; - - CircuitSchema xor_circuit = xor_props.circuit; - CircuitSchema and_circuit = and_props.circuit; - - xor_flag = cursor + xor_props.num_gates <= this->selectors.size(); - and_flag = cursor + xor_props.num_gates <= this->selectors.size(); - if (xor_flag || and_flag) { - for (size_t j = 0; j < xor_props.num_gates; j++) { - // It is possible for gates to be equal but wires to be not, but I think it's very - // unlikely to happen - xor_flag &= xor_circuit.selectors[0][j + xor_props.start_gate] == this->selectors[cursor + j]; - and_flag &= and_circuit.selectors[0][j + and_props.start_gate] == this->selectors[cursor + j]; - - // Before this fix this routine simplified two consecutive n bit xors(ands) into one 2n bit xor(and) - // Now it checks out_accumulator_idx and new_out_accumulator_idx match - // 14 here is a size of one iteration of logic_gate for loop in term of gates - // 13 is the accumulator index relative to the beginning of the iteration - - size_t single_iteration_size = 14; - size_t relative_acc_idx = 13; - xor_flag &= - (j % single_iteration_size != relative_acc_idx) || (j == relative_acc_idx) || - (this->wires_idxs[j + cursor][0] == this->wires_idxs[j + cursor - single_iteration_size][2]); - and_flag &= - (j % single_iteration_size != relative_acc_idx) || (j == relative_acc_idx) || - (this->wires_idxs[j + cursor][0] == this->wires_idxs[j + cursor - single_iteration_size][2]); - - if (!xor_flag && !and_flag) 
{ - // Won't match at any bit length - if (j == 0) { - stop_flag = true; - } - break; - } - } - } - if (stop_flag) { - break; - } - - if (!xor_flag && !and_flag) { - end = mid - 1; - } else { - res = 2 * mid; - logic_flag = xor_flag; - - beg = mid + 1; - } - } - - if (res != static_cast(-1)) { - CircuitProps xor_props = get_standard_logic_circuit(res, true); - CircuitProps and_props = get_standard_logic_circuit(res, false); - - info("Logic constraint optimization: ", std::to_string(res), " bits. is_xor: ", logic_flag); - size_t left_gate = xor_props.gate_idxs[0]; - uint32_t left_gate_idx = xor_props.idxs[0]; - size_t right_gate = xor_props.gate_idxs[1]; - uint32_t right_gate_idx = xor_props.idxs[1]; - size_t out_gate = xor_props.gate_idxs[2]; - uint32_t out_gate_idx = xor_props.idxs[2]; - - uint32_t left_idx = this->real_variable_index[this->wires_idxs[cursor + left_gate][left_gate_idx]]; - uint32_t right_idx = this->real_variable_index[this->wires_idxs[cursor + right_gate][right_gate_idx]]; - uint32_t out_idx = this->real_variable_index[this->wires_idxs[cursor + out_gate][out_gate_idx]]; - - STerm left = this->symbolic_vars[left_idx]; - STerm right = this->symbolic_vars[right_idx]; - STerm out = this->symbolic_vars[out_idx]; - - // Initializing the parts of the witness that were optimized - // during the symbolic constraints initialization - // i.e. 
simulating the create_logic_constraint gate by gate using BitVectors/Integers - size_t num_bits = res; - size_t processed_gates = 0; - for (size_t i = num_bits - 1; i < num_bits; i -= 2) { - // 8 here is the number of gates we have to skip to get proper indices - processed_gates += 8; - uint32_t left_quad_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - uint32_t left_lo_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][1]]; - uint32_t left_hi_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][0]]; - processed_gates += 1; - uint32_t right_quad_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - uint32_t right_lo_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][1]]; - uint32_t right_hi_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][0]]; - processed_gates += 1; - uint32_t out_quad_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - uint32_t out_lo_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][1]]; - uint32_t out_hi_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][0]]; - processed_gates += 1; - uint32_t old_left_acc_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - processed_gates += 1; - uint32_t old_right_acc_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - processed_gates += 1; - uint32_t old_out_acc_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - processed_gates += 1; - - this->symbolic_vars[old_left_acc_idx] == (left >> static_cast(i - 1)); - this->symbolic_vars[left_quad_idx] == (this->symbolic_vars[old_left_acc_idx] & 3); - this->symbolic_vars[left_lo_idx] == (this->symbolic_vars[left_quad_idx] & 1); - this->symbolic_vars[left_hi_idx] == (this->symbolic_vars[left_quad_idx] >> 1); - 
this->symbolic_vars[old_right_acc_idx] == (right >> static_cast(i - 1)); - this->symbolic_vars[right_quad_idx] == (this->symbolic_vars[old_right_acc_idx] & 3); - this->symbolic_vars[right_lo_idx] == (this->symbolic_vars[right_quad_idx] & 1); - this->symbolic_vars[right_hi_idx] == (this->symbolic_vars[right_quad_idx] >> 1); - this->symbolic_vars[old_out_acc_idx] == (out >> static_cast(i - 1)); - this->symbolic_vars[out_quad_idx] == (this->symbolic_vars[old_out_acc_idx] & 3); - this->symbolic_vars[out_lo_idx] == (this->symbolic_vars[out_quad_idx] & 1); - this->symbolic_vars[out_hi_idx] == (this->symbolic_vars[out_quad_idx] >> 1); - } - - if (logic_flag) { - (left ^ right) == out; - } else { - (left & right) == out; - } - - // You have to mark these arguments so they won't be optimized out - optimized[left_idx] = false; - optimized[right_idx] = false; - optimized[out_idx] = false; - return cursor + xor_props.num_gates; - } - return res; -} - -/** - * @brief Relaxes range constraints. - * @details This function is needed when we use range compatible - * symbolic terms. - * It compares the chunk of selectors of the current circuit - * with pure create_range_constraint from circuit_builder. - * It uses binary search to find a bit length of the constraint, - * since we don't know it in general. - * After match is found, it updates the cursor to skip all the - * redundant constraints and adds a pure a < 2^bit_length - * constraint to solver. - * If there's no match, it will return -1 - * - * @param cursor current position - * @return next position or -1 - */ -size_t StandardCircuit::handle_range_constraint(size_t cursor) -{ - // Indicates that current bit length is a match - bool range_flag = true; - size_t mid = 0; - auto res = static_cast(-1); - - // Range constraints differ depending on oddness of bit_length - // That's why we need to handle these cases separately - for (size_t odd = 0; odd < 2; odd++) { - // Initialize binary search. 
- // We need to find a match among [1, 127] and then set the result to 2 * mid, or 2 * mid + 1 - size_t beg = 1; - size_t end = 127; - - bool stop_flag = false; - while (beg <= end) { - mid = (end + beg) / 2; - - // Take a pure logic circuit for the current bit length(2 * mid + odd) - // and compare it's selectors to selectors of the global circuit - // at current positin(cursor). - // If they are equal, we can apply an optimization - // However, if we have a match at bit length 2, it is possible - // to have a match at higher bit lengths. That's why we store - // the current match as `res` and proceed with ordinary binary search. - // `stop_flag` simply indicates that the first selector doesn't match - // and we can skip this whole section. - - if (!this->cached_subcircuits[SubcircuitType::RANGE].contains(2 * mid + odd)) { - this->cached_subcircuits[SubcircuitType::RANGE].insert( - { 2 * mid + odd, get_standard_range_constraint_circuit(2 * mid + odd) }); - } - CircuitProps range_props = this->cached_subcircuits[SubcircuitType::RANGE][2 * mid + odd]; - CircuitSchema range_circuit = range_props.circuit; - - range_flag = cursor + range_props.num_gates <= this->get_num_gates(); - if (range_flag) { - for (size_t j = 0; j < range_props.num_gates; j++) { - // It is possible for gates to be equal but wires to be not, but I think it's very - // unlikely to happen - range_flag &= range_circuit.selectors[0][j + range_props.start_gate] == this->selectors[cursor + j]; - - if (!range_flag) { - // Won't match at any bit length - if (j <= 2) { - stop_flag = true; - } - break; - } - } - } - if (stop_flag) { - break; - } - - if (!range_flag) { - end = mid - 1; - } else { - res = 2 * mid + odd; - beg = mid + 1; - } - } - - if (res != static_cast(-1)) { - range_flag = true; - break; - } - } - - if (range_flag) { - info("Range constraint optimization: ", std::to_string(res), " bits"); - CircuitProps range_props = get_standard_range_constraint_circuit(res); - - size_t left_gate = 
range_props.gate_idxs[0]; - uint32_t left_gate_idx = range_props.idxs[0]; - uint32_t left_idx = this->real_variable_index[this->wires_idxs[cursor + left_gate][left_gate_idx]]; - - STerm left = this->symbolic_vars[left_idx]; - - // preserving shifted values - // we need this because even right shifts do not create - // any additional gates and therefore are undetectible - - // Simulate the range constraint circuit using the bitwise operations - size_t num_bits = res; - size_t num_quads = num_bits >> 1; - num_quads += num_bits & 1; - uint32_t processed_gates = 0; - - // Initializing the parts of the witness that were optimized - // during the symbolic constraints initialization - // i.e. simulating the decompose_into_base4_accumulators gate by gate using BitVectors/Integers - for (size_t i = num_quads - 1; i < num_quads; i--) { - uint32_t lo_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][0]]; - processed_gates += 1; - uint32_t quad_idx = 0; - uint32_t old_accumulator_idx = 0; - uint32_t hi_idx = 0; - - if (i == num_quads - 1 && ((num_bits & 1) == 1)) { - quad_idx = lo_idx; - } else { - hi_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][0]]; - processed_gates += 1; - quad_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - processed_gates += 1; - } - - if (i == num_quads - 1) { - old_accumulator_idx = quad_idx; - } else { - old_accumulator_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - processed_gates += 1; - } - - this->symbolic_vars[old_accumulator_idx] == (left >> static_cast(2 * i)); - this->symbolic_vars[quad_idx] == (this->symbolic_vars[old_accumulator_idx] & 3); - this->symbolic_vars[lo_idx] == (this->symbolic_vars[quad_idx] & 1); - if (i != (num_quads - 1) || ((num_bits)&1) != 1) { - this->symbolic_vars[hi_idx] == (this->symbolic_vars[quad_idx] >> 1); - } - } - - left <= (bb::fr(2).pow(res) - 1); - - // You have to mark these arguments so 
they won't be optimized out - optimized[left_idx] = false; - return cursor + range_props.num_gates; - } - return res; -} - -/** - * @brief Relaxes shr constraints. - * @details This function is needed when we use bitwise compatible - * symbolic terms. - * It compares the chunk of selectors of the current circuit - * with pure shift left from uint/logic.cpp - * After a match is found, it updates the cursor to skip all the - * redundant constraints and adds a pure b = a >> n - * constraint to solver. - * If there's no match, it will return -1 - * - * @param cursor current position - * @return next position or -1 - */ -size_t StandardCircuit::handle_shr_constraint(size_t cursor) -{ - auto res = static_cast(-1); - - // Take a pure shr circuit for the current bit length - // and compare it's selectors to selectors of the global circuit - // at current position(cursor). - // If they are equal, we can apply an optimization - // However, if we have a match at bit length 2, it is possible - // to have a match at higher bit lengths. That's why we store - // the current match as `res` and proceed with ordinary binary search. - // and we can skip this whole section. 
- // The key is simply two bytes: uint type and sh - - const auto find_nr = [this, &cursor](auto& n, bool& shr_flag) { - // Since shift right for even values of shift is pointless to check - // we iterate only over odd ones - for (uint32_t r = 1; r < static_cast(n); r += 2) { - uint32_t key = static_cast(n) + 256 * r; - if (!this->cached_subcircuits[SubcircuitType::SHR].contains(key)) { - this->cached_subcircuits[SubcircuitType::SHR].insert({ key, get_standard_shift_right_circuit(n, r) }); - } - CircuitProps shr_props = this->cached_subcircuits[SubcircuitType::SHR][key]; - CircuitSchema shr_circuit = shr_props.circuit; - - shr_flag = cursor + shr_props.num_gates <= this->selectors.size(); - if (!shr_flag) { - continue; - } - - for (size_t j = 0; j < shr_props.num_gates; j++) { - // It is possible for gates to be equal but wires to be not, but I think it's very - // unlikely to happen - shr_flag &= shr_circuit.selectors[0][j + shr_props.start_gate] == this->selectors[cursor + j]; - - if (!shr_flag) { - break; - } - } - if (shr_flag) { - return std::pair(n, r); - } - } - return std::pair(-1, -1); - }; - - bool shr_flag = false; - std::pair nr; - - if (!shr_flag) { - unsigned char n = 8; - nr = find_nr(n, shr_flag); - } - if (!shr_flag) { - uint16_t n = 16; - nr = find_nr(n, shr_flag); - } - if (!shr_flag) { - uint32_t n = 32; - nr = find_nr(n, shr_flag); - } - if (!shr_flag) { - uint64_t n = 64; - nr = find_nr(n, shr_flag); - } - - if (shr_flag) { - info("SHR constraint optimization: ", - std::to_string(nr.first), - " bits ,", - std::to_string(nr.second), - " shift right"); - CircuitProps shr_props = this->cached_subcircuits[SubcircuitType::SHR][nr.first + 256 * nr.second]; - - size_t left_gate = shr_props.gate_idxs[0]; - uint32_t left_gate_idx = shr_props.idxs[0]; - uint32_t left_idx = this->real_variable_index[this->wires_idxs[cursor + left_gate][left_gate_idx]]; - - size_t out_gate = shr_props.gate_idxs[1]; - uint32_t out_gate_idx = shr_props.idxs[1]; - uint32_t 
out_idx = this->real_variable_index[this->wires_idxs[cursor + out_gate][out_gate_idx]]; - - STerm left = this->symbolic_vars[left_idx]; - STerm out = this->symbolic_vars[out_idx]; - - // Initializing the parts of the witness that were optimized - // during the symbolic constraints initialization - // i.e. simulating the uint's operator>> gate by gate using BitVectors/Integers - uint32_t shift = nr.second; - if ((shift & 1) == 1) { - size_t processed_gates = 0; - uint32_t c_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][0]]; - uint32_t delta_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - this->symbolic_vars[delta_idx] == (this->symbolic_vars[c_idx] & 3); - STerm delta = this->symbolic_vars[delta_idx]; - processed_gates += 1; - uint32_t r0_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - - // this->symbolic_vars[r0_idx] == (-2 * delta * delta + 9 * delta - 7); - this->post_process.insert({ r0_idx, { delta_idx, delta_idx, -2, 9, 0, -7 } }); - - processed_gates += 1; - uint32_t r1_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - this->symbolic_vars[r1_idx] == (delta >> 1) * 6; - processed_gates += 1; - uint32_t r2_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - this->symbolic_vars[r2_idx] == (left >> shift) * 6; - processed_gates += 1; - uint32_t temp_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - - // this->symbolic_vars[temp_idx] == -6 * out; - this->post_process.insert({ temp_idx, { out_idx, out_idx, 0, -6, 0, 0 } }); - } - - STerm shred = left >> nr.second; - out == shred; - - // You have to mark these arguments so they won't be optimized out - optimized[left_idx] = false; - optimized[out_idx] = false; - return cursor + shr_props.num_gates; - } - return res; -} -/** - * @brief Relaxes shl constraints. 
- * @details This function is needed when we use bitwise compatible - * symbolic terms. - * It compares the chunk of selectors of the current circuit - * with pure shift left from uint/logic.cpp - * After a match is found, it updates the cursor to skip all the - * redundant constraints and adds a pure b = a << n - * constraint to solver. - * If there's no match, it will return -1 - * - * @param cursor current position - * @return next position or -1 - */ -size_t StandardCircuit::handle_shl_constraint(size_t cursor) -{ - auto res = static_cast(-1); - - // Take a pure shl circuit for the current bit length - // and compare it's selectors to selectors of the global circuit - // at current position(cursor). - // If they are equal, we can apply an optimization - // However, if we have a match at bit length 2, it is possible - // to have a match at higher bit lengths. That's why we store - // the current match as `res` and proceed with ordinary binary search. - // and we can skip this whole section. 
- // The key is simply two bytes: uint type and sh - - const auto find_nr = [this, &cursor](auto& n, bool& shl_flag) { - for (uint32_t r = 1; r < static_cast(n); r++) { - uint32_t key = static_cast(n) + 256 * r; - if (!this->cached_subcircuits[SubcircuitType::SHL].contains(key)) { - this->cached_subcircuits[SubcircuitType::SHL].insert({ key, get_standard_shift_left_circuit(n, r) }); - } - CircuitProps shl_props = this->cached_subcircuits[SubcircuitType::SHL][key]; - CircuitSchema shl_circuit = shl_props.circuit; - - shl_flag = cursor + shl_props.num_gates <= this->selectors.size(); - if (!shl_flag) { - continue; - } - - for (size_t j = 0; j < shl_props.num_gates; j++) { - // It is possible for gates to be equal but wires to be not, but I think it's very - // unlikely to happen - shl_flag &= shl_circuit.selectors[0][j + shl_props.start_gate] == this->selectors[cursor + j]; - - if (!shl_flag) { - break; - } - } - if (shl_flag) { - return std::pair(n, r); - } - } - return std::pair(-1, -1); - }; - - bool shl_flag = false; - std::pair nr; - - if (!shl_flag) { - unsigned char n = 8; - nr = find_nr(n, shl_flag); - } - if (!shl_flag) { - uint16_t n = 16; - nr = find_nr(n, shl_flag); - } - if (!shl_flag) { - uint32_t n = 32; - nr = find_nr(n, shl_flag); - } - if (!shl_flag) { - uint64_t n = 64; - nr = find_nr(n, shl_flag); - } - - if (shl_flag) { - info("SHL constraint optimization: ", - std::to_string(nr.first), - " bits ,", - std::to_string(nr.second), - " shift left"); - CircuitProps shl_props = this->cached_subcircuits[SubcircuitType::SHL][nr.first + 256 * nr.second]; - - size_t left_gate = shl_props.gate_idxs[0]; - uint32_t left_gate_idx = shl_props.idxs[0]; - uint32_t left_idx = this->real_variable_index[this->wires_idxs[cursor + left_gate][left_gate_idx]]; - - size_t out_gate = shl_props.gate_idxs[1]; - uint32_t out_gate_idx = shl_props.idxs[1]; - uint32_t out_idx = this->real_variable_index[this->wires_idxs[cursor + out_gate][out_gate_idx]]; - - STerm left = 
this->symbolic_vars[left_idx]; - STerm out = this->symbolic_vars[out_idx]; - - // Initializing the parts of the witness that were optimized - // during the symbolic constraints initialization - // i.e. simulating the uint's operator<< gate by gate using BitVectors/Integers - uint32_t num_bits = nr.first; - uint32_t shift = nr.second; - if ((shift & 1) == 1) { - size_t processed_gates = 0; - uint32_t c_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][0]]; - uint32_t delta_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - this->symbolic_vars[delta_idx] == (this->symbolic_vars[c_idx] & 3); - STerm delta = this->symbolic_vars[delta_idx]; - processed_gates += 1; - uint32_t r0_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - - // this->symbolic_vars[r0_idx] == (-2 * delta * delta + 9 * delta - 7); - this->post_process.insert({ r0_idx, { delta_idx, delta_idx, -2, 9, 0, -7 } }); - - processed_gates += 1; - uint32_t r1_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - this->symbolic_vars[r1_idx] == (delta >> 1) * 6; - processed_gates += 1; - uint32_t r2_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - this->symbolic_vars[r2_idx] == (left >> (num_bits - shift)) * 6; - processed_gates += 1; - uint32_t temp_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - - // this->symbolic_vraiables[temp_idx] == -6 * r2 - this->post_process.insert({ temp_idx, { r2_idx, r2_idx, 0, -1, 0, 0 } }); - } - - STerm shled = (left << shift) & (bb::fr(2).pow(num_bits) - 1); - out == shled; - - // You have to mark these arguments so they won't be optimized out - optimized[left_idx] = false; - optimized[out_idx] = false; - return cursor + shl_props.num_gates; - } - return res; -} - -/** - * @brief Relaxes ror constraints. - * @details This function is needed when we use bitwise compatible - * symbolic terms. 
- * It compares the chunk of selectors of the current circuit - * with pure ror from uint/logic.cpp - * After a match is found, it updates the cursor to skip all the - * redundant constraints and adds a pure b = a.ror(n) - * constraint to solver. - * If there's no match, it will return -1 - * - * @param cursor current position - * @return next position or -1 - */ -size_t StandardCircuit::handle_ror_constraint(size_t cursor) -{ - auto res = static_cast(-1); - - // Take a pure ror circuit for the current bit length - // and compare it's selectors to selectors of the global circuit - // at current position(cursor). - // If they are equal, we can apply an optimization - // However, if we have a match at bit length 2, it is possible - // to have a match at higher bit lengths. That's why we store - // the current match as `res` and proceed with ordinary binary search. - // and we can skip this whole section. - // The key is simply two bytes: uint type and sh - - const auto find_nr = [this, &cursor](auto& n, bool& ror_flag) { - for (uint32_t r = 1; r < static_cast(n); r++) { - uint32_t key = static_cast(n) + 256 * r; - if (!this->cached_subcircuits[SubcircuitType::ROR].contains(key)) { - this->cached_subcircuits[SubcircuitType::ROR].insert({ key, get_standard_ror_circuit(n, r) }); - } - CircuitProps ror_props = this->cached_subcircuits[SubcircuitType::ROR][key]; - CircuitSchema ror_circuit = ror_props.circuit; - - ror_flag = cursor + ror_props.num_gates <= this->selectors.size(); - if (!ror_flag) { - continue; - } - - for (size_t j = 0; j < ror_props.num_gates; j++) { - // It is possible for gates to be equal but wires to be not, but I think it's very - // unlikely to happen - ror_flag &= ror_circuit.selectors[0][j + ror_props.start_gate] == this->selectors[cursor + j]; - - if (!ror_flag) { - break; - } - } - if (ror_flag) { - return std::pair(n, r); - } - } - return std::pair(-1, -1); - }; - - bool ror_flag = false; - std::pair nr; - - if (!ror_flag) { - unsigned char n 
= 8; - nr = find_nr(n, ror_flag); - } - if (!ror_flag) { - uint16_t n = 16; - nr = find_nr(n, ror_flag); - } - if (!ror_flag) { - uint32_t n = 32; - nr = find_nr(n, ror_flag); - } - if (!ror_flag) { - uint64_t n = 64; - nr = find_nr(n, ror_flag); - } - - if (ror_flag) { - info("ROR constraint optimization: ", - std::to_string(nr.first), - " bits ,", - std::to_string(nr.second), - " rotation right"); - CircuitProps ror_props = this->cached_subcircuits[SubcircuitType::ROR][nr.first + 256 * nr.second]; - - size_t left_gate = ror_props.gate_idxs[0]; - uint32_t left_gate_idx = ror_props.idxs[0]; - uint32_t left_idx = this->real_variable_index[this->wires_idxs[cursor + left_gate][left_gate_idx]]; - - size_t out_gate = ror_props.gate_idxs[1]; - uint32_t out_gate_idx = ror_props.idxs[1]; - uint32_t out_idx = this->real_variable_index[this->wires_idxs[cursor + out_gate][out_gate_idx]]; - - STerm left = this->symbolic_vars[left_idx]; - STerm out = this->symbolic_vars[out_idx]; - - // Initializing the parts of the witness that were optimized - // during the symbolic constraints initialization - // i.e. 
simulating the uint's rotate_right gate by gate using BitVectors/Integers - uint32_t num_bits = nr.first; - uint32_t rotation = nr.second; - if ((rotation & 1) == 1) { - size_t processed_gates = 0; - uint32_t c_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][0]]; - uint32_t delta_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - this->symbolic_vars[delta_idx] == (this->symbolic_vars[c_idx] & 3); - STerm delta = this->symbolic_vars[delta_idx]; - processed_gates += 1; - uint32_t r0_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - - // this->symbolic_vars[r0_idx] == (-2 * delta * delta + 9 * delta - 7); - this->post_process.insert({ r0_idx, { delta_idx, delta_idx, -2, 9, 0, -7 } }); - - processed_gates += 1; - uint32_t r1_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - this->symbolic_vars[r1_idx] == (delta >> 1) * 6; - processed_gates += 1; - uint32_t r2_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - this->symbolic_vars[r2_idx] == (left >> rotation) * 6; - processed_gates += 1; - uint32_t temp_idx = this->real_variable_index[this->wires_idxs[cursor + processed_gates][2]]; - - // this->symbolic_vraiables[temp_idx] == -6 * r2 - this->post_process.insert({ temp_idx, { r2_idx, r2_idx, 0, -1, 0, 0 } }); - } - - STerm rored = ((left >> rotation) | (left << (num_bits - rotation))) & (bb::fr(2).pow(num_bits) - 1); - out == rored; - - // You have to mark these arguments so they won't be optimized out - optimized[left_idx] = false; - optimized[out_idx] = false; - return cursor + ror_props.num_gates; - } - return res; -} - -/** - * @brief Similar functionality to old .check_circuit() method - * in standard circuit builder. 
- * - * @param witness - * @return true - * @return false - */ -bool StandardCircuit::simulate_circuit_eval(std::vector& witness) const -{ - if (witness.size() != this->get_num_vars()) { - throw std::invalid_argument("Witness size should be " + std::to_string(this->get_num_vars()) + ", not " + - std::to_string(witness.size())); - } - for (size_t i = 0; i < this->selectors.size(); i++) { - bb::fr res = 0; - bb::fr x = witness[this->wires_idxs[i][0]]; - bb::fr y = witness[this->wires_idxs[i][1]]; - bb::fr o = witness[this->wires_idxs[i][2]]; - res += this->selectors[i][0] * x * y; - res += this->selectors[i][1] * x; - res += this->selectors[i][2] * y; - res += this->selectors[i][3] * o; - res += this->selectors[i][4]; - if (res != 0) { - return false; - } - } - return true; -} - -/** - * @brief Check your circuit for witness uniqueness - * - * @details Creates two Circuit objects that represent the same - * circuit, however you can choose which variables should be (not) equal in both cases, - * and also the variables that should (not) be equal at the same time. 
- * - * @param circuit_info - * @param s pointer to the global solver - * @param equal The list of names of variables which should be equal in both circuits(each is equal) - * @param not_equal The list of names of variables which should not be equal in both circuits(each is not equal) - * @param equal_at_the_same_time The list of variables, where at least one pair has to be equal - * @param not_equal_at_the_same_time The list of variables, where at least one pair has to be distinct - * @return std::pair - */ -std::pair StandardCircuit::unique_witness_ext( - CircuitSchema& circuit_info, - Solver* s, - TermType type, - const std::vector& equal, - const std::vector& not_equal, - const std::vector& equal_at_the_same_time, - const std::vector& not_equal_at_the_same_time, - bool enable_optimizations) -{ - StandardCircuit c1(circuit_info, s, type, "circuit1", enable_optimizations); - StandardCircuit c2(circuit_info, s, type, "circuit2", enable_optimizations); - - for (const auto& term : equal) { - c1[term] == c2[term]; - } - for (const auto& term : not_equal) { - c1[term] != c2[term]; - } - - std::vector eqs; - for (const auto& term : equal_at_the_same_time) { - Bool tmp = Bool(c1[term]) == Bool(c2[term]); - eqs.push_back(tmp); - } - - if (eqs.size() > 1) { - batch_or(eqs).assert_term(); - } else if (eqs.size() == 1) { - eqs[0].assert_term(); - } - - std::vector neqs; - for (const auto& term : not_equal_at_the_same_time) { - Bool tmp = Bool(c1[term]) != Bool(c2[term]); - neqs.push_back(tmp); - } - - if (neqs.size() > 1) { - batch_or(neqs).assert_term(); - } else if (neqs.size() == 1) { - neqs[0].assert_term(); - } - return { c1, c2 }; -} - -/** - * @brief Check your circuit for witness uniqueness - * - * @details Creates two Circuit objects that represent the same - * circuit, however you can choose which variables should be equal in both cases, - * other witness members will be marked as not equal at the same time - * or basically they will have to differ by at least one 
element. - * - * @param circuit_info - * @param s pointer to the global solver - * @param equal The list of names of variables which should be equal in both circuits(each is equal) - * @return std::pair - */ -std::pair StandardCircuit::unique_witness(CircuitSchema& circuit_info, - Solver* s, - TermType type, - const std::vector& equal, - bool enable_optimizations) -{ - StandardCircuit c1(circuit_info, s, type, "circuit1", enable_optimizations); - StandardCircuit c2(circuit_info, s, type, "circuit2", enable_optimizations); - - for (const auto& term : equal) { - c1[term] == c2[term]; - } - - std::vector neqs; - for (const auto& node : c1.symbolic_vars) { - uint32_t i = node.first; - if (std::find(equal.begin(), equal.end(), std::string(c1.variable_names[i])) != equal.end()) { - continue; - } - if (c1.optimized[i]) { - continue; - } - Bool tmp = Bool(c1[i]) != Bool(c2[i]); - neqs.push_back(tmp); - } - - if (neqs.size() > 1) { - batch_or(neqs).assert_term(); - } else if (neqs.size() == 1) { - neqs[0].assert_term(); - } - return { c1, c2 }; -} -}; // namespace smt_circuit \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/circuit/standard_circuit.hpp b/barretenberg/cpp/src/barretenberg/smt_verification/circuit/standard_circuit.hpp deleted file mode 100644 index 967c7ffdad8b..000000000000 --- a/barretenberg/cpp/src/barretenberg/smt_verification/circuit/standard_circuit.hpp +++ /dev/null @@ -1,51 +0,0 @@ -#pragma once -#include "circuit_base.hpp" - -namespace smt_circuit { - -/** - * @brief Symbolic Circuit class for Standard Circuit Builder. - * - * @details Contains all the information about the circuit: gates, variables, - * symbolic variables, specified names and global solver. 
- */ -class StandardCircuit : public CircuitBase { - public: - std::vector> selectors; // selectors from the circuit - std::vector> wires_idxs; // values of the gates' wires - - explicit StandardCircuit(CircuitSchema& circuit_info, - Solver* solver, - TermType type = TermType::FFTerm, - const std::string& tag = "", - bool enable_optimizations = true); - - inline size_t get_num_gates() const { return selectors.size(); }; - - size_t prepare_gates(size_t cursor); - bool simulate_circuit_eval(std::vector& witness) const override; - - void handle_univariate_constraint(bb::fr q_m, bb::fr q_1, bb::fr q_2, bb::fr q_3, bb::fr q_c, uint32_t w); - size_t handle_logic_constraint(size_t cursor); - size_t handle_range_constraint(size_t cursor); - size_t handle_ror_constraint(size_t cursor); - size_t handle_shr_constraint(size_t cursor); - size_t handle_shl_constraint(size_t cursor); - - static std::pair unique_witness_ext( - CircuitSchema& circuit_info, - Solver* s, - TermType type, - const std::vector& equal = {}, - const std::vector& not_equal = {}, - const std::vector& equal_at_the_same_time = {}, - const std::vector& not_equal_at_the_same_time = {}, - bool enable_optimizations = false); - - static std::pair unique_witness(CircuitSchema& circuit_info, - Solver* s, - TermType type, - const std::vector& equal = {}, - bool enable_optimizations = false); -}; -}; // namespace smt_circuit \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/circuit/ultra_circuit.cpp b/barretenberg/cpp/src/barretenberg/smt_verification/circuit/ultra_circuit.cpp index 28f76200fb04..9df738cc61c2 100644 --- a/barretenberg/cpp/src/barretenberg/smt_verification/circuit/ultra_circuit.cpp +++ b/barretenberg/cpp/src/barretenberg/smt_verification/circuit/ultra_circuit.cpp @@ -53,11 +53,6 @@ UltraCircuit::UltraCircuit(CircuitSchema& circuit_info, lookup_cursor = this->handle_lookup_relation(lookup_cursor); } - size_t memory_cursor = 0; - while (memory_cursor < 
this->selectors[BlockType::MEMORY].size()) { - memory_cursor = this->handle_memory_relation(memory_cursor); - } - size_t nnf_cursor = 0; while (nnf_cursor < this->selectors[BlockType::NNF].size()) { nnf_cursor = this->handle_nnf_relation(nnf_cursor); @@ -84,20 +79,20 @@ UltraCircuit::UltraCircuit(CircuitSchema& circuit_info, */ size_t UltraCircuit::handle_arithmetic_relation(size_t cursor) { - bb::fr q_m = this->selectors[BlockType::ARITHMETIC][cursor][0]; - bb::fr q_l = this->selectors[BlockType::ARITHMETIC][cursor][1]; - bb::fr q_r = this->selectors[BlockType::ARITHMETIC][cursor][2]; - bb::fr q_o = this->selectors[BlockType::ARITHMETIC][cursor][3]; - bb::fr q_4 = this->selectors[BlockType::ARITHMETIC][cursor][4]; - bb::fr q_c = this->selectors[BlockType::ARITHMETIC][cursor][5]; - bb::fr q_arith = this->selectors[BlockType::ARITHMETIC][cursor][6]; - - uint32_t w_l_idx = this->wires_idxs[BlockType::ARITHMETIC][cursor][0]; - uint32_t w_r_idx = this->wires_idxs[BlockType::ARITHMETIC][cursor][1]; - uint32_t w_o_idx = this->wires_idxs[BlockType::ARITHMETIC][cursor][2]; - uint32_t w_4_idx = this->wires_idxs[BlockType::ARITHMETIC][cursor][3]; - uint32_t w_l_shift_idx = this->wires_idxs[BlockType::ARITHMETIC][cursor][4]; - uint32_t w_4_shift_idx = this->wires_idxs[BlockType::ARITHMETIC][cursor][7]; + bb::fr q_m = this->selectors[BlockType::ARITHMETIC][cursor][SelectorType::q_m]; + bb::fr q_l = this->selectors[BlockType::ARITHMETIC][cursor][SelectorType::q_1]; + bb::fr q_r = this->selectors[BlockType::ARITHMETIC][cursor][SelectorType::q_2]; + bb::fr q_o = this->selectors[BlockType::ARITHMETIC][cursor][SelectorType::q_3]; + bb::fr q_4 = this->selectors[BlockType::ARITHMETIC][cursor][SelectorType::q_4]; + bb::fr q_c = this->selectors[BlockType::ARITHMETIC][cursor][SelectorType::q_c]; + bb::fr q_arith = this->selectors[BlockType::ARITHMETIC][cursor][SelectorType::q_arith]; + + uint32_t w_l_idx = this->wires_idxs[BlockType::ARITHMETIC][cursor][WireType::w_l]; + uint32_t 
w_r_idx = this->wires_idxs[BlockType::ARITHMETIC][cursor][WireType::w_r]; + uint32_t w_o_idx = this->wires_idxs[BlockType::ARITHMETIC][cursor][WireType::w_o]; + uint32_t w_4_idx = this->wires_idxs[BlockType::ARITHMETIC][cursor][WireType::w_4]; + uint32_t w_l_shift_idx = this->wires_idxs[BlockType::ARITHMETIC][cursor][WireType::w_l_shift]; + uint32_t w_4_shift_idx = this->wires_idxs[BlockType::ARITHMETIC][cursor][WireType::w_4_shift]; STerm w_l = this->symbolic_vars[w_l_idx]; STerm w_r = this->symbolic_vars[w_r_idx]; @@ -156,6 +151,7 @@ size_t UltraCircuit::handle_arithmetic_relation(size_t cursor) optimized[w_r_idx] = false; optimized[w_o_idx] = false; optimized[w_4_idx] = false; + optimized[w_4_shift_idx] = false; } if (q_arith * (q_arith - 1) * (q_arith - 2) != 0) { @@ -201,7 +197,7 @@ void UltraCircuit::process_new_table(uint32_t table_idx) this->tables_sizes.insert({ table_idx, new_table.size() }); info(table_name, RESET); - SymSet new_stable(new_table, this->tag + table_name); + SymSet new_stable(new_table, table_name + this->tag); this->cached_symbolic_tables.insert({ table_idx, new_stable }); } @@ -215,22 +211,22 @@ void UltraCircuit::process_new_table(uint32_t table_idx) */ size_t UltraCircuit::handle_lookup_relation(size_t cursor) { - bb::fr q_m = this->selectors[BlockType::LOOKUP][cursor][0]; - bb::fr q_r = this->selectors[BlockType::LOOKUP][cursor][2]; - bb::fr q_o = this->selectors[BlockType::LOOKUP][cursor][3]; - bb::fr q_c = this->selectors[BlockType::LOOKUP][cursor][5]; - bb::fr q_lookup = this->selectors[BlockType::LOOKUP][cursor][10]; + bb::fr q_m = this->selectors[BlockType::LOOKUP][cursor][SelectorType::q_m]; + bb::fr q_r = this->selectors[BlockType::LOOKUP][cursor][SelectorType::q_2]; + bb::fr q_o = this->selectors[BlockType::LOOKUP][cursor][SelectorType::q_3]; + bb::fr q_c = this->selectors[BlockType::LOOKUP][cursor][SelectorType::q_c]; + bb::fr q_lookup = this->selectors[BlockType::LOOKUP][cursor][SelectorType::q_lookup]; if 
(q_lookup.is_zero()) { return cursor + 1; } - uint32_t w_l_idx = this->wires_idxs[BlockType::LOOKUP][cursor][0]; - uint32_t w_r_idx = this->wires_idxs[BlockType::LOOKUP][cursor][1]; - uint32_t w_o_idx = this->wires_idxs[BlockType::LOOKUP][cursor][2]; - uint32_t w_l_shift_idx = this->wires_idxs[BlockType::LOOKUP][cursor][4]; - uint32_t w_r_shift_idx = this->wires_idxs[BlockType::LOOKUP][cursor][5]; - uint32_t w_o_shift_idx = this->wires_idxs[BlockType::LOOKUP][cursor][6]; + uint32_t w_l_idx = this->wires_idxs[BlockType::LOOKUP][cursor][WireType::w_l]; + uint32_t w_r_idx = this->wires_idxs[BlockType::LOOKUP][cursor][WireType::w_r]; + uint32_t w_o_idx = this->wires_idxs[BlockType::LOOKUP][cursor][WireType::w_o]; + uint32_t w_l_shift_idx = this->wires_idxs[BlockType::LOOKUP][cursor][WireType::w_l_shift]; + uint32_t w_r_shift_idx = this->wires_idxs[BlockType::LOOKUP][cursor][WireType::w_r_shift]; + uint32_t w_o_shift_idx = this->wires_idxs[BlockType::LOOKUP][cursor][WireType::w_o_shift]; optimized[w_l_idx] = false; optimized[w_r_idx] = false; @@ -309,19 +305,19 @@ size_t UltraCircuit::handle_lookup_relation(size_t cursor) */ size_t UltraCircuit::handle_elliptic_relation(size_t cursor) { - bb::fr q_is_double = this->selectors[BlockType::ELLIPTIC][cursor][0]; - bb::fr q_sign = this->selectors[BlockType::ELLIPTIC][cursor][1]; - bb::fr q_elliptic = this->selectors[BlockType::ELLIPTIC][cursor][8]; + bb::fr q_is_double = this->selectors[BlockType::ELLIPTIC][cursor][SelectorType::q_m]; + bb::fr q_sign = this->selectors[BlockType::ELLIPTIC][cursor][SelectorType::q_1]; + bb::fr q_elliptic = this->selectors[BlockType::ELLIPTIC][cursor][SelectorType::q_elliptic]; if (q_elliptic.is_zero()) { return cursor + 1; } - uint32_t w_r_idx = this->wires_idxs[BlockType::ELLIPTIC][cursor][1]; - uint32_t w_o_idx = this->wires_idxs[BlockType::ELLIPTIC][cursor][2]; - uint32_t w_l_shift_idx = this->wires_idxs[BlockType::ELLIPTIC][cursor][4]; - uint32_t w_r_shift_idx = 
this->wires_idxs[BlockType::ELLIPTIC][cursor][5]; - uint32_t w_o_shift_idx = this->wires_idxs[BlockType::ELLIPTIC][cursor][6]; - uint32_t w_4_shift_idx = this->wires_idxs[BlockType::ELLIPTIC][cursor][7]; + uint32_t w_r_idx = this->wires_idxs[BlockType::ELLIPTIC][cursor][WireType::w_r]; + uint32_t w_o_idx = this->wires_idxs[BlockType::ELLIPTIC][cursor][WireType::w_o]; + uint32_t w_l_shift_idx = this->wires_idxs[BlockType::ELLIPTIC][cursor][WireType::w_l_shift]; + uint32_t w_r_shift_idx = this->wires_idxs[BlockType::ELLIPTIC][cursor][WireType::w_r_shift]; + uint32_t w_o_shift_idx = this->wires_idxs[BlockType::ELLIPTIC][cursor][WireType::w_o_shift]; + uint32_t w_4_shift_idx = this->wires_idxs[BlockType::ELLIPTIC][cursor][WireType::w_4_shift]; optimized[w_r_idx] = false; optimized[w_o_idx] = false; optimized[w_l_shift_idx] = false; @@ -351,7 +347,7 @@ size_t UltraCircuit::handle_elliptic_relation(size_t cursor) y_add_identity == 0; // scaling_factor = 1 } - bb::fr curve_b = this->selectors[BlockType::ELLIPTIC][cursor][11]; + bb::fr curve_b = this->selectors[BlockType::ELLIPTIC][cursor][SelectorType::curve_b]; auto x_pow_4 = (y1_sqr - curve_b) * x_1; auto y1_sqr_mul_4 = y1_sqr + y1_sqr; y1_sqr_mul_4 += y1_sqr_mul_4; @@ -378,16 +374,16 @@ size_t UltraCircuit::handle_elliptic_relation(size_t cursor) */ size_t UltraCircuit::handle_delta_range_relation(size_t cursor) { - bb::fr q_delta_range = this->selectors[BlockType::DELTA_RANGE][cursor][7]; + bb::fr q_delta_range = this->selectors[BlockType::DELTA_RANGE][cursor][SelectorType::q_delta_range]; if (q_delta_range == 0) { return cursor + 1; } - uint32_t w_l_idx = this->wires_idxs[BlockType::DELTA_RANGE][cursor][0]; - uint32_t w_r_idx = this->wires_idxs[BlockType::DELTA_RANGE][cursor][1]; - uint32_t w_o_idx = this->wires_idxs[BlockType::DELTA_RANGE][cursor][2]; - uint32_t w_4_idx = this->wires_idxs[BlockType::DELTA_RANGE][cursor][3]; - uint32_t w_l_shift_idx = this->wires_idxs[BlockType::DELTA_RANGE][cursor][4]; + uint32_t 
w_l_idx = this->wires_idxs[BlockType::DELTA_RANGE][cursor][WireType::w_l]; + uint32_t w_r_idx = this->wires_idxs[BlockType::DELTA_RANGE][cursor][WireType::w_r]; + uint32_t w_o_idx = this->wires_idxs[BlockType::DELTA_RANGE][cursor][WireType::w_o]; + uint32_t w_4_idx = this->wires_idxs[BlockType::DELTA_RANGE][cursor][WireType::w_4]; + uint32_t w_l_shift_idx = this->wires_idxs[BlockType::DELTA_RANGE][cursor][WireType::w_l_shift]; STerm w_1 = this->symbolic_vars[w_l_idx]; STerm w_2 = this->symbolic_vars[w_r_idx]; @@ -435,10 +431,10 @@ void UltraCircuit::handle_range_constraints() if (this->type == TermType::FFTerm || !this->enable_optimizations) { if (!this->cached_range_tables.contains(range)) { std::vector new_range_table; - for (size_t entry = 0; entry < range; entry++) { + for (size_t entry = 0; entry <= range; entry++) { new_range_table.push_back(STerm(entry, this->solver, this->type)); } - std::string table_name = this->tag + "RANGE_" + std::to_string(range); + std::string table_name = "RANGE_" + std::to_string(range) + this->tag; SymSet new_range_stable(new_range_table, table_name); info(RED, "Initialized new range: ", table_name, RESET); this->cached_range_tables.insert({ range, new_range_stable }); @@ -452,75 +448,6 @@ void UltraCircuit::handle_range_constraints() } } -/** - * @brief Adds all the memory constraints to the solver. 
- * - * @param cursor current selector - * @return new cursor value - */ - -size_t UltraCircuit::handle_memory_relation(size_t cursor) -{ - // Note: all of the hardcoded indices for extracting components in this module seem to be wrong/outdated - bb::fr q_memory = this->selectors[BlockType::MEMORY][cursor][9]; - if (q_memory == 0) { - return cursor + 1; - } - - uint32_t w_l_idx = this->wires_idxs[BlockType::MEMORY][cursor][0]; - uint32_t w_r_idx = this->wires_idxs[BlockType::MEMORY][cursor][1]; - uint32_t w_o_idx = this->wires_idxs[BlockType::MEMORY][cursor][2]; - uint32_t w_4_idx = this->wires_idxs[BlockType::MEMORY][cursor][3]; - uint32_t w_l_shift_idx = this->wires_idxs[BlockType::MEMORY][cursor][4]; - uint32_t w_r_shift_idx = this->wires_idxs[BlockType::MEMORY][cursor][5]; - uint32_t w_o_shift_idx = this->wires_idxs[BlockType::MEMORY][cursor][6]; - uint32_t w_4_shift_idx = this->wires_idxs[BlockType::MEMORY][cursor][7]; - - STerm w_1 = this->symbolic_vars[w_l_idx]; - STerm w_2 = this->symbolic_vars[w_r_idx]; - STerm w_3 = this->symbolic_vars[w_o_idx]; - STerm w_4 = this->symbolic_vars[w_4_idx]; - STerm w_1_shift = this->symbolic_vars[w_l_shift_idx]; - STerm w_2_shift = this->symbolic_vars[w_r_shift_idx]; - STerm w_3_shift = this->symbolic_vars[w_o_shift_idx]; - STerm w_4_shift = this->symbolic_vars[w_4_shift_idx]; - - bb::fr q_m = this->selectors[BlockType::MEMORY][cursor][0]; - bb::fr q_1 = this->selectors[BlockType::MEMORY][cursor][1]; - bb::fr q_2 = this->selectors[BlockType::MEMORY][cursor][2]; - bb::fr q_3 = this->selectors[BlockType::MEMORY][cursor][3]; - bb::fr q_4 = this->selectors[BlockType::MEMORY][cursor][4]; - - // reassure that only one entry - size_t entry_flag = 0; - - // Skip RAM/ROM relations here - if (q_1 != 0 && q_m != 0) { - entry_flag += 1; - // RAM/ROM access gate - } - - if (q_1 != 0 && q_4 != 0) { - entry_flag += 1; - // RAM timestamp check - } - - if (q_1 != 0 && q_2 != 0) { - entry_flag += 1; - // ROM consistency check - } - - if 
(q_3) { - entry_flag += 1; - // RAM consistency check - } - - if (entry_flag > 1) { - throw std::runtime_error("Double entry in AUX"); - } - return cursor + 1; -} - /** * @brief Adds all the nnf constraints to the solver. * @@ -530,19 +457,19 @@ size_t UltraCircuit::handle_memory_relation(size_t cursor) size_t UltraCircuit::handle_nnf_relation(size_t cursor) { - bb::fr q_nnf = this->selectors[BlockType::NNF][cursor][9]; // Magic 9? + bb::fr q_nnf = this->selectors[BlockType::NNF][cursor][SelectorType::q_nnf]; if (q_nnf == 0) { return cursor + 1; } - uint32_t w_l_idx = this->wires_idxs[BlockType::NNF][cursor][0]; - uint32_t w_r_idx = this->wires_idxs[BlockType::NNF][cursor][1]; - uint32_t w_o_idx = this->wires_idxs[BlockType::NNF][cursor][2]; - uint32_t w_4_idx = this->wires_idxs[BlockType::NNF][cursor][3]; - uint32_t w_l_shift_idx = this->wires_idxs[BlockType::NNF][cursor][4]; - uint32_t w_r_shift_idx = this->wires_idxs[BlockType::NNF][cursor][5]; - uint32_t w_o_shift_idx = this->wires_idxs[BlockType::NNF][cursor][6]; - uint32_t w_4_shift_idx = this->wires_idxs[BlockType::NNF][cursor][7]; + uint32_t w_l_idx = this->wires_idxs[BlockType::NNF][cursor][WireType::w_l]; + uint32_t w_r_idx = this->wires_idxs[BlockType::NNF][cursor][WireType::w_r]; + uint32_t w_o_idx = this->wires_idxs[BlockType::NNF][cursor][WireType::w_o]; + uint32_t w_4_idx = this->wires_idxs[BlockType::NNF][cursor][WireType::w_4]; + uint32_t w_l_shift_idx = this->wires_idxs[BlockType::NNF][cursor][WireType::w_l_shift]; + uint32_t w_r_shift_idx = this->wires_idxs[BlockType::NNF][cursor][WireType::w_r_shift]; + uint32_t w_o_shift_idx = this->wires_idxs[BlockType::NNF][cursor][WireType::w_o_shift]; + uint32_t w_4_shift_idx = this->wires_idxs[BlockType::NNF][cursor][WireType::w_4_shift]; STerm w_1 = this->symbolic_vars[w_l_idx]; STerm w_2 = this->symbolic_vars[w_r_idx]; @@ -553,11 +480,10 @@ size_t UltraCircuit::handle_nnf_relation(size_t cursor) STerm w_3_shift = this->symbolic_vars[w_o_shift_idx]; STerm 
w_4_shift = this->symbolic_vars[w_4_shift_idx]; - bb::fr q_m = this->selectors[BlockType::NNF][cursor][0]; - bb::fr q_1 = this->selectors[BlockType::NNF][cursor][1]; - bb::fr q_2 = this->selectors[BlockType::NNF][cursor][2]; - bb::fr q_3 = this->selectors[BlockType::NNF][cursor][3]; - bb::fr q_4 = this->selectors[BlockType::NNF][cursor][4]; + bb::fr q_m = this->selectors[BlockType::NNF][cursor][SelectorType::q_m]; + bb::fr q_2 = this->selectors[BlockType::NNF][cursor][SelectorType::q_2]; + bb::fr q_3 = this->selectors[BlockType::NNF][cursor][SelectorType::q_3]; + bb::fr q_4 = this->selectors[BlockType::NNF][cursor][SelectorType::q_4]; bb::fr LIMB_SIZE(uint256_t(1) << 68); bb::fr SUBLIMB_SHIFT(uint256_t(1) << 14); @@ -631,7 +557,7 @@ size_t UltraCircuit::handle_nnf_relation(size_t cursor) } if (entry_flag > 1) { - throw std::runtime_error("Double entry in AUX"); + throw std::runtime_error("Double entry in NNF"); } return cursor + 1; } diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/circuit/ultra_circuit.hpp b/barretenberg/cpp/src/barretenberg/smt_verification/circuit/ultra_circuit.hpp index 58b3a61dc37d..ed1d66d159ba 100644 --- a/barretenberg/cpp/src/barretenberg/smt_verification/circuit/ultra_circuit.hpp +++ b/barretenberg/cpp/src/barretenberg/smt_verification/circuit/ultra_circuit.hpp @@ -17,6 +17,33 @@ struct BlockType { static const size_t NNF = 6; }; +struct SelectorType { + static const size_t q_m = 0; + static const size_t q_1 = 1; + static const size_t q_2 = 2; + static const size_t q_3 = 3; + static const size_t q_4 = 4; + static const size_t q_c = 5; + static const size_t q_arith = 6; + static const size_t q_delta_range = 7; + static const size_t q_elliptic = 8; + static const size_t q_memory = 9; + static const size_t q_nnf = 10; + static const size_t q_lookup = 11; + static const size_t curve_b = 12; +}; + +struct WireType { + static const size_t w_l = 0; + static const size_t w_r = 1; + static const size_t w_o = 2; + static const size_t 
w_4 = 3; + static const size_t w_l_shift = 4; + static const size_t w_r_shift = 5; + static const size_t w_o_shift = 6; + static const size_t w_4_shift = 7; +}; + /** * @brief Symbolic Circuit class for Standard Circuit Builder. * @@ -25,7 +52,6 @@ struct BlockType { */ class UltraCircuit : public CircuitBase { public: - // TODO(alex): check that there's no actual pub_inputs block std::vector>> selectors; // all selectors from the circuit // 1st entry are lookup selectors // 2nd entry are arithmetic selectors @@ -85,14 +111,13 @@ class UltraCircuit : public CircuitBase { size_t handle_lookup_relation(size_t cursor); size_t handle_elliptic_relation(size_t cursor); size_t handle_delta_range_relation(size_t cursor); - size_t handle_memory_relation(size_t cursor); size_t handle_nnf_relation(size_t cursor); void handle_range_constraints(); void rom_table_read(uint32_t rom_array_idx, uint32_t index_idx, uint32_t value1_idx, uint32_t value2_idx); void ram_table_read(uint32_t ram_array_idx, uint32_t index_idx, uint32_t value_idx); - void ram_table_write(uint32_t rom_array_idx, uint32_t index_idx, uint32_t value_idx); + void ram_table_write(uint32_t ram_array_idx, uint32_t ram_index_idx, uint32_t read_from_value_idx); void handle_rom_tables(); void handle_ram_tables(); diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/circuit/ultra_circuit.test.cpp b/barretenberg/cpp/src/barretenberg/smt_verification/circuit/ultra_circuit.test.cpp index 446797815a9e..158dcf71053f 100644 --- a/barretenberg/cpp/src/barretenberg/smt_verification/circuit/ultra_circuit.test.cpp +++ b/barretenberg/cpp/src/barretenberg/smt_verification/circuit/ultra_circuit.test.cpp @@ -8,10 +8,10 @@ #include "barretenberg/crypto/pedersen_commitment/pedersen.hpp" #include "barretenberg/stdlib/primitives/bigfield/bigfield.hpp" #include "barretenberg/stdlib/primitives/field/field.hpp" +#include "barretenberg/stdlib/primitives/group/cycle_group.hpp" #include 
"barretenberg/stdlib/primitives/logic/logic.hpp" #include "barretenberg/stdlib/primitives/memory/ram_table.hpp" #include "barretenberg/stdlib/primitives/memory/rom_table.hpp" -#include "barretenberg/stdlib/primitives/uint/uint.hpp" #include "barretenberg/smt_verification/circuit/ultra_circuit.hpp" #include "barretenberg/smt_verification/util/smt_util.hpp" @@ -32,9 +32,9 @@ using pub_witness_t = stdlib::public_witness_t; using field_t = stdlib::field_t; using bigfield_t = bb::stdlib::bigfield; -using uint_t = stdlib::uint32; using rom_table_t = bb::stdlib::rom_table; using ram_table_t = bb::stdlib::ram_table; +using cycle_group_t = bb::stdlib::cycle_group; TEST(UltraCircuitSMT, AssertEqual) { @@ -106,35 +106,28 @@ TEST(UltraCircuitSMT, EllipticRelationADD) { UltraCircuitBuilder builder; - bb::grumpkin::g1::affine_element p1 = bb::crypto::pedersen_commitment::commit_native({ bb::fr::one() }, 0); - bb::grumpkin::g1::affine_element p2 = bb::crypto::pedersen_commitment::commit_native({ bb::fr::one() }, 1); - bb::grumpkin::g1::affine_element p3 = bb::grumpkin::g1::element(p1) + bb::grumpkin::g1::element(p2); + auto p1 = + cycle_group_t::from_witness(&builder, bb::stdlib::cycle_group::Curve::AffineElement::random_element()); + auto p2 = + cycle_group_t::from_witness(&builder, bb::stdlib::cycle_group::Curve::AffineElement::random_element()); + auto p3 = p1.unconditional_add(p2); - uint32_t x1 = builder.add_variable(p1.x); - uint32_t y1 = builder.add_variable(p1.y); - uint32_t x2 = builder.add_variable(p2.x); - uint32_t y2 = builder.add_variable(p2.y); - uint32_t x3 = builder.add_variable(p3.x); - uint32_t y3 = builder.add_variable(p3.y); - - builder.set_variable_name(x1, "x1"); - builder.set_variable_name(x2, "x2"); - builder.set_variable_name(x3, "x3"); - builder.set_variable_name(y1, "y1"); - builder.set_variable_name(y2, "y2"); - builder.set_variable_name(y3, "y3"); - - builder.create_ecc_add_gate({ x1, y1, x2, y2, x3, y3, 1 }); + 
builder.set_variable_name(p1.x.get_witness_index(), "x1"); + builder.set_variable_name(p2.x.get_witness_index(), "x2"); + builder.set_variable_name(p3.x.get_witness_index(), "x3"); + builder.set_variable_name(p1.y.get_witness_index(), "y1"); + builder.set_variable_name(p2.y.get_witness_index(), "y2"); + builder.set_variable_name(p3.y.get_witness_index(), "y3"); auto circuit_info = unpack_from_buffer(builder.export_circuit()); Solver s(circuit_info.modulus, ultra_solver_config); UltraCircuit cir(circuit_info, &s); ASSERT_EQ(cir.get_num_gates(), builder.get_estimated_num_finalized_gates()); - cir["x1"] == builder.get_variable(x1); - cir["x2"] == builder.get_variable(x2); - cir["y1"] == builder.get_variable(y1); - cir["y2"] == builder.get_variable(y2); + cir["x1"] == p1.x.get_value(); + cir["x2"] == p2.x.get_value(); + cir["y1"] == p1.y.get_value(); + cir["y2"] == p2.y.get_value(); bool res = s.check(); ASSERT_TRUE(res); @@ -142,14 +135,14 @@ TEST(UltraCircuitSMT, EllipticRelationADD) bb::fr x3_solver_val = string_to_fr(s[cir["x3"]], /*base=*/10); bb::fr y3_solver_val = string_to_fr(s[cir["y3"]], /*base=*/10); - bb::fr x3_builder_val = builder.get_variable(x3); - bb::fr y3_builder_val = builder.get_variable(y3); + bb::fr x3_builder_val = p3.x.get_value(); + bb::fr y3_builder_val = p3.y.get_value(); ASSERT_EQ(x3_solver_val, x3_builder_val); ASSERT_EQ(y3_solver_val, y3_builder_val); - ((Bool(cir["x3"]) != Bool(STerm(builder.get_variable(x3), &s, TermType::FFTerm))) | - (Bool(cir["y3"]) != Bool(STerm(builder.get_variable(y3), &s, TermType::FFTerm)))) + ((Bool(cir["x3"]) != Bool(STerm(x3_builder_val, &s, TermType::FFTerm))) | + (Bool(cir["y3"]) != Bool(STerm(y3_builder_val, &s, TermType::FFTerm)))) .assert_term(); res = s.check(); ASSERT_FALSE(res); @@ -159,42 +152,39 @@ TEST(UltraCircuitSMT, EllipticRelationDBL) { UltraCircuitBuilder builder; - bb::grumpkin::g1::affine_element p1 = bb::crypto::pedersen_commitment::commit_native({ bb::fr::one() }, 0); - 
bb::grumpkin::g1::affine_element p3 = bb::grumpkin::g1::element(p1).dbl(); + auto p1 = + cycle_group_t::from_witness(&builder, bb::stdlib::cycle_group::Curve::AffineElement::random_element()); + auto p2 = p1.dbl(); - uint32_t x1 = builder.add_variable(p1.x); - uint32_t y1 = builder.add_variable(p1.y); - uint32_t x3 = builder.add_variable(p3.x); - uint32_t y3 = builder.add_variable(p3.y); - builder.set_variable_name(x1, "x1"); - builder.set_variable_name(x3, "x3"); - builder.set_variable_name(y1, "y1"); - builder.set_variable_name(y3, "y3"); - - builder.create_ecc_dbl_gate({ x1, y1, x3, y3 }); + builder.set_variable_name(p1.x.get_witness_index(), "x1"); + builder.set_variable_name(p2.x.get_witness_index(), "x2"); + builder.set_variable_name(p1.y.get_witness_index(), "y1"); + builder.set_variable_name(p2.y.get_witness_index(), "y2"); + builder.set_variable_name(p1.is_point_at_infinity().get_normalized_witness_index(), "is_inf"); auto circuit_info = unpack_from_buffer(builder.export_circuit()); Solver s(circuit_info.modulus, ultra_solver_config); UltraCircuit cir(circuit_info, &s); ASSERT_EQ(cir.get_num_gates(), builder.get_estimated_num_finalized_gates()); - cir["x1"] == builder.get_variable(x1); - cir["y1"] == builder.get_variable(y1); + cir["x1"] == p1.x.get_value(); + cir["y1"] == p1.y.get_value(); + cir["is_inf"] == static_cast(p1.is_point_at_infinity().get_value()); bool res = s.check(); ASSERT_TRUE(res); - bb::fr x3_solver_val = string_to_fr(s[cir["x3"]], /*base=*/10); - bb::fr y3_solver_val = string_to_fr(s[cir["y3"]], /*base=*/10); + bb::fr x2_solver_val = string_to_fr(s[cir["x2"]], /*base=*/10); + bb::fr y2_solver_val = string_to_fr(s[cir["y2"]], /*base=*/10); - bb::fr x3_builder_val = builder.get_variable(x3); - bb::fr y3_builder_val = builder.get_variable(y3); + bb::fr x2_builder_val = p2.x.get_value(); + bb::fr y2_builder_val = p2.y.get_value(); - ASSERT_EQ(x3_solver_val, x3_builder_val); - ASSERT_EQ(y3_solver_val, y3_builder_val); + 
ASSERT_EQ(x2_solver_val, x2_builder_val); + ASSERT_EQ(y2_solver_val, y2_builder_val); - ((Bool(cir["x3"]) != Bool(STerm(builder.get_variable(x3), &s, TermType::FFTerm))) | - (Bool(cir["y3"]) != Bool(STerm(builder.get_variable(y3), &s, TermType::FFTerm)))) + ((Bool(cir["x2"]) != Bool(STerm(x2_builder_val, &s, TermType::FFTerm))) | + (Bool(cir["y2"]) != Bool(STerm(y2_builder_val, &s, TermType::FFTerm)))) .assert_term(); res = s.check(); ASSERT_FALSE(res); @@ -204,7 +194,8 @@ TEST(UltraCircuitSMT, OptimizedDeltaRangeRelation) { UltraCircuitBuilder builder; - uint_t a(witness_t(&builder, engine.get_random_uint32())); + field_t a(witness_t(&builder, engine.get_random_uint32())); + a.create_range_constraint(32); builder.set_variable_name(a.get_witness_index(), "a"); builder.finalize_circuit(/*ensure_nonzero=*/false); // No need to add nonzero gates if we're not proving @@ -249,16 +240,16 @@ TEST(UltraCircuitSMT, LookupRelation2) { UltraCircuitBuilder builder; - uint_t a(witness_t(&builder, engine.get_random_uint32())); - uint_t b(witness_t(&builder, engine.get_random_uint32())); - uint_t c = a ^ b; + field_t a(witness_t(&builder, engine.get_random_uint32())); + field_t b(witness_t(&builder, engine.get_random_uint32())); + field_t c = bb::stdlib::logic::create_logic_constraint(a, b, /*num_bits=*/32, /*is_xor_gate=*/true); builder.set_variable_name(a.get_witness_index(), "a"); builder.set_variable_name(b.get_witness_index(), "b"); builder.set_variable_name(c.get_witness_index(), "c"); builder.finalize_circuit(/*ensure_nonzero=*/false); // No need to add nonzero gates if we're not proving auto circuit_info = unpack_from_buffer(builder.export_circuit()); - Solver s(circuit_info.modulus, ultra_solver_config, /*base=*/16, /*bvsize=*/32); + Solver s(circuit_info.modulus, ultra_solver_config, /*base=*/16, /*bvsize=*/256); UltraCircuit cir(circuit_info, &s, TermType::BVTerm); ASSERT_EQ(cir.get_num_gates(), builder.get_estimated_num_finalized_gates()); @@ -274,8 +265,9 @@ 
TEST(UltraCircuitSMT, LookupRelation2) ASSERT_EQ(c_solver_val, c_builder_val); } -// Due to ranges being huge it takes 5 min 32 sec to finish -// TEST(UltraCircuitSMT, AuxRelation) +//// Due to ranges being huge it takes 5 min 32 sec to finish +// TODO(alex): Wait until the bug with large sets is resolved by cvc5 +// TEST(UltraCircuitSMT, NNFRelation) //{ // UltraCircuitBuilder builder; // @@ -287,11 +279,13 @@ TEST(UltraCircuitSMT, LookupRelation2) // Solver slv(circuit_info.modulus, /*config=*/debug_solver_config, /*base=*/16); // UltraCircuit cir(circuit_info, &slv, TermType::FFTerm); // -// for(uint32_t i = 0; i < builder.variables.size(); i++){ -// cir[i] == builder.variables[i]; +// for(uint32_t i = 0; i < builder.get_variables().size(); i++){ +// if (!cir.optimized[i]){ +// cir[i] == builder.get_variables()[i]; +// } // } // -// // slv.print_assertions(); +// slv.print_assertions(); // bool res = smt_timer(&slv); // ASSERT_TRUE(res); //} @@ -467,16 +461,16 @@ TEST(UltraCircuitSMT, RAMTablesRelaxed) TEST(UltraCircuitSMT, XorOptimization) { UltraCircuitBuilder builder; - uint_t a(witness_t(&builder, engine.get_random_uint32())); + field_t a(witness_t(&builder, engine.get_random_uint32())); builder.set_variable_name(a.get_witness_index(), "a"); - uint_t b(witness_t(&builder, engine.get_random_uint32())); + field_t b(witness_t(&builder, engine.get_random_uint32())); builder.set_variable_name(b.get_witness_index(), "b"); - uint_t c = a ^ b; + field_t c = bb::stdlib::logic::create_logic_constraint(a, b, /*num_bits=*/32, /*is_xor_gate=*/true); builder.set_variable_name(c.get_witness_index(), "c"); CircuitSchema circuit_info = unpack_from_buffer(builder.export_circuit()); uint32_t modulus_base = 16; - uint32_t bvsize = 35; + uint32_t bvsize = 256; Solver s(circuit_info.modulus, ultra_solver_config, modulus_base, bvsize); UltraCircuit circuit(circuit_info, &s, TermType::BVTerm); @@ -497,16 +491,16 @@ TEST(UltraCircuitSMT, XorOptimization) TEST(UltraCircuitSMT, 
AndOptimization) { UltraCircuitBuilder builder; - uint_t a(witness_t(&builder, engine.get_random_uint32())); + field_t a(witness_t(&builder, engine.get_random_uint32())); builder.set_variable_name(a.get_witness_index(), "a"); - uint_t b(witness_t(&builder, engine.get_random_uint32())); + field_t b(witness_t(&builder, engine.get_random_uint32())); builder.set_variable_name(b.get_witness_index(), "b"); - uint_t c = a & b; + field_t c = bb::stdlib::logic::create_logic_constraint(a, b, /*num_bits=*/32, /*is_xor_gate=*/false); builder.set_variable_name(c.get_witness_index(), "c"); CircuitSchema circuit_info = unpack_from_buffer(builder.export_circuit()); uint32_t modulus_base = 16; - uint32_t bvsize = 35; + uint32_t bvsize = 256; Solver s(circuit_info.modulus, ultra_solver_config, modulus_base, bvsize); UltraCircuit circuit(circuit_info, &s, TermType::BVTerm); diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/smt_examples.test.cpp b/barretenberg/cpp/src/barretenberg/smt_verification/smt_examples.test.cpp new file mode 100644 index 000000000000..9dfeefa23029 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/smt_verification/smt_examples.test.cpp @@ -0,0 +1,185 @@ +#include "barretenberg/circuit_checker/circuit_checker.hpp" +#include "barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp" +#include +#include +#include +#include + +#include "barretenberg/stdlib/primitives/field/field.hpp" + +#include "barretenberg/smt_verification/circuit/ultra_circuit.hpp" + +using namespace bb; + +using Builder = UltraCircuitBuilder; +using field_t = stdlib::field_t; +using witness_t = stdlib::witness_t; +using pub_witness_t = stdlib::public_witness_t; + +TEST(smtExample, multiplication_true) +{ + Builder builder; + + field_t a(witness_t(&builder, fr::random_element())); + field_t b(witness_t(&builder, fr::random_element())); + field_t c = (a + a) / (b + b + b); + + builder.set_variable_name(a.witness_index, "a"); + builder.set_variable_name(b.witness_index, 
"b"); + builder.set_variable_name(c.witness_index, "c"); + ASSERT_TRUE(CircuitChecker::check(builder)); + + auto buf = builder.export_circuit(); + + smt_circuit::CircuitSchema circuit_info = smt_circuit_schema::unpack_from_buffer(buf); + smt_solver::Solver s(circuit_info.modulus); + smt_circuit::UltraCircuit circuit(circuit_info, &s, smt_terms::TermType::FFTerm); + smt_terms::STerm a1 = circuit["a"]; + smt_terms::STerm b1 = circuit["b"]; + smt_terms::STerm c1 = circuit["c"]; + smt_terms::STerm two = smt_terms::FFConst("2", &s, 10); + smt_terms::STerm thr = smt_terms::FFConst("3", &s, 10); + smt_terms::STerm cr = smt_terms::FFVar("cr", &s); + cr = (two * a1) / (thr * b1); + c1 != cr; + + bool res = s.check(); + ASSERT_FALSE(res); +} + +TEST(smtExample, multiplication_true_kind) +{ + Builder builder; + + field_t a(witness_t(&builder, fr::random_element())); + field_t b(witness_t(&builder, fr::random_element())); + field_t c = (a + a) / (b + b + b); + + builder.set_variable_name(a.witness_index, "a"); + builder.set_variable_name(b.witness_index, "b"); + builder.set_variable_name(c.witness_index, "c"); + ASSERT_TRUE(CircuitChecker::check(builder)); + + auto buf = builder.export_circuit(); + + smt_circuit::CircuitSchema circuit_info = smt_circuit_schema::unpack_from_buffer(buf); + smt_solver::Solver s(circuit_info.modulus); + smt_circuit::UltraCircuit circuit(circuit_info, &s, smt_terms::TermType::FFTerm); + smt_terms::STerm a1 = circuit["a"]; + smt_terms::STerm b1 = circuit["b"]; + smt_terms::STerm c1 = circuit["c"]; + smt_terms::STerm two = smt_terms::FFConst("2", &s, 10); + smt_terms::STerm thr = smt_terms::FFConst("3", &s, 10); + smt_terms::STerm cr = smt_terms::FFVar("cr", &s); + cr* thr* b1 == two* a1; + c1 != cr; + + bool res = s.check(); + ASSERT_FALSE(res); +} + +TEST(smtExample, multiplication_false) +{ + Builder builder; + + field_t a(witness_t(&builder, fr::random_element())); + field_t b(witness_t(&builder, fr::random_element())); + field_t c = (a) / (b + b 
+ b); // mistake was here + + builder.set_variable_name(a.witness_index, "a"); + builder.set_variable_name(b.witness_index, "b"); + builder.set_variable_name(c.witness_index, "c"); + ASSERT_TRUE(CircuitChecker::check(builder)); + + auto buf = builder.export_circuit(); + + smt_circuit::CircuitSchema circuit_info = smt_circuit_schema::unpack_from_buffer(buf); + smt_solver::Solver s(circuit_info.modulus); + smt_circuit::UltraCircuit circuit(circuit_info, &s, smt_terms::TermType::FFTerm); + + smt_terms::STerm a1 = circuit["a"]; + smt_terms::STerm b1 = circuit["b"]; + smt_terms::STerm c1 = circuit["c"]; + + smt_terms::STerm two = smt_terms::FFConst("2", &s, 10); + smt_terms::STerm thr = smt_terms::FFConst("3", &s, 10); + smt_terms::STerm cr = smt_terms::FFVar("cr", &s); + cr = (two * a1) / (thr * b1); + c1 != cr; + + bool res = s.check(); + ASSERT_TRUE(res); + + std::unordered_map terms({ { "a", a1 }, { "b", b1 }, { "c", c1 }, { "cr", cr } }); + + std::unordered_map vals = s.model(terms); + + info("a = ", vals["a"]); + info("b = ", vals["b"]); + info("c = ", vals["c"]); + info("c_res = ", vals["cr"]); +} + +// Make sure that quadratic polynomial evaluation doesn't have unique +// witness using unique_witness_ext function +// Find both roots of a quadratic equation x^2 + a * x + b = s +TEST(smtExample, unique_witness_ext) +{ + Builder builder; + + field_t a(pub_witness_t(&builder, fr::random_element())); + field_t b(pub_witness_t(&builder, fr::random_element())); + builder.set_variable_name(a.witness_index, "a"); + builder.set_variable_name(b.witness_index, "b"); + field_t z(witness_t(&builder, fr::random_element())); + field_t ev = z * z + a * z + b; + builder.set_variable_name(z.witness_index, "z"); + builder.set_variable_name(ev.witness_index, "ev"); + + auto buf = builder.export_circuit(); + + smt_circuit::CircuitSchema circuit_info = smt_circuit_schema::unpack_from_buffer(buf); + smt_solver::Solver s(circuit_info.modulus); + + std::pair cirs = + 
smt_circuit::UltraCircuit::unique_witness_ext(circuit_info, &s, smt_terms::TermType::FFTerm, { "ev" }, { "z" }); + + bool res = s.check(); + ASSERT_TRUE(res); + + std::unordered_map terms = { { "z_c1", cirs.first["z"] }, { "z_c2", cirs.second["z"] } }; + std::unordered_map vals = s.model(terms); + ASSERT_NE(vals["z_c1"], vals["z_c2"]); +} + +// Make sure that quadratic polynomial evaluation doesn't have unique +// witness using unique_witness function +// Finds both roots of a quadratic eq x^2 + a * x + b = s +TEST(smtExample, unique_witness) +{ + Builder builder; + + field_t a(pub_witness_t(&builder, fr::random_element())); + field_t b(pub_witness_t(&builder, fr::random_element())); + builder.set_variable_name(a.witness_index, "a"); + builder.set_variable_name(b.witness_index, "b"); + field_t z(witness_t(&builder, fr::random_element())); + field_t ev = z * z + a * z + b; + builder.set_variable_name(z.witness_index, "z"); + builder.set_variable_name(ev.witness_index, "ev"); + + auto buf = builder.export_circuit(); + + smt_circuit::CircuitSchema circuit_info = smt_circuit_schema::unpack_from_buffer(buf); + smt_solver::Solver s(circuit_info.modulus); + + std::pair cirs = + smt_circuit::UltraCircuit::unique_witness(circuit_info, &s, smt_terms::TermType::FFTerm, { "ev" }); + + bool res = s.check(); + ASSERT_TRUE(res); + + std::unordered_map terms = { { "z_c1", cirs.first["z"] }, { "z_c2", cirs.second["z"] } }; + std::unordered_map vals = s.model(terms); + ASSERT_NE(vals["z_c1"], vals["z_c2"]); +} \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/smt_polynomials.test.cpp b/barretenberg/cpp/src/barretenberg/smt_verification/smt_polynomials.test.cpp new file mode 100644 index 000000000000..63c188c01aef --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/smt_verification/smt_polynomials.test.cpp @@ -0,0 +1,104 @@ +#include "barretenberg/stdlib/primitives/field/field.hpp" +#include 
"barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp" + +#include "barretenberg/smt_verification/circuit/ultra_circuit.hpp" +#include "barretenberg/smt_verification/util/smt_util.hpp" + +#include + +using namespace bb; +using namespace smt_circuit; + +using Builder = UltraCircuitBuilder; +using field_t = stdlib::field_t; +using witness_t = stdlib::witness_t; +using pub_witness_t = stdlib::public_witness_t; + +msgpack::sbuffer create_polynomial_evaluation_circuit(size_t n, bool pub_coeffs) +{ + Builder builder; + + std::vector coeffs; + for (size_t i = 0; i < n; i++) { + if (pub_coeffs) { + coeffs.emplace_back(pub_witness_t(&builder, fr::random_element())); + } else { + coeffs.emplace_back(witness_t(&builder, fr::random_element())); + } + builder.set_variable_name(coeffs.back().get_witness_index(), "coeff_" + std::to_string(i)); + } + + field_t z(witness_t(&builder, 10)); + builder.set_variable_name(z.get_witness_index(), "point"); + + field_t res = field_t::from_witness_index(&builder, 0); + + for (size_t i = 0; i < n; i++) { + res = res * z + coeffs[i]; + } + builder.set_variable_name(res.get_witness_index(), "result"); + + info("evaluation at point ", z, ": ", res); + info("gates: ", builder.num_gates); + info("variables: ", builder.get_num_variables()); + info("public inputs: ", builder.num_public_inputs()); + + return builder.export_circuit(); +} + +STerm direct_polynomial_evaluation(UltraCircuit& c, size_t n) +{ + STerm point = c["point"]; + STerm result = c["result"]; + STerm ev = c["zero"]; + for (size_t i = 0; i < n; i++) { + ev = ev * point + c["coeff_" + std::to_string(i)]; + } + return ev; +} + +void model_variables(UltraCircuit& c, Solver* s, STerm& evaluation) +{ + std::unordered_map terms; + terms.insert({ "point", c["point"] }); + terms.insert({ "result", c["result"] }); + terms.insert({ "evaluation", evaluation }); + + auto values = s->model(terms); + + info("point = ", values["point"]); + info("circuit_result = ", values["result"]); + 
info("function_evaluation = ", values["evaluation"]); +} + +TEST(PolynomialEvaluation, public) +{ + size_t n = 40; + auto buf = create_polynomial_evaluation_circuit(n, true); + + CircuitSchema circuit_info = unpack_from_buffer(buf); + Solver s(circuit_info.modulus); + UltraCircuit circuit(circuit_info, &s, TermType::FFTerm); + STerm ev = direct_polynomial_evaluation(circuit, n); + ev != circuit["result"]; + + bool res = smt_timer(&s); + ASSERT_FALSE(res); +} + +TEST(PolynomialEvaluation, private) +{ + size_t n = 40; + auto buf = create_polynomial_evaluation_circuit(n, false); + + CircuitSchema circuit_info = unpack_from_buffer(buf); + Solver s(circuit_info.modulus); + UltraCircuit circuit(circuit_info, &s, TermType::FFTerm); + STerm ev = direct_polynomial_evaluation(circuit, n); + ev != circuit["result"]; + + bool res = smt_timer(&s); + ASSERT_FALSE(res); + info("Gates: ", circuit.get_num_gates()); + info("Result: ", s.getResult()); +} \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/solver/solver.hpp b/barretenberg/cpp/src/barretenberg/smt_verification/solver/solver.hpp index c108faf717dd..bc990d5432d3 100644 --- a/barretenberg/cpp/src/barretenberg/smt_verification/solver/solver.hpp +++ b/barretenberg/cpp/src/barretenberg/smt_verification/solver/solver.hpp @@ -109,6 +109,7 @@ class Solver { solver.setOption("stats", "true"); } if (config.debug >= 2) { + solver.setOption("lang", "smt2"); solver.setOption("output", "inst"); solver.setOption("output", "learned-lits"); solver.setOption("output", "subs"); diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/terms/bool.hpp b/barretenberg/cpp/src/barretenberg/smt_verification/terms/bool.hpp index 5bd78d3b85ee..d1cea2f804b8 100644 --- a/barretenberg/cpp/src/barretenberg/smt_verification/terms/bool.hpp +++ b/barretenberg/cpp/src/barretenberg/smt_verification/terms/bool.hpp @@ -21,11 +21,11 @@ class Bool { Bool(const cvc5::Term& t, Solver* slv, TermType type = 
TermType::SBool) : solver(slv) , term(t) - , type(type){}; + , type(type) {}; explicit Bool(const STerm& t) : solver(t.solver) - , term(t.normalize().term){}; + , term(t.normalize().term) {}; explicit Bool(const std::string& name, Solver* slv) : solver(slv) diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/terms/bool.test.cpp b/barretenberg/cpp/src/barretenberg/smt_verification/terms/bool.test.cpp index ea700fb6630c..12326ff3714d 100644 --- a/barretenberg/cpp/src/barretenberg/smt_verification/terms/bool.test.cpp +++ b/barretenberg/cpp/src/barretenberg/smt_verification/terms/bool.test.cpp @@ -48,7 +48,7 @@ TEST(SymbolicBool, or) ASSERT_EQ(bb::fr(c), zvals); } -TEST(SymbolicBool, not ) +TEST(SymbolicBool, not) { Solver slv("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001"); bool a = static_cast(engine.get_random_uint8() & 1); diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/terms/bvterm.test.cpp b/barretenberg/cpp/src/barretenberg/smt_verification/terms/bvterm.test.cpp new file mode 100644 index 000000000000..520912dfd5aa --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/smt_verification/terms/bvterm.test.cpp @@ -0,0 +1,338 @@ +#include +#include + +#include "barretenberg/smt_verification/util/smt_util.hpp" +#include "barretenberg/stdlib/primitives/logic/logic.hpp" +#include "term.hpp" + +#include + +namespace { +auto& engine = bb::numeric::get_randomness(); +} + +using namespace bb; +using Builder = UltraCircuitBuilder; +using witness_ct = stdlib::witness_t; + +using namespace smt_terms; + +TEST(BVTerm, addition) +{ + uint32_t a = engine.get_random_uint32(); + uint32_t b = engine.get_random_uint32(); + uint32_t c = a + b; + + uint32_t modulus_base = 16; + uint32_t bitvector_size = 32; + Solver s("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", + default_solver_config, + modulus_base, + bitvector_size); + + STerm x = BVVar("x", &s); + STerm y = BVVar("y", &s); + STerm z = x + y; + + z == c; + x == 
a; + ASSERT_TRUE(s.check()); + + bb::fr yvals = string_to_fr(s[y], /*base=*/2, /*is_signed=*/false); + ASSERT_EQ(bb::fr(b), yvals); +} + +TEST(BVTerm, subtraction) +{ + uint32_t a = engine.get_random_uint32(); + uint32_t b = engine.get_random_uint32(); + uint32_t c = a - b; + + uint32_t modulus_base = 16; + uint32_t bitvector_size = 32; + Solver s("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", + default_solver_config, + modulus_base, + bitvector_size); + + STerm x = BVVar("x", &s); + STerm y = BVVar("y", &s); + STerm z = x - y; + + z == c; + x == a; + ASSERT_TRUE(s.check()); + + bb::fr yvals = string_to_fr(s[y], /*base=*/2, /*is_signed=*/false); + ASSERT_EQ(bb::fr(b), yvals); +} + +TEST(BVTerm, xor) +{ + uint32_t a = engine.get_random_uint32(); + uint32_t b = engine.get_random_uint32(); + uint32_t c = a ^ b; + + uint32_t modulus_base = 16; + uint32_t bitvector_size = 32; + Solver s("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", + default_solver_config, + modulus_base, + bitvector_size); + + STerm x = BVVar("x", &s); + STerm y = BVVar("y", &s); + STerm z = x ^ y; + + z == c; + x == a; + ASSERT_TRUE(s.check()); + + bb::fr yvals = string_to_fr(s[y], /*base=*/2, /*is_signed=*/false); + ASSERT_EQ(bb::fr(b), yvals); +} + +TEST(BVTerm, rotr) +{ + uint32_t a = engine.get_random_uint32(); + uint32_t b = (a >> 10) | (a << 22); + + uint32_t modulus_base = 16; + uint32_t bitvector_size = 32; + Solver s("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", + default_solver_config, + modulus_base, + bitvector_size); + + STerm x = BVVar("x", &s); + STerm y = x.rotr(10); + + y == b; + ASSERT_TRUE(s.check()); + + bb::fr xvals = string_to_fr(s[x], /*base=*/2, /*is_signed=*/false); + ASSERT_EQ(bb::fr(a), xvals); +} + +TEST(BVTerm, rotl) +{ + uint32_t a = engine.get_random_uint32(); + uint32_t b = (a << 10) | (a >> 22); + + uint32_t modulus_base = 16; + uint32_t bitvector_size = 32; + Solver 
s("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", + default_solver_config, + modulus_base, + bitvector_size); + + STerm x = BVVar("x", &s); + STerm y = x.rotl(10); + + y == b; + ASSERT_TRUE(s.check()); + + bb::fr xvals = string_to_fr(s[x], /*base=*/2, /*is_signed=*/false); + ASSERT_EQ(bb::fr(a), xvals); +} + +// non bijective operators +TEST(BVTerm, mul) +{ + Builder builder; + uint32_t a = engine.get_random_uint32(); + uint32_t b = engine.get_random_uint32(); + uint32_t c = a * b; + + uint32_t modulus_base = 16; + uint32_t bitvector_size = 32; + Solver s("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", + default_solver_config, + modulus_base, + bitvector_size); + + STerm x = BVVar("x", &s); + STerm y = BVVar("y", &s); + STerm z = x * y; + + x == a; + y == b; + + ASSERT_TRUE(s.check()); + + bb::fr xvals = string_to_fr(s[z], /*base=*/2, /*is_signed=*/false); + ASSERT_EQ(bb::fr(c), xvals); +} + +TEST(BVTerm, and) +{ + uint32_t a = engine.get_random_uint32(); + uint32_t b = engine.get_random_uint32(); + uint32_t c = a & b; + + uint32_t modulus_base = 16; + uint32_t bitvector_size = 32; + Solver s("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", + default_solver_config, + modulus_base, + bitvector_size); + + STerm x = BVVar("x", &s); + STerm y = BVVar("y", &s); + STerm z = x & y; + + x == a; + y == b; + + ASSERT_TRUE(s.check()); + + bb::fr xvals = string_to_fr(s[z], /*base=*/2, /*is_signed=*/false); + ASSERT_EQ(bb::fr(c), xvals); +} + +TEST(BVTerm, or) +{ + uint32_t a = engine.get_random_uint32(); + uint32_t b = engine.get_random_uint32(); + uint32_t c = a | b; + + uint32_t modulus_base = 16; + uint32_t bitvector_size = 32; + Solver s("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", + default_solver_config, + modulus_base, + bitvector_size); + + STerm x = BVVar("x", &s); + STerm y = BVVar("y", &s); + STerm z = x | y; + + x == a; + y == b; + + ASSERT_TRUE(s.check()); + + bb::fr xvals = 
string_to_fr(s[z], /*base=*/2, /*is_signed=*/false); + ASSERT_EQ(bb::fr(c), xvals); +} + +TEST(BVTerm, div) +{ + uint32_t a = engine.get_random_uint32(); + uint32_t b = engine.get_random_uint32(); + uint32_t c = a / b; + + uint32_t modulus_base = 16; + uint32_t bitvector_size = 32; + Solver s("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", + default_solver_config, + modulus_base, + bitvector_size); + + STerm x = BVVar("x", &s); + STerm y = BVVar("y", &s); + STerm z = x / y; + + x == a; + y == b; + + ASSERT_TRUE(s.check()); + + bb::fr xvals = string_to_fr(s[z], /*base=*/2, /*is_signed=*/false); + ASSERT_EQ(bb::fr(c), xvals); +} + +TEST(BVTerm, shr) +{ + uint32_t a = engine.get_random_uint32(); + uint32_t b = a >> 5; + + uint32_t modulus_base = 16; + uint32_t bitvector_size = 32; + Solver s("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", + default_solver_config, + modulus_base, + bitvector_size); + + STerm x = BVVar("x", &s); + STerm y = x >> 5; + + x == a; + + ASSERT_TRUE(s.check()); + + bb::fr xvals = string_to_fr(s[y], /*base=*/2, /*is_signed=*/false); + ASSERT_EQ(bb::fr(b), xvals); +} + +TEST(BVTerm, shl) +{ + uint32_t a = engine.get_random_uint32(); + uint32_t b = a << 5; + + uint32_t modulus_base = 16; + uint32_t bitvector_size = 32; + Solver s("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", + default_solver_config, + modulus_base, + bitvector_size); + + STerm x = BVVar("x", &s); + STerm y = x << 5; + + x == a; + + ASSERT_TRUE(s.check()); + + bb::fr xvals = string_to_fr(s[y], /*base=*/2, /*is_signed=*/false); + ASSERT_EQ(bb::fr(b), xvals); +} + +TEST(BVTerm, truncate) +{ + uint32_t a = engine.get_random_uint32(); + unsigned int mask = (1 << 10) - 1; + uint32_t b = a & mask; + + uint32_t modulus_base = 16; + uint32_t bitvector_size = 32; + Solver s("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", + default_solver_config, + modulus_base, + bitvector_size); + + STerm x = BVVar("x", &s); 
+ STerm y = x.truncate(9); + + x == a; + + ASSERT_TRUE(s.check()); + + bb::fr xvals = string_to_fr(s[y], /*base=*/2, /*is_signed=*/false); + ASSERT_EQ(bb::fr(b), xvals); +} + +TEST(BVTerm, extract_bit) +{ + uint32_t a = engine.get_random_uint32(); + unsigned int mask = (1 << 10); + uint32_t b = a & mask; + b >>= 10; + + uint32_t modulus_base = 16; + uint32_t bitvector_size = 32; + Solver s("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", + default_solver_config, + modulus_base, + bitvector_size); + + STerm x = BVVar("x", &s); + STerm y = x.extract_bit(10); + + x == a; + + ASSERT_TRUE(s.check()); + + bb::fr xvals = string_to_fr(s[y], /*base=*/2, /*is_signed=*/false); + ASSERT_EQ(bb::fr(b), xvals); +} \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/terms/data_structures.hpp b/barretenberg/cpp/src/barretenberg/smt_verification/terms/data_structures.hpp index 95cfe91c5a71..16c78150fe8a 100644 --- a/barretenberg/cpp/src/barretenberg/smt_verification/terms/data_structures.hpp +++ b/barretenberg/cpp/src/barretenberg/smt_verification/terms/data_structures.hpp @@ -21,12 +21,12 @@ class STuple { STuple() : solver(nullptr) - , term(cvc5::Term()){}; + , term(cvc5::Term()) {}; STuple(const cvc5::Term& term, Solver* s, TermType type = TermType::STuple) : solver(s) , term(term) - , type(type){}; + , type(type) {}; /** * @brief Construct a new STuple object @@ -91,9 +91,7 @@ class STuple { }; template -concept ConstructibleFromTerm = requires(const cvc5::Term& term, Solver* s, TermType type) { - T{ term, s, type }; -}; +concept ConstructibleFromTerm = requires(const cvc5::Term& term, Solver* s, TermType type) { T{ term, s, type }; }; /** * @brief symbolic Array class @@ -121,7 +119,7 @@ template class SymArray { : solver(nullptr) , term(cvc5::Term()) , ind_type(TermType::FFTerm) - , entry_type(TermType::FFTerm){}; + , entry_type(TermType::FFTerm) {}; SymArray(const cvc5::Term& term, Solver* s, TermType type = 
TermType::SymArray) : solver(s) @@ -300,12 +298,12 @@ template class SymSet { SymSet() : solver(nullptr) , term(cvc5::Term()) - , entry_type(TermType::FFTerm){}; + , entry_type(TermType::FFTerm) {}; SymSet(const cvc5::Term& term, Solver* s, TermType type = TermType::SymSet) : solver(s) , term(term) - , type(type){}; + , type(type) {}; /** * @brief Construct a new empty Symbolic Set object diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/terms/iterm.test.cpp b/barretenberg/cpp/src/barretenberg/smt_verification/terms/iterm.test.cpp index f5a6384977de..3fc8cdb44dec 100644 --- a/barretenberg/cpp/src/barretenberg/smt_verification/terms/iterm.test.cpp +++ b/barretenberg/cpp/src/barretenberg/smt_verification/terms/iterm.test.cpp @@ -1,7 +1,6 @@ #include #include "barretenberg/smt_verification/util/smt_util.hpp" -#include "barretenberg/stdlib/primitives/uint/uint.hpp" #include "term.hpp" #include diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/terms/term.hpp b/barretenberg/cpp/src/barretenberg/smt_verification/terms/term.hpp index 12258cdfaedc..f6b0156a9169 100644 --- a/barretenberg/cpp/src/barretenberg/smt_verification/terms/term.hpp +++ b/barretenberg/cpp/src/barretenberg/smt_verification/terms/term.hpp @@ -126,13 +126,13 @@ class STerm { STerm() : solver(nullptr) , term(cvc5::Term()) - , type(TermType::FFTerm){}; + , type(TermType::FFTerm) {}; STerm(const cvc5::Term& term, Solver* s, TermType type) : solver(s) , term(term) , type(type) - , operations(typed_operations.at(type)){}; + , operations(typed_operations.at(type)) {}; explicit STerm( const std::string& t, Solver* slv, bool isconst = false, uint32_t base = 16, TermType type = TermType::FFTerm); diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/util/smt_util.cpp b/barretenberg/cpp/src/barretenberg/smt_verification/util/smt_util.cpp index d87a1e3b2cfd..953b6e83a4b1 100644 --- a/barretenberg/cpp/src/barretenberg/smt_verification/util/smt_util.cpp +++ 
b/barretenberg/cpp/src/barretenberg/smt_verification/util/smt_util.cpp @@ -346,9 +346,9 @@ void fix_range_lists(bb::UltraCircuitBuilder& builder) for (auto list : builder.range_lists) { uint64_t num_multiples_of_three = (list.first / bb::UltraCircuitBuilder::DEFAULT_PLOOKUP_RANGE_STEP_SIZE); for (uint64_t i = 0; i <= num_multiples_of_three; i++) { - builder.variables[list.second.variable_indices[i]] = - i * bb::UltraCircuitBuilder::DEFAULT_PLOOKUP_RANGE_STEP_SIZE; + builder.set_variable(list.second.variable_indices[i], + i * bb::UltraCircuitBuilder::DEFAULT_PLOOKUP_RANGE_STEP_SIZE); } - builder.variables[list.second.variable_indices[num_multiples_of_three + 1]] = list.first; + builder.set_variable(list.second.variable_indices[num_multiples_of_three + 1], list.first); } -} +} \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/util/smt_util.hpp b/barretenberg/cpp/src/barretenberg/smt_verification/util/smt_util.hpp index 3ae15f140666..f3e48f3a655d 100644 --- a/barretenberg/cpp/src/barretenberg/smt_verification/util/smt_util.hpp +++ b/barretenberg/cpp/src/barretenberg/smt_verification/util/smt_util.hpp @@ -1,6 +1,7 @@ #pragma once #include +#include "barretenberg/circuit_checker/circuit_checker.hpp" #include "barretenberg/smt_verification/circuit/circuit_base.hpp" #define RED "\033[31m" diff --git a/barretenberg/cpp/src/barretenberg/solidity_helpers/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/solidity_helpers/CMakeLists.txt index c438ef4c94f8..0f32cdb7aaee 100644 --- a/barretenberg/cpp/src/barretenberg/solidity_helpers/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/solidity_helpers/CMakeLists.txt @@ -6,6 +6,7 @@ if (NOT(FUZZING)) target_link_libraries( honk_solidity_key_gen + PRIVATE stdlib_solidity_helpers ) @@ -13,6 +14,29 @@ if (NOT(FUZZING)) target_link_libraries( honk_solidity_proof_gen + PRIVATE stdlib_solidity_helpers ) + if(ENABLE_STACKTRACES) + target_link_libraries( + honk_solidity_key_gen + PUBLIC + 
Backward::Interface + ) + target_link_options( + honk_solidity_key_gen + PRIVATE + -ldw -lelf + ) + target_link_libraries( + honk_solidity_proof_gen + PUBLIC + Backward::Interface + ) + target_link_options( + honk_solidity_proof_gen + PRIVATE + -ldw -lelf + ) + endif() endif() diff --git a/barretenberg/cpp/src/barretenberg/solidity_helpers/circuits/ecdsa_circuit.hpp b/barretenberg/cpp/src/barretenberg/solidity_helpers/circuits/ecdsa_circuit.hpp index 8cac694737d0..d8dc9714fe14 100644 --- a/barretenberg/cpp/src/barretenberg/solidity_helpers/circuits/ecdsa_circuit.hpp +++ b/barretenberg/cpp/src/barretenberg/solidity_helpers/circuits/ecdsa_circuit.hpp @@ -36,7 +36,10 @@ class EcdsaCircuit { } // This is the message that we would like to confirm - std::string message_string = "goblin"; + std::string message_string(NUM_PUBLIC_INPUTS, '\0'); + for (size_t i = 0; i < NUM_PUBLIC_INPUTS; ++i) { + message_string[i] = static_cast(static_cast(public_inputs[i])); + } auto message = typename curve::byte_array_ct(&builder, message_string); // Assert that the public inputs buffer matches the message we want @@ -67,12 +70,13 @@ class EcdsaCircuit { std::vector rr(signature.r.begin(), signature.r.end()); std::vector ss(signature.s.begin(), signature.s.end()); - std::vector vv = { signature.v }; // IN CIRCUIT: create a witness with the sig in our circuit stdlib::ecdsa_signature sig{ typename curve::byte_array_ct(&builder, rr), - typename curve::byte_array_ct(&builder, ss), - typename curve::byte_array_ct(&builder, vv) }; + typename curve::byte_array_ct(&builder, ss) }; + + stdlib::byte_array hashed_message = + static_cast>(stdlib::SHA256::hash(input_buffer)); // IN CIRCUIT: verify the signature typename curve::bool_ct signature_result = stdlib::ecdsa_verify_signature( - // input_buffer, public_key, sig); - input_buffer, + // hashed_message, public_key, sig); + hashed_message, public_key, sig); - // Assert the signature is true, we hash the message inside the verify sig stdlib call - 
bool_ct is_true = bool_ct(true); - signature_result.must_imply(is_true, "signature verification failed"); + // Assert the signature is true + signature_result.assert_equal(bool_ct(true)); return builder; } diff --git a/barretenberg/cpp/src/barretenberg/solidity_helpers/circuits/recursive_circuit.hpp b/barretenberg/cpp/src/barretenberg/solidity_helpers/circuits/recursive_circuit.hpp index 8d49fe834c1b..a853f89e2999 100644 --- a/barretenberg/cpp/src/barretenberg/solidity_helpers/circuits/recursive_circuit.hpp +++ b/barretenberg/cpp/src/barretenberg/solidity_helpers/circuits/recursive_circuit.hpp @@ -15,7 +15,7 @@ class RecursiveCircuit { using InnerProver = bb::UltraProver_; using InnerVerifier = bb::UltraVerifier_; using InnerBuilder = typename InnerFlavor::CircuitBuilder; - using InnerDeciderProvingKey = bb::DeciderProvingKey_; + using InnerProverInstance = bb::ProverInstance_; using InnerCommitment = InnerFlavor::Commitment; using InnerFF = InnerFlavor::FF; using InnerIO = bb::stdlib::recursion::honk::DefaultIO; @@ -70,10 +70,10 @@ class RecursiveCircuit { // Create the outer recursive verifier circuit OuterBuilder outer_circuit; - auto inner_proving_key = std::make_shared(inner_circuit); + auto inner_prover_instance = std::make_shared(inner_circuit); auto inner_verification_key = - std::make_shared(inner_proving_key->get_precomputed()); - InnerProver inner_prover(inner_proving_key, inner_verification_key); + std::make_shared(inner_prover_instance->get_precomputed()); + InnerProver inner_prover(inner_prover_instance, inner_verification_key); auto inner_proof = inner_prover.construct_proof(); auto stdlib_vk_and_hash = diff --git a/barretenberg/cpp/src/barretenberg/solidity_helpers/honk_key_gen.cpp b/barretenberg/cpp/src/barretenberg/solidity_helpers/honk_key_gen.cpp index 317e8b2e3b4e..b548752135cd 100644 --- a/barretenberg/cpp/src/barretenberg/solidity_helpers/honk_key_gen.cpp +++ b/barretenberg/cpp/src/barretenberg/solidity_helpers/honk_key_gen.cpp @@ -15,21 
+15,21 @@ using namespace bb; -using DeciderProvingKey = DeciderProvingKey_; +using ProverInstance = ProverInstance_; using VerificationKey = UltraKeccakFlavor::VerificationKey; template void generate_keys_honk(const std::string& output_path, std::string circuit_name) { - uint256_t public_inputs[4] = { 0, 0, 0, 0 }; + uint256_t public_inputs[6] = { 0, 0, 0, 0, 0, 0 }; UltraCircuitBuilder builder = Circuit::generate(public_inputs); if constexpr (!std::same_as) { stdlib::recursion::PairingPoints::add_default_to_public_inputs(builder); } - auto proving_key = std::make_shared(builder); - auto verification_key = std::make_shared(proving_key->get_precomputed()); - UltraKeccakProver prover(proving_key, verification_key); + auto prover_instance = std::make_shared(builder); + auto verification_key = std::make_shared(prover_instance->get_precomputed()); + UltraKeccakProver prover(prover_instance, verification_key); // Make verification key file upper case circuit_name.at(0) = static_cast(std::toupper(static_cast(circuit_name.at(0)))); diff --git a/barretenberg/cpp/src/barretenberg/solidity_helpers/honk_proof_gen.cpp b/barretenberg/cpp/src/barretenberg/solidity_helpers/honk_proof_gen.cpp index a75e2f6168bd..ffe1927482a2 100644 --- a/barretenberg/cpp/src/barretenberg/solidity_helpers/honk_proof_gen.cpp +++ b/barretenberg/cpp/src/barretenberg/solidity_helpers/honk_proof_gen.cpp @@ -21,7 +21,7 @@ using numeric::uint256_t; // Get rid of the inner typename template void generate_proof(uint256_t inputs[]) { - using DeciderProvingKey = DeciderProvingKey_; + using ProverInstance = ProverInstance_; using VerificationKey = typename Flavor::VerificationKey; using Prover = UltraProver_; using Verifier = UltraVerifier_; @@ -35,7 +35,7 @@ template void generate_proof(uint256_t input stdlib::recursion::PairingPoints::add_default_to_public_inputs(builder); } - auto instance = std::make_shared(builder); + auto instance = std::make_shared(builder); auto verification_key = 
std::make_shared(instance->get_precomputed()); Prover prover(instance, verification_key); Verifier verifier(verification_key); diff --git a/barretenberg/cpp/src/barretenberg/srs/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/srs/CMakeLists.txt index 3f190cc69943..b3e704f74963 100644 --- a/barretenberg/cpp/src/barretenberg/srs/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/srs/CMakeLists.txt @@ -1 +1 @@ -barretenberg_module(srs polynomials) +barretenberg_module(srs polynomials httplib_headers crypto_sha256) diff --git a/barretenberg/cpp/src/barretenberg/srs/c_bind.cpp b/barretenberg/cpp/src/barretenberg/srs/c_bind.cpp index 1f602b841ad5..c6b5bea8228f 100644 --- a/barretenberg/cpp/src/barretenberg/srs/c_bind.cpp +++ b/barretenberg/cpp/src/barretenberg/srs/c_bind.cpp @@ -1,6 +1,7 @@ #include "c_bind.hpp" #include "barretenberg/common/serialize.hpp" #include "barretenberg/ecc/curves/bn254/bn254.hpp" +#include "barretenberg/srs/factories/bn254_crs_data.hpp" #include "global_crs.hpp" #include #include @@ -15,14 +16,14 @@ using namespace bb; * We are not passed a vector (length prefixed), but the buffer and num points independently. * Saves on having the generate the vector awkwardly calling side after downloading crs. 
*/ -WASM_EXPORT void srs_init_srs(uint8_t const* points_buf, uint32_t const* num_points_buf, uint8_t const* g2_point_buf) +WASM_EXPORT void srs_init_srs(uint8_t const* points_buf, uint32_t const* num_points_buf) { auto num_points = ntohl(*num_points_buf); auto g1_points = std::vector(num_points); for (size_t i = 0; i < num_points; ++i) { g1_points[i] = from_buffer(points_buf, i * 64); } - auto g2_point = from_buffer(g2_point_buf); + auto g2_point = bb::srs::get_bn254_g2_crs_element(); bb::srs::init_bn254_mem_crs_factory(g1_points, g2_point); } diff --git a/barretenberg/cpp/src/barretenberg/srs/c_bind.hpp b/barretenberg/cpp/src/barretenberg/srs/c_bind.hpp index cd33e478e05c..14ec2bdc687d 100644 --- a/barretenberg/cpp/src/barretenberg/srs/c_bind.hpp +++ b/barretenberg/cpp/src/barretenberg/srs/c_bind.hpp @@ -1,5 +1,5 @@ #include #include -WASM_EXPORT void srs_init_srs(uint8_t const* points_buf, uint32_t const* num_points, uint8_t const* g2_point_buf); +WASM_EXPORT void srs_init_srs(uint8_t const* points_buf, uint32_t const* num_points); WASM_EXPORT void srs_init_grumpkin_srs(uint8_t const* points_buf, uint32_t const* num_points); \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/srs/factories/bn254_crs_data.hpp b/barretenberg/cpp/src/barretenberg/srs/factories/bn254_crs_data.hpp new file mode 100644 index 000000000000..135c6c90dc91 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/srs/factories/bn254_crs_data.hpp @@ -0,0 +1,53 @@ +#pragma once +#include "barretenberg/common/serialize.hpp" +#include "barretenberg/ecc/curves/bn254/g1.hpp" +#include "barretenberg/ecc/curves/bn254/g2.hpp" + +namespace bb::srs { + +/** + * @brief Expected first G1 element from BN254 CRS + * @details The first element of the G1 CRS is the standard BN254 G1 generator point (1, 2). + * This is used to verify the integrity of downloaded CRS files. 
+ */ +inline constexpr g1::affine_element BN254_G1_FIRST_ELEMENT = g1::affine_one; + +/** + * @brief Expected second G1 element from BN254 CRS + * @details This is the second point in the BN254 CRS, corresponding to tau * G where tau is the secret from the + * trusted setup. Reference: http://crs.aztec.network/g1.dat (bytes 64-127) + */ +inline g1::affine_element get_bn254_g1_second_element() +{ + // Hardcoded second G1 element (64 bytes) - see reference URL above + static constexpr uint8_t g1_second_data[64] = { 0x2d, 0x36, 0x06, 0x28, 0x28, 0x9f, 0xf9, 0x43, 0xff, 0x6b, 0xd1, + 0xa8, 0x7b, 0xbe, 0x4e, 0x62, 0xab, 0xe7, 0xfb, 0x61, 0xba, 0x83, + 0xef, 0xfd, 0x26, 0x6f, 0x22, 0xbd, 0xcf, 0x31, 0xe6, 0xf9, 0x26, + 0xb9, 0x2a, 0x79, 0xe5, 0x63, 0xc3, 0xf4, 0x82, 0x52, 0xcc, 0xe7, + 0xfe, 0xec, 0xa2, 0xf0, 0xf8, 0xd3, 0x3d, 0xcb, 0x4e, 0xf7, 0xb0, + 0x64, 0x3b, 0xf0, 0x7b, 0xd4, 0x05, 0x70, 0x0a, 0xaa }; + return from_buffer(g1_second_data); +} + +/** + * @brief Reference BN254 G2 element from the trusted setup CRS + * @details This is the single G2 point used in the BN254 CRS for verification. 
+ * Reference: http://crs.aztec.network/g2.dat + */ +inline g2::affine_element get_bn254_g2_crs_element() +{ + // Hardcoded G2 element (128 bytes) - see reference URL above + static constexpr uint8_t g2_data[128] = { + 0x01, 0x18, 0xc4, 0xd5, 0xb8, 0x37, 0xbc, 0xc2, 0xbc, 0x89, 0xb5, 0xb3, 0x98, 0xb5, 0x97, 0x4e, + 0x9f, 0x59, 0x44, 0x07, 0x3b, 0x32, 0x07, 0x8b, 0x7e, 0x23, 0x1f, 0xec, 0x93, 0x88, 0x83, 0xb0, + 0x26, 0x0e, 0x01, 0xb2, 0x51, 0xf6, 0xf1, 0xc7, 0xe7, 0xff, 0x4e, 0x58, 0x07, 0x91, 0xde, 0xe8, + 0xea, 0x51, 0xd8, 0x7a, 0x35, 0x8e, 0x03, 0x8b, 0x4e, 0xfe, 0x30, 0xfa, 0xc0, 0x93, 0x83, 0xc1, + 0x22, 0xfe, 0xbd, 0xa3, 0xc0, 0xc0, 0x63, 0x2a, 0x56, 0x47, 0x5b, 0x42, 0x14, 0xe5, 0x61, 0x5e, + 0x11, 0xe6, 0xdd, 0x3f, 0x96, 0xe6, 0xce, 0xa2, 0x85, 0x4a, 0x87, 0xd4, 0xda, 0xcc, 0x5e, 0x55, + 0x04, 0xfc, 0x63, 0x69, 0xf7, 0x11, 0x0f, 0xe3, 0xd2, 0x51, 0x56, 0xc1, 0xbb, 0x9a, 0x72, 0x85, + 0x9c, 0xf2, 0xa0, 0x46, 0x41, 0xf9, 0x9b, 0xa4, 0xee, 0x41, 0x3c, 0x80, 0xda, 0x6a, 0x5f, 0xe4 + }; + return from_buffer(g2_data); +} + +} // namespace bb::srs diff --git a/barretenberg/cpp/src/barretenberg/srs/factories/get_bn254_crs.cpp b/barretenberg/cpp/src/barretenberg/srs/factories/get_bn254_crs.cpp index 62d6a4ab2022..396405a87059 100644 --- a/barretenberg/cpp/src/barretenberg/srs/factories/get_bn254_crs.cpp +++ b/barretenberg/cpp/src/barretenberg/srs/factories/get_bn254_crs.cpp @@ -1,33 +1,39 @@ #include "get_bn254_crs.hpp" -#include "barretenberg/api/exec_pipe.hpp" #include "barretenberg/api/file_io.hpp" +#include "barretenberg/common/flock.hpp" +#include "barretenberg/common/serialize.hpp" #include "barretenberg/ecc/curves/bn254/g1.hpp" +#include "barretenberg/ecc/curves/bn254/g2.hpp" +#include "bn254_crs_data.hpp" +#include "http_download.hpp" namespace { std::vector download_bn254_g1_data(size_t num_points) { - size_t g1_end = num_points * sizeof(bb::g1::affine_element) - 1; + size_t g1_end = (num_points * sizeof(bb::g1::affine_element)) - 1; - std::string url = 
"https://crs.aztec.network/g1.dat"; + // Download via HTTP with Range header + auto data = bb::srs::http_download("http://crs.aztec.network/g1.dat", 0, g1_end); - // IMPORTANT: this currently uses a shell, DO NOT let user-controlled strings here. - std::string command = "curl -H \"Range: bytes=0-" + std::to_string(g1_end) + "\" '" + url + "'"; + if (data.size() < sizeof(bb::g1::affine_element)) { + throw_or_abort("Downloaded g1 data is too small"); + } - auto data = bb::exec_pipe(command); - // Header + num_points * sizeof point. - if (data.size() < g1_end) { - throw_or_abort("Failed to download g1 data."); + // Verify first element matches our expected point. + auto first_element = from_buffer(data, 0); + if (first_element != bb::srs::BN254_G1_FIRST_ELEMENT) { + throw_or_abort("Downloaded BN254 G1 CRS first element does not match expected point."); } - return data; -} + // Verify second element if we have enough data + if (data.size() >= 2 * sizeof(bb::g1::affine_element)) { + auto second_element = from_buffer(data, sizeof(bb::g1::affine_element)); + if (second_element != bb::srs::get_bn254_g1_second_element()) { + throw_or_abort("Downloaded BN254 G1 CRS second element does not match expected point."); + } + } -std::vector download_bn254_g2_data() -{ - std::string url = "https://crs.aztec.network/g2.dat"; - // IMPORTANT: this currently uses a shell, DO NOT let user-controlled strings here. 
- std::string command = "curl '" + url + "'"; - return bb::exec_pipe(command); + return data; } } // namespace @@ -36,11 +42,13 @@ std::vector get_bn254_g1_data(const std::filesystem::path& p size_t num_points, bool allow_download) { - // TODO(AD): per Charlie this should just download and replace the flat file portion atomically so we have no race - // condition std::filesystem::create_directories(path); auto g1_path = path / "bn254_g1.dat"; + auto lock_path = path / "crs.lock"; + // Acquire exclusive lock to prevent simultaneous downloads + FileLockGuard lock(lock_path.string()); + size_t g1_downloaded_points = get_file_size(g1_path) / sizeof(g1::affine_element); if (g1_downloaded_points >= num_points) { @@ -62,6 +70,19 @@ std::vector get_bn254_g1_data(const std::filesystem::path& p num_points, " were requested but download not allowed in this context")); } + + // Double-check after acquiring lock (another process may have downloaded while we waited) + g1_downloaded_points = get_file_size(g1_path) / sizeof(g1::affine_element); + if (g1_downloaded_points >= num_points) { + vinfo("using cached bn254 crs with num points ", std::to_string(g1_downloaded_points), " at ", g1_path); + auto data = read_file(g1_path, num_points * sizeof(g1::affine_element)); + auto points = std::vector(num_points); + for (size_t i = 0; i < num_points; ++i) { + points[i] = from_buffer(data, i * sizeof(g1::affine_element)); + } + return points; + } + vinfo("downloading bn254 crs..."); auto data = download_bn254_g1_data(num_points); write_file(g1_path, data); @@ -73,22 +94,4 @@ std::vector get_bn254_g1_data(const std::filesystem::path& p return points; } -g2::affine_element get_bn254_g2_data(const std::filesystem::path& path, bool allow_download) -{ - std::filesystem::create_directories(path); - - auto g2_path = path / "bn254_g2.dat"; - size_t g2_file_size = get_file_size(g2_path); - - if (g2_file_size == sizeof(g2::affine_element)) { - auto data = read_file(g2_path); - return 
from_buffer(data.data()); - } - if (!allow_download) { - throw_or_abort("bn254 g2 data not found and download not allowed in this context"); - } - auto data = download_bn254_g2_data(); - write_file(g2_path, data); - return from_buffer(data.data()); -} } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/srs/factories/get_grumpkin_crs.cpp b/barretenberg/cpp/src/barretenberg/srs/factories/get_grumpkin_crs.cpp index a54633b3c9a2..5a41a8ab2e59 100644 --- a/barretenberg/cpp/src/barretenberg/srs/factories/get_grumpkin_crs.cpp +++ b/barretenberg/cpp/src/barretenberg/srs/factories/get_grumpkin_crs.cpp @@ -1,38 +1,25 @@ #include "get_grumpkin_crs.hpp" -#include "barretenberg/api/exec_pipe.hpp" #include "barretenberg/api/file_io.hpp" +#include "barretenberg/common/flock.hpp" #include "barretenberg/common/throw_or_abort.hpp" #include "barretenberg/common/try_catch_shim.hpp" #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" - -namespace { -std::vector download_grumpkin_g1_data(size_t num_points) -{ - size_t g1_end = num_points * sizeof(bb::curve::Grumpkin::AffineElement) - 1; - std::string url = "https://crs.aztec.network/grumpkin_g1.dat"; - - // IMPORTANT: this currently uses a shell, DO NOT let user-controlled strings here. 
- std::string command = "curl -s -H \"Range: bytes=0-" + std::to_string(g1_end) + "\" '" + url + "'"; - - auto data = bb::exec_pipe(command); - if (data.size() < g1_end) { - THROW std::runtime_error("Failed to download grumpkin g1 data."); - } - - return data; -} -} // namespace +#include "grumpkin_srs_gen.hpp" namespace bb { std::vector get_grumpkin_g1_data(const std::filesystem::path& path, size_t num_points, bool allow_download) { - // TODO(AD): per Charlie this should just download and replace the flat file portion atomically so we have no race - // condition std::filesystem::create_directories(path); + auto g1_path = path / "grumpkin_g1.flat.dat"; + auto lock_path = path / "crs.lock"; + // Acquire exclusive lock to prevent simultaneous generation/writes + FileLockGuard lock(lock_path.string()); + size_t g1_downloaded_points = get_file_size(g1_path) / sizeof(curve::Grumpkin::AffineElement); + if (g1_downloaded_points >= num_points) { vinfo("using cached grumpkin crs with num points ", g1_downloaded_points, " at: ", g1_path); auto data = read_file(g1_path, num_points * sizeof(curve::Grumpkin::AffineElement)); @@ -44,23 +31,20 @@ std::vector get_grumpkin_g1_data(const std::file return points; } } + if (!allow_download && g1_downloaded_points == 0) { - throw_or_abort("grumpkin g1 data not found and download not allowed in this context"); + throw_or_abort("grumpkin g1 data not found and generation not allowed in this context"); } else if (!allow_download) { throw_or_abort(format("grumpkin g1 data had ", g1_downloaded_points, " points and ", num_points, - " were requested but download not allowed in this context")); + " were requested but generation not allowed in this context")); } - vinfo("downloading grumpkin crs..."); - auto data = download_grumpkin_g1_data(num_points); - write_file(path / "grumpkin_g1.flat.dat", data); - std::vector points(num_points); - for (uint32_t i = 0; i < num_points; ++i) { - points[i] = from_buffer(data, i * 
sizeof(curve::Grumpkin::AffineElement)); - } + vinfo("generating grumpkin crs..."); + auto points = srs::generate_grumpkin_srs(num_points); + write_file(g1_path, to_buffer(points)); return points; } } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/srs/factories/grumpkin_srs_gen.cpp b/barretenberg/cpp/src/barretenberg/srs/factories/grumpkin_srs_gen.cpp new file mode 100644 index 000000000000..afdb5229ba93 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/srs/factories/grumpkin_srs_gen.cpp @@ -0,0 +1,59 @@ +#include "grumpkin_srs_gen.hpp" +#include "barretenberg/common/net.hpp" +#include "barretenberg/common/thread.hpp" +#include "barretenberg/crypto/sha256/sha256.hpp" + +namespace { +const std::string protocol_name = "BARRETENBERG_GRUMPKIN_IPA_CRS"; +} + +namespace bb::srs { + +std::vector generate_grumpkin_srs(size_t num_points) +{ + std::vector srs(num_points); + + parallel_for_range(num_points, [&](size_t start, size_t end) { + std::vector hash_input; + for (size_t point_idx = start; point_idx < end; ++point_idx) { + bool rational_point_found = false; + size_t attempt = 0; + while (!rational_point_found) { + hash_input.clear(); + // We hash + // |BARRETENBERG_GRUMPKIN_IPA_CRS|POINT_INDEX_IN_LITTLE_ENDIAN|POINT_ATTEMPT_INDEX_IN_LITTLE_ENDIAN| + std::copy(protocol_name.begin(), protocol_name.end(), std::back_inserter(hash_input)); + uint64_t point_index_le_order = htonll(static_cast(point_idx)); + uint64_t point_attempt_le_order = htonll(static_cast(attempt)); + hash_input.insert(hash_input.end(), + reinterpret_cast(&point_index_le_order), + reinterpret_cast(&point_index_le_order) + sizeof(uint64_t)); + hash_input.insert(hash_input.end(), + reinterpret_cast(&point_attempt_le_order), + reinterpret_cast(&point_attempt_le_order) + sizeof(uint64_t)); + auto hash_result = crypto::sha256(hash_input); + uint256_t hash_result_uint( + ntohll(*reinterpret_cast(hash_result.data())), + ntohll(*reinterpret_cast(hash_result.data() + sizeof(uint64_t))), + 
ntohll(*reinterpret_cast(hash_result.data() + 2 * sizeof(uint64_t))), + ntohll(*reinterpret_cast(hash_result.data() + 3 * sizeof(uint64_t)))); + // We try to get a point from the resulting hash + auto crs_element = grumpkin::g1::affine_element::from_compressed(hash_result_uint); + // If the points coordinates are (0,0) then the compressed representation didn't land on an actual point + // (happens half of the time) and we need to continue searching + if (!crs_element.x.is_zero() || !crs_element.y.is_zero()) { + rational_point_found = true; + // Note: there used to be a mutex here, however there is no need as this is just a write to a + // computed (exclusive to this thread) memory location + srs.at(point_idx) = static_cast(crs_element); + break; + } + attempt += 1; + } + } + }); + + return srs; +} + +} // namespace bb::srs diff --git a/barretenberg/cpp/src/barretenberg/srs/factories/grumpkin_srs_gen.hpp b/barretenberg/cpp/src/barretenberg/srs/factories/grumpkin_srs_gen.hpp new file mode 100644 index 000000000000..ffdd210aa3e5 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/srs/factories/grumpkin_srs_gen.hpp @@ -0,0 +1,19 @@ +#pragma once +#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" +#include + +namespace bb::srs { + +/** + * @brief Generates a monomial basis Grumpkin SRS on-the-fly. + * + * @details The Grumpkin SRS does not require a trusted setup and has no underlying secret generator. + * Points are generated deterministically by hashing a protocol string with point indices. + * ! Note that the first element will not be equal to the generator point defined in grumpkin.hpp. 
+ * + * @param num_points The number of SRS points to generate + * @return std::vector The generated SRS points + */ +std::vector generate_grumpkin_srs(size_t num_points); + +} // namespace bb::srs diff --git a/barretenberg/cpp/src/barretenberg/srs/factories/http_download.hpp b/barretenberg/cpp/src/barretenberg/srs/factories/http_download.hpp new file mode 100644 index 000000000000..91c3d117b927 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/srs/factories/http_download.hpp @@ -0,0 +1,86 @@ +#pragma once +#include "barretenberg/common/throw_or_abort.hpp" +#include + +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-literal-operator" +#pragma clang diagnostic ignored "-Wunused-parameter" +#endif +#ifdef __GNUC__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" +#endif +#ifndef __wasm__ +#include +#endif +#ifdef __GNUC__ +#pragma GCC diagnostic pop +#endif +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + +#include +#include + +namespace bb::srs { + +/** + * @brief Download data from a URL with optional Range header support + * @param url Full URL (e.g., "http://crs.aztec.network/g1.dat") + * @param start_byte Starting byte for range request (0 for no range) + * @param end_byte Ending byte for range request (0 for no range) + * @return Downloaded data as bytes + */ +inline std::vector http_download([[maybe_unused]] const std::string& url, + [[maybe_unused]] size_t start_byte = 0, + [[maybe_unused]] size_t end_byte = 0) +{ +#ifdef __wasm__ + throw_or_abort("HTTP download not supported in WASM"); +#else + // Parse URL into host and path + size_t proto_end = url.find("://"); + if (proto_end == std::string::npos) { + throw_or_abort("Invalid URL format: " + url); + } + + size_t host_start = proto_end + 3; + size_t path_start = url.find('/', host_start); + if (path_start == std::string::npos) { + throw_or_abort("Invalid URL format: " + url); + } + + std::string host = 
url.substr(host_start, path_start - host_start); + std::string path = url.substr(path_start); + + // Create HTTP client (non-SSL) + httplib::Client cli(("http://" + host).c_str()); + cli.set_follow_location(true); + cli.set_connection_timeout(30); + cli.set_read_timeout(60); + + // Prepare headers + httplib::Headers headers; + if (end_byte > 0 && end_byte >= start_byte) { + headers.emplace("Range", "bytes=" + std::to_string(start_byte) + "-" + std::to_string(end_byte)); + } + + // Download + auto res = cli.Get(path.c_str(), headers); + + if (!res) { + throw_or_abort("HTTP request failed for " + url + ": " + httplib::to_string(res.error())); + } + + if (res->status != 200 && res->status != 206) { + throw_or_abort("HTTP request failed for " + url + " with status " + std::to_string(res->status)); + } + + // Convert string body to vector + const std::string& body = res->body; + return std::vector(body.begin(), body.end()); +#endif +} +} // namespace bb::srs diff --git a/barretenberg/cpp/src/barretenberg/srs/factories/mem_bn254_crs_factory.cpp b/barretenberg/cpp/src/barretenberg/srs/factories/mem_bn254_crs_factory.cpp index 4b8a44bd57d9..1247617063bc 100644 --- a/barretenberg/cpp/src/barretenberg/srs/factories/mem_bn254_crs_factory.cpp +++ b/barretenberg/cpp/src/barretenberg/srs/factories/mem_bn254_crs_factory.cpp @@ -1,5 +1,5 @@ #include "mem_bn254_crs_factory.hpp" -#include "barretenberg/common/op_count.hpp" +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/ecc/curves/bn254/bn254.hpp" #include "barretenberg/ecc/curves/bn254/g1.hpp" #include "barretenberg/ecc/curves/bn254/pairing.hpp" diff --git a/barretenberg/cpp/src/barretenberg/srs/factories/native_crs_factory.cpp b/barretenberg/cpp/src/barretenberg/srs/factories/native_crs_factory.cpp index 22e9ce711f12..4b835903dc9f 100644 --- a/barretenberg/cpp/src/barretenberg/srs/factories/native_crs_factory.cpp +++ b/barretenberg/cpp/src/barretenberg/srs/factories/native_crs_factory.cpp @@ -1,4 +1,5 @@ 
#include "native_crs_factory.hpp" +#include "barretenberg/srs/factories/bn254_crs_data.hpp" #include "barretenberg/srs/factories/get_bn254_crs.hpp" #include "barretenberg/srs/factories/get_grumpkin_crs.hpp" #include "barretenberg/srs/factories/mem_bn254_crs_factory.hpp" @@ -16,7 +17,7 @@ namespace bb::srs::factories { MemBn254CrsFactory init_bn254_crs(const std::filesystem::path& path, size_t dyadic_circuit_size, bool allow_download) { auto bn254_g1_data = get_bn254_g1_data(path, dyadic_circuit_size, allow_download); - auto bn254_g2_data = get_bn254_g2_data(path); + auto bn254_g2_data = srs::get_bn254_g2_crs_element(); return { bn254_g1_data, bn254_g2_data }; } diff --git a/barretenberg/cpp/src/barretenberg/srs/factories/native_crs_factory.hpp b/barretenberg/cpp/src/barretenberg/srs/factories/native_crs_factory.hpp index 0704696ac527..1310a44bf301 100644 --- a/barretenberg/cpp/src/barretenberg/srs/factories/native_crs_factory.hpp +++ b/barretenberg/cpp/src/barretenberg/srs/factories/native_crs_factory.hpp @@ -5,6 +5,9 @@ #include "barretenberg/srs/factories/mem_grumpkin_crs_factory.hpp" #include #include +#ifndef NO_MULTITHREADING +#include +#endif namespace bb::srs::factories { @@ -40,6 +43,9 @@ class NativeBn254CrsFactory : public CrsFactory { {} std::shared_ptr> get_crs(size_t degree) override { +#ifndef NO_MULTITHREADING + std::lock_guard lock(mutex_); +#endif if (degree > last_degree_ || mem_crs_ == nullptr) { mem_crs_ = std::make_shared(init_bn254_crs(path_, degree, allow_download_)); last_degree_ = degree; @@ -52,6 +58,9 @@ class NativeBn254CrsFactory : public CrsFactory { bool allow_download_ = true; size_t last_degree_ = 0; std::shared_ptr mem_crs_; +#ifndef NO_MULTITHREADING + std::mutex mutex_; +#endif }; class NativeGrumpkinCrsFactory : public CrsFactory { @@ -63,6 +72,9 @@ class NativeGrumpkinCrsFactory : public CrsFactory { std::shared_ptr> get_crs(size_t degree) override { +#ifndef NO_MULTITHREADING + std::lock_guard lock(mutex_); +#endif if (degree 
> last_degree_ || mem_crs_ == nullptr) { mem_crs_ = std::make_unique(init_grumpkin_crs(path_, degree, allow_download_)); last_degree_ = degree; @@ -75,6 +87,9 @@ class NativeGrumpkinCrsFactory : public CrsFactory { bool allow_download_ = true; size_t last_degree_ = 0; std::unique_ptr mem_crs_; +#ifndef NO_MULTITHREADING + std::mutex mutex_; +#endif }; } // namespace bb::srs::factories diff --git a/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.cpp index 408af4430efd..be6e900b7425 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.cpp @@ -18,23 +18,22 @@ ClientIVCRecursiveVerifier::Output ClientIVCRecursiveVerifier::verify(const Stdl { using MergeCommitments = GoblinVerifier::MergeVerifier::InputCommitments; std::shared_ptr civc_rec_verifier_transcript(std::make_shared()); - // Construct stdlib Mega verification key - auto stdlib_mega_vk_and_hash = std::make_shared(*builder, ivc_verification_key.mega); // Perform recursive decider verification - MegaVerifier verifier{ builder.get(), stdlib_mega_vk_and_hash, civc_rec_verifier_transcript }; + MegaVerifier verifier{ builder, stdlib_mega_vk_and_hash, civc_rec_verifier_transcript }; MegaVerifier::Output mega_output = verifier.template verify_proof>(proof.mega_proof); // Perform Goblin recursive verification GoblinVerificationKey goblin_verification_key{}; MergeCommitments merge_commitments{ - .t_commitments = verifier.key->witness_commitments.get_ecc_op_wires() + .t_commitments = verifier.verifier_instance->witness_commitments.get_ecc_op_wires() .get_copy(), // Commitments to subtables added by the hiding kernel .T_prev_commitments = std::move(mega_output.ecc_op_tables) // Commitments to the state of the ecc op_queue as // computed 
insided the hiding kernel }; - GoblinVerifier goblin_verifier{ builder.get(), goblin_verification_key, civc_rec_verifier_transcript }; - GoblinRecursiveVerifierOutput output = goblin_verifier.verify(proof.goblin_proof, merge_commitments); + GoblinVerifier goblin_verifier{ builder, goblin_verification_key, civc_rec_verifier_transcript }; + GoblinRecursiveVerifierOutput output = + goblin_verifier.verify(proof.goblin_proof, merge_commitments, MergeSettings::APPEND); output.points_accumulator.aggregate(mega_output.points_accumulator); // TODO(https://github.com/AztecProtocol/barretenberg/issues/1396): State tracking in CIVC verifiers return { output }; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.hpp b/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.hpp index 917eeb88612f..c97a5fe94a81 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.hpp @@ -13,25 +13,35 @@ namespace bb::stdlib::recursion::honk { class ClientIVCRecursiveVerifier { using Builder = UltraCircuitBuilder; // The circuit will be an Ultra circuit using RecursiveFlavor = MegaZKRecursiveFlavor_; // The hiding circuit verifier algorithm is MegaZK - using RecursiveDeciderVerificationKeys = RecursiveDeciderVerificationKeys_; - using RecursiveDeciderVerificationKey = RecursiveDeciderVerificationKeys::DeciderVK; - using RecursiveVerificationKey = RecursiveDeciderVerificationKeys::VerificationKey; - using RecursiveVKAndHash = RecursiveDeciderVerificationKeys::VKAndHash; - using FoldingVerifier = ProtogalaxyRecursiveVerifier_; + using RecursiveVerifierInstance = RecursiveVerifierInstance_; + using RecursiveVerificationKey = RecursiveVerifierInstance::VerificationKey; + using FoldingVerifier = ProtogalaxyRecursiveVerifier_; using MegaVerifier = 
UltraRecursiveVerifier_; using GoblinVerifier = GoblinRecursiveVerifier; using Flavor = RecursiveFlavor::NativeFlavor; using VerificationKey = Flavor::VerificationKey; - using IVCVerificationKey = ClientIVC::VerificationKey; using Transcript = GoblinRecursiveVerifier::Transcript; public: using GoblinVerificationKey = Goblin::VerificationKey; using Output = GoblinRecursiveVerifierOutput; + using RecursiveVKAndHash = RecursiveVerifierInstance::VKAndHash; + using RecursiveVK = RecursiveFlavor::VerificationKey; struct StdlibProof { using StdlibHonkProof = bb::stdlib::Proof; using StdlibGoblinProof = GoblinRecursiveVerifier::StdlibProof; + + static constexpr size_t PROOF_LENGTH_WITHOUT_PUB_INPUTS(size_t virtual_log_n = Flavor::VIRTUAL_LOG_N) + { + return bb::ClientIVC::Proof::PROOF_LENGTH_WITHOUT_PUB_INPUTS(virtual_log_n); + } + + static constexpr size_t PROOF_LENGTH(size_t virtual_log_n = Flavor::VIRTUAL_LOG_N) + { + return bb::ClientIVC::Proof::PROOF_LENGTH(virtual_log_n); + } + StdlibHonkProof mega_proof; // proof of the hiding circuit StdlibGoblinProof goblin_proof; @@ -39,16 +49,72 @@ class ClientIVCRecursiveVerifier { : mega_proof(builder, proof.mega_proof) , goblin_proof(builder, proof.goblin_proof) {} + + /** + * @brief Construct a new Stdlib Proof object from indices in a builder + * + * @param proof_indices + * @param virtual_log_n + */ + StdlibProof(const std::vector>& proof_indices, + size_t public_inputs_size, + size_t virtual_log_n = Flavor::VIRTUAL_LOG_N) + { + + BB_ASSERT_EQ(proof_indices.size(), + PROOF_LENGTH(virtual_log_n) + public_inputs_size, + "Number of indices differs from the expected proof size."); + + auto it = proof_indices.begin(); + + // Mega proof + std::ptrdiff_t start_idx = 0; + std::ptrdiff_t end_idx = static_cast( + RecursiveFlavor::NativeFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS(virtual_log_n) + + HidingKernelIO::PUBLIC_INPUTS_SIZE + public_inputs_size); + mega_proof.insert(mega_proof.end(), it + start_idx, it + end_idx); + + // Merge 
proof + start_idx = end_idx; + end_idx += static_cast(MERGE_PROOF_SIZE); + goblin_proof.merge_proof.insert(goblin_proof.merge_proof.end(), it + start_idx, it + end_idx); + + // ECCVM pre-ipa proof + start_idx = end_idx; + end_idx += static_cast(ECCVMFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS - IPA_PROOF_LENGTH); + goblin_proof.eccvm_proof.pre_ipa_proof.insert( + goblin_proof.eccvm_proof.pre_ipa_proof.end(), it + start_idx, it + end_idx); + + // ECCVM ipa proof + start_idx = end_idx; + end_idx += static_cast(IPA_PROOF_LENGTH); + goblin_proof.eccvm_proof.ipa_proof.insert( + goblin_proof.eccvm_proof.ipa_proof.end(), it + start_idx, it + end_idx); + + // Translator proof + start_idx = end_idx; + end_idx += static_cast(TranslatorFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS); + goblin_proof.translator_proof.insert(goblin_proof.translator_proof.end(), it + start_idx, it + end_idx); + + BB_ASSERT_EQ(static_cast(end_idx), + PROOF_LENGTH(virtual_log_n) + public_inputs_size, + "Reconstructed a ClientIVC proof of wrong the length from proof indices."); + } }; - ClientIVCRecursiveVerifier(const std::shared_ptr& builder, IVCVerificationKey& ivc_verification_key) + ClientIVCRecursiveVerifier(Builder* builder, const std::shared_ptr& native_mega_vk) + : builder(builder) + , stdlib_mega_vk_and_hash(std::make_shared(*builder, native_mega_vk)) {}; + + ClientIVCRecursiveVerifier(Builder* builder, const std::shared_ptr& stdlib_mega_vk_and_hash) : builder(builder) - , ivc_verification_key(ivc_verification_key){}; + , stdlib_mega_vk_and_hash(stdlib_mega_vk_and_hash) {}; [[nodiscard("IPA claim and Pairing points should be accumulated")]] Output verify(const StdlibProof&); private: - std::shared_ptr builder; - IVCVerificationKey ivc_verification_key; + Builder* builder; + // VK and hash of the hiding kernel + std::shared_ptr stdlib_mega_vk_and_hash; }; } // namespace bb::stdlib::recursion::honk diff --git 
a/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.test.cpp index 8aadc42920da..39b62ff086b5 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.test.cpp @@ -32,12 +32,14 @@ class ClientIVCRecursionTests : public testing::Test { * @brief Construct a genuine ClientIVC prover output based on accumulation of an arbitrary set of mock circuits * */ - static ClientIVCProverOutput construct_client_ivc_prover_output(ClientIVC& ivc) + static ClientIVCProverOutput construct_client_ivc_prover_output(const size_t num_app_circuits = 1) { // Construct and accumulate a series of mocked private function execution circuits - MockCircuitProducer circuit_producer; + MockCircuitProducer circuit_producer{ num_app_circuits }; + const size_t NUM_CIRCUITS = circuit_producer.total_num_circuits; + ClientIVC ivc{ NUM_CIRCUITS, trace_settings }; - for (size_t idx = 0; idx < ivc.get_num_circuits(); ++idx) { + for (size_t idx = 0; idx < NUM_CIRCUITS; ++idx) { circuit_producer.construct_and_accumulate_next_circuit(ivc); } @@ -51,12 +53,10 @@ class ClientIVCRecursionTests : public testing::Test { */ TEST_F(ClientIVCRecursionTests, NativeVerification) { - size_t NUM_CIRCUITS = 2; - ClientIVC ivc{ NUM_CIRCUITS, trace_settings }; - auto [proof, ivc_vk] = construct_client_ivc_prover_output(ivc); + auto [proof, vk] = construct_client_ivc_prover_output(); // Confirm that the IVC proof can be natively verified - EXPECT_TRUE(ivc.verify(proof)); + EXPECT_TRUE(ClientIVC::verify(proof, vk)); } /** @@ -68,147 +68,21 @@ TEST_F(ClientIVCRecursionTests, Basic) using CIVCRecVerifierOutput = ClientIVCRecursiveVerifier::Output; // Generate a genuine ClientIVC prover output - const size_t NUM_CIRCUITS = 2; - - 
ClientIVC ivc{ NUM_CIRCUITS, trace_settings }; - auto [proof, ivc_vk] = construct_client_ivc_prover_output(ivc); + auto [proof, vk] = construct_client_ivc_prover_output(); // Construct the ClientIVC recursive verifier - auto builder = std::make_shared(); - ClientIVCVerifier verifier{ builder, ivc_vk }; + Builder builder; + ClientIVCVerifier verifier{ &builder, vk.mega }; // Generate the recursive verification circuit - StdlibProof stdlib_proof(*builder, proof); + StdlibProof stdlib_proof(builder, proof); CIVCRecVerifierOutput output = verifier.verify(stdlib_proof); - EXPECT_EQ(builder->failed(), false) << builder->err(); + EXPECT_EQ(builder.failed(), false) << builder.err(); - EXPECT_TRUE(CircuitChecker::check(*builder)); + EXPECT_TRUE(CircuitChecker::check(builder)); // Print the number of gates post finalisation - info("Recursive Verifier: finalised num gates = ", builder->num_gates); -} - -TEST_F(ClientIVCRecursionTests, ClientTubeBase) -{ - using CIVCRecVerifierOutput = ClientIVCRecursiveVerifier::Output; - - // Generate a genuine ClientIVC prover output - const size_t NUM_CIRCUITS = 2; - - ClientIVC ivc{ NUM_CIRCUITS, trace_settings }; - auto [proof, ivc_vk] = construct_client_ivc_prover_output(ivc); - - // Construct the ClientIVC recursive verifier - auto tube_builder = std::make_shared(); - ClientIVCVerifier verifier{ tube_builder, ivc_vk }; - - // Generate the recursive verification circuit - StdlibProof stdlib_proof(*tube_builder, proof); - CIVCRecVerifierOutput client_ivc_rec_verifier_output = verifier.verify(stdlib_proof); - - { - // IO - RollupIO inputs; - inputs.pairing_inputs = client_ivc_rec_verifier_output.points_accumulator; - inputs.ipa_claim = client_ivc_rec_verifier_output.opening_claim; - inputs.set_public(); - } - - // The tube only calls an IPA recursive verifier once, so we can just add this IPA proof - tube_builder->ipa_proof = client_ivc_rec_verifier_output.ipa_proof.get_value(); - - info("ClientIVC Recursive Verifier: num prefinalized 
gates = ", tube_builder->num_gates); - - EXPECT_EQ(tube_builder->failed(), false) << tube_builder->err(); - - // EXPECT_TRUE(CircuitChecker::check(*tube_builder)); - - // Construct and verify a proof for the ClientIVC Recursive Verifier circuit - auto proving_key = std::make_shared>(*tube_builder); - auto native_vk_with_ipa = std::make_shared(proving_key->get_precomputed()); - UltraProver_ tube_prover{ proving_key, native_vk_with_ipa }; - // Prove the CIVCRecursiveVerifier circuit - auto native_tube_proof = tube_prover.construct_proof(); - - // Natively verify the tube proof - VerifierCommitmentKey ipa_verification_key(1 << CONST_ECCVM_LOG_N); - UltraVerifier_ native_verifier(native_vk_with_ipa, ipa_verification_key); - bool native_result = - native_verifier.template verify_proof(native_tube_proof, tube_prover.proving_key->ipa_proof) - .result; - EXPECT_TRUE(native_result); - - // Construct a base rollup circuit that recursively verifies the tube proof and forwards the IPA proof. - Builder base_builder; - auto tube_vk = std::make_shared(proving_key->get_precomputed()); - auto stdlib_tube_vk_and_hash = std::make_shared(base_builder, tube_vk); - stdlib::Proof base_tube_proof(base_builder, native_tube_proof); - UltraRecursiveVerifier base_verifier{ &base_builder, stdlib_tube_vk_and_hash }; - UltraRecursiveVerifierOutput output = base_verifier.template verify_proof(base_tube_proof); - info("Tube UH Recursive Verifier: num prefinalized gates = ", base_builder.num_gates); - - { - // IO - RollupIO inputs; - inputs.pairing_inputs = output.points_accumulator; - inputs.ipa_claim = output.ipa_claim; - inputs.set_public(); - } - - base_builder.ipa_proof = tube_prover.proving_key->ipa_proof; - EXPECT_EQ(base_builder.failed(), false) << base_builder.err(); - EXPECT_TRUE(CircuitChecker::check(base_builder)); - - // Natively verify the IPA proof for the base rollup circuit - auto base_proving_key = std::make_shared>(base_builder); - auto ipa_transcript = std::make_shared(); - 
ipa_transcript->load_proof(base_proving_key->ipa_proof); - IPA::reduce_verify( - ipa_verification_key, output.ipa_claim.get_native_opening_claim(), ipa_transcript); -} - -// Ensure that the Client IVC Recursive Verifier Circuit does not depend on the Client IVC input -TEST_F(ClientIVCRecursionTests, TubeVKIndependentOfInputCircuits) -{ - // Retrieves the trace blocks (each consisting of a specific gate) from the recursive verifier circuit - auto get_blocks = [](size_t inner_size) - -> std::tuple> { - ClientIVC ivc{ inner_size, trace_settings }; - - auto [proof, ivc_vk] = construct_client_ivc_prover_output(ivc); - - auto tube_builder = std::make_shared(); - ClientIVCVerifier verifier{ tube_builder, ivc_vk }; - - StdlibProof stdlib_proof(*tube_builder, proof); - auto client_ivc_rec_verifier_output = verifier.verify(stdlib_proof); - - // IO - RollupIO inputs; - inputs.pairing_inputs = client_ivc_rec_verifier_output.points_accumulator; - inputs.ipa_claim = client_ivc_rec_verifier_output.opening_claim; - inputs.set_public(); - - // The tube only calls an IPA recursive verifier once, so we can just add this IPA proof - tube_builder->ipa_proof = client_ivc_rec_verifier_output.ipa_proof.get_value(); - - info("ClientIVC Recursive Verifier: num prefinalized gates = ", tube_builder->num_gates); - - EXPECT_EQ(tube_builder->failed(), false) << tube_builder->err(); - - // Construct and verify a proof for the ClientIVC Recursive Verifier circuit - auto proving_key = std::make_shared>(*tube_builder); - - auto tube_vk = std::make_shared(proving_key->get_precomputed()); - - return { tube_builder->blocks, tube_vk }; - }; - - auto [blocks_2, verification_key_2] = get_blocks(2); - auto [blocks_4, verification_key_4] = get_blocks(4); - - compare_ultra_blocks_and_verification_keys({ blocks_2, blocks_4 }, - { verification_key_2, verification_key_4 }); + info("Recursive Verifier: finalised num gates = ", builder.num_gates); } } // namespace bb::stdlib::recursion::honk diff --git 
a/barretenberg/cpp/src/barretenberg/stdlib/commitment/pedersen/pedersen.cpp b/barretenberg/cpp/src/barretenberg/stdlib/commitment/pedersen/pedersen.cpp index bebf5ee6b3e4..319590bac583 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/commitment/pedersen/pedersen.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/commitment/pedersen/pedersen.cpp @@ -10,35 +10,30 @@ namespace bb::stdlib { -template -cycle_group pedersen_commitment::commit(const std::vector& inputs, const GeneratorContext context) +/** + * @brief Compute a Pedersen commitment to the provided inputs + * @details Computes `commit(inputs) = sum_i inputs[i] * G_i` where `G_i` are Grumpkin curve generators derived from the + * provided GeneratorContext. The inputs are converted from `field_t` (circuit representation of BN254 scalars) to + * `cycle_scalar` (circuit representation of Grumpkin scalars) in order to perform the batch multiplication. + * + * + * @tparam Builder + * @param inputs Vector of BN254 scalar field elements to commit to + * @param context Generator configuration specifying offset and domain separator for deterministic generator selection + * @return cycle_group The resulting Pedersen commitment as a Grumpkin curve point + */ +template +cycle_group pedersen_commitment::commit(const std::vector& inputs, + const GeneratorContext context) { - - using cycle_scalar = typename cycle_group::cycle_scalar; - const auto base_points = context.generators->get(inputs.size(), context.offset, context.domain_separator); std::vector scalars; std::vector points; - for (size_t i = 0; i < inputs.size(); ++i) { - scalars.emplace_back(cycle_scalar::create_from_bn254_scalar(inputs[i])); - // constructs circuit-constant cycle_group objects (non-witness) - points.emplace_back(base_points[i]); - } - - return cycle_group::batch_mul(points, scalars); -} - -template -cycle_group pedersen_commitment::commit(const std::vector>& input_pairs) -{ - - std::vector scalars; - std::vector points; - for (auto& [scalar, 
context] : input_pairs) { + for (const auto [scalar, point] : zip_view(inputs, base_points)) { scalars.emplace_back(cycle_scalar::create_from_bn254_scalar(scalar)); - // constructs constant cycle_group objects (non-witness) - points.emplace_back(context.generators->get(1, context.offset, context.domain_separator)[0]); + // Construct circuit-constant cycle_group objects representing the generators + points.emplace_back(point); } return cycle_group::batch_mul(points, scalars); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/commitment/pedersen/pedersen.hpp b/barretenberg/cpp/src/barretenberg/stdlib/commitment/pedersen/pedersen.hpp index e658a1e684b8..31ac834f6042 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/commitment/pedersen/pedersen.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/commitment/pedersen/pedersen.hpp @@ -11,18 +11,21 @@ namespace bb::stdlib { -template class pedersen_commitment { +/** + * @brief In-circuit Pedersen commitment implementation + * + * @tparam Builder + */ +template class pedersen_commitment { private: - using bool_t = stdlib::bool_t; - using field_t = stdlib::field_t; - using EmbeddedCurve = typename cycle_group::Curve; - using GeneratorContext = crypto::GeneratorContext; - using cycle_group = stdlib::cycle_group; - using cycle_scalar = typename stdlib::cycle_group::cycle_scalar; + using field_t = stdlib::field_t; // BN254 scalar field element + using cycle_group = stdlib::cycle_group; // Grumpkin curve point + using EmbeddedCurve = typename cycle_group::Curve; // Grumpkin curve type + using cycle_scalar = typename cycle_group::cycle_scalar; // Grumpkin scalar field element + using GeneratorContext = crypto::GeneratorContext; // Generator configuration public: static cycle_group commit(const std::vector& inputs, GeneratorContext context = {}); - static cycle_group commit(const std::vector>& input_pairs); }; } // namespace bb::stdlib diff --git 
a/barretenberg/cpp/src/barretenberg/stdlib/commitment/pedersen/pedersen.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/commitment/pedersen/pedersen.test.cpp index 8b7ae2415bfa..7e252b9750b3 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/commitment/pedersen/pedersen.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/commitment/pedersen/pedersen.test.cpp @@ -5,9 +5,11 @@ #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" #include "barretenberg/numeric/random/engine.hpp" #include "barretenberg/stdlib/primitives/curves/bn254.hpp" +#include "barretenberg/stdlib/primitives/test_utils.hpp" #include "pedersen.hpp" using namespace bb; +using bb::stdlib::test_utils::check_circuit_and_gate_count; namespace { auto& engine = numeric::get_debug_randomness(); } @@ -20,10 +22,27 @@ template class StdlibPedersen : public testing::Test { using public_witness_ct = typename _curve::public_witness_ct; using pedersen_commitment = typename stdlib::pedersen_commitment; + // Helper to verify pedersen commitment against native implementation + static void verify_commitment(Builder& builder [[maybe_unused]], + const std::vector>& inputs, + crypto::GeneratorContext context = {}) + { + // Extract native values from circuit inputs + std::vector input_vals; + for (const auto& input : inputs) { + input_vals.push_back(input.get_value()); + } + + auto result = pedersen_commitment::commit(inputs, context); + auto expected = crypto::pedersen_commitment::commit_native(input_vals, context); + + EXPECT_EQ(result.x.get_value(), expected.x); + EXPECT_EQ(result.y.get_value(), expected.y); + } + public: static void test_pedersen() { - Builder builder; fr left_in = fr::random_element(); @@ -43,44 +62,212 @@ template class StdlibPedersen : public testing::Test { builder.fix_witness(left.witness_index, left.get_value()); builder.fix_witness(right.witness_index, right.get_value()); - auto out = pedersen_commitment::commit({ left, right }); + std::vector> inputs = { left, right }; + + 
verify_commitment(builder, inputs); + + check_circuit_and_gate_count(builder, 2912); + } + + static void test_mixed_witnesses_and_constants() + { + Builder builder; + std::vector> inputs; + + for (size_t i = 0; i < 8; ++i) { + fr value = fr::random_element(); + if (i % 2 == 0) { + inputs.push_back(fr_ct(&builder, value)); + } else { + inputs.push_back(witness_ct(&builder, value)); + } + } + + verify_commitment(builder, inputs); + + // Gate count different for Mega because it adds constants for ECC op codes that get reused in ROM table access + if constexpr (std::is_same_v) { + check_circuit_and_gate_count(builder, 3994); + } else { + check_circuit_and_gate_count(builder, 3997); + } + } + + static void test_empty_input() + { + Builder builder; + std::vector inputs; + std::vector> witness_inputs; + + // Empty input should return the zero/identity point + auto result = pedersen_commitment::commit(witness_inputs); + + // For empty inputs, the circuit returns (0, 0) which is the identity point + EXPECT_EQ(result.x.get_value(), fr::zero()); + EXPECT_EQ(result.y.get_value(), fr::zero()); + + check_circuit_and_gate_count(builder, 0); + } + + static void test_single_input() + { + Builder builder; + fr input = fr::random_element(); + std::vector> circuit_inputs = { witness_ct(&builder, input) }; + + verify_commitment(builder, circuit_inputs); + check_circuit_and_gate_count(builder, 2838); + + // Expect table size to be 14340 for single input + // i.e. 254 bit scalars handled via 28 9-bit tables (size 2^9) plus one 2-bit table (size 2^2) + // i.e. 
(28*2^9) + (1*2^2) = 14340 + EXPECT_EQ(builder.get_tables_size(), 14340); + } + + // Test demonstrates lookup table optimization for 2 inputs + static void test_two_inputs() + { + Builder builder; + std::vector> circuit_inputs; + + for (size_t i = 0; i < 2; ++i) { + circuit_inputs.push_back(witness_ct(&builder, fr::random_element())); + } + + verify_commitment(builder, circuit_inputs); + check_circuit_and_gate_count(builder, 2910); + + // Expect table size to be 28680 = 2*14340 for two inputs + // Each input uses one Multitable of size 14340 + EXPECT_EQ(builder.get_tables_size(), 28680); + } + + // Test demonstrates gate count jump when lookup tables can't be used (3+ inputs) + static void test_three_inputs() + { + Builder builder; + std::vector> circuit_inputs; + + for (size_t i = 0; i < 3; ++i) { + circuit_inputs.push_back(witness_ct(&builder, fr::random_element())); + } + + verify_commitment(builder, circuit_inputs); + + // Gate count different for Mega because it adds constants for ECC op codes that get reused in ROM table access + if constexpr (std::is_same_v) { + check_circuit_and_gate_count(builder, 3485); + } else { + check_circuit_and_gate_count(builder, 3488); + } + + // Lookup tables size is same as 2 inputs since only the first 2 inputs use lookup tables + EXPECT_EQ(builder.get_tables_size(), 28680); + } + + static void test_large_input() + { + Builder builder; + std::vector> circuit_inputs; + + for (size_t i = 0; i < 32; ++i) { + circuit_inputs.push_back(witness_ct(&builder, fr::random_element())); + } + + verify_commitment(builder, circuit_inputs); + + // Gate count different for Mega because it adds constants for ECC op codes that get reused in ROM table access + if constexpr (std::is_same_v) { + check_circuit_and_gate_count(builder, 12156); + } else { + check_circuit_and_gate_count(builder, 12159); + } + } + + static void test_zero_values() + { + Builder builder; + + // Mix of witness and constant zeros/non-zeros + std::vector> circuit_inputs = { + 
witness_ct(&builder, fr::zero()), + witness_ct(&builder, fr::random_element()), + fr_ct(&builder, fr::zero()), // constant zero + fr_ct(&builder, fr::random_element()) // constant non-zero + }; + + verify_commitment(builder, circuit_inputs); + } + + static void test_custom_generator_context() + { + Builder builder; + std::vector> circuit_inputs; + + for (size_t i = 0; i < 4; ++i) { + circuit_inputs.push_back(witness_ct(&builder, fr::random_element())); + } + + crypto::GeneratorContext context; + context.offset = 10; - info("num gates = ", builder.get_estimated_num_finalized_gates()); + verify_commitment(builder, circuit_inputs, context); + } - bool result = CircuitChecker::check(builder); - EXPECT_EQ(result, true); + static void test_all_constants() + { + Builder builder; + std::vector> circuit_inputs; - auto commit_native = crypto::pedersen_commitment::commit_native({ left.get_value(), right.get_value() }); + for (size_t i = 0; i < 6; ++i) { + circuit_inputs.push_back(fr_ct(&builder, fr::random_element())); + } - EXPECT_EQ(out.x.get_value(), commit_native.x); - EXPECT_EQ(out.y.get_value(), commit_native.y); + verify_commitment(builder, circuit_inputs); + check_circuit_and_gate_count(builder, 0); } - static void test_hash_constants() + static void test_special_field_element() { Builder builder; + std::vector> circuit_inputs = { witness_ct(&builder, + fr(-1)), // p-1, the maximum field element + witness_ct(&builder, fr(-2)), // p-2 + witness_ct(&builder, fr::random_element()) }; + + verify_commitment(builder, circuit_inputs); + } + + static void test_determinism() + { + Builder builder; std::vector inputs; std::vector> witness_inputs; - for (size_t i = 0; i < 8; ++i) { - inputs.push_back(bb::fr::random_element()); - if (i % 2 == 1) { - witness_inputs.push_back(witness_ct(&builder, inputs[i])); - } else { - witness_inputs.push_back(fr_ct(&builder, inputs[i])); - } + for (size_t i = 0; i < 5; ++i) { + inputs.push_back(fr::random_element()); + 
witness_inputs.push_back(witness_ct(&builder, inputs[i])); } + // Commit twice with same inputs + auto result1 = pedersen_commitment::commit(witness_inputs); + auto result2 = pedersen_commitment::commit(witness_inputs); + + // Should produce identical results + EXPECT_EQ(result1.x.get_value(), result2.x.get_value()); + EXPECT_EQ(result1.y.get_value(), result2.y.get_value()); + auto expected = crypto::pedersen_commitment::commit_native(inputs); - auto result = pedersen_commitment::commit(witness_inputs); + EXPECT_EQ(result1.x.get_value(), expected.x); + EXPECT_EQ(result1.y.get_value(), expected.y); - EXPECT_EQ(result.x.get_value(), expected.x); - EXPECT_EQ(result.y.get_value(), expected.y); + bool check_result = CircuitChecker::check(builder); + EXPECT_EQ(check_result, true); } }; -using CircuitTypes = testing::Types; +using CircuitTypes = testing::Types; TYPED_TEST_SUITE(StdlibPedersen, CircuitTypes); @@ -89,7 +276,57 @@ TYPED_TEST(StdlibPedersen, Small) TestFixture::test_pedersen(); }; -TYPED_TEST(StdlibPedersen, HashConstants) +TYPED_TEST(StdlibPedersen, MixedWitnessesAndConstants) +{ + TestFixture::test_mixed_witnesses_and_constants(); +}; + +TYPED_TEST(StdlibPedersen, EmptyInput) +{ + TestFixture::test_empty_input(); +}; + +TYPED_TEST(StdlibPedersen, SingleInput) +{ + TestFixture::test_single_input(); +}; + +TYPED_TEST(StdlibPedersen, TwoInputs) +{ + TestFixture::test_two_inputs(); +}; + +TYPED_TEST(StdlibPedersen, ThreeInputs) +{ + TestFixture::test_three_inputs(); +}; + +TYPED_TEST(StdlibPedersen, LargeInput) +{ + TestFixture::test_large_input(); +}; + +TYPED_TEST(StdlibPedersen, ZeroValues) +{ + TestFixture::test_zero_values(); +}; + +TYPED_TEST(StdlibPedersen, CustomGeneratorContext) +{ + TestFixture::test_custom_generator_context(); +}; + +TYPED_TEST(StdlibPedersen, AllConstants) +{ + TestFixture::test_all_constants(); +}; + +TYPED_TEST(StdlibPedersen, SpecialFieldElement) +{ + TestFixture::test_special_field_element(); +}; + +TYPED_TEST(StdlibPedersen, 
Determinism) { - TestFixture::test_hash_constants(); + TestFixture::test_determinism(); }; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_flavor.hpp b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_flavor.hpp index 3f56739af8cc..dadc0d53617b 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_flavor.hpp @@ -91,8 +91,9 @@ class ECCVMRecursiveFlavor { * resolve that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for * portability of our circuits. */ - class VerificationKey - : public StdlibVerificationKey_> { + class VerificationKey : public StdlibVerificationKey_, + VKSerializationMode::NO_METADATA> { public: VerifierCommitmentKey pcs_verification_key; @@ -119,35 +120,14 @@ class ECCVMRecursiveFlavor { } } - /** - * @brief Serialize verification key to field elements. - * - * @return std::vector - */ - std::vector to_field_elements() const override - { - using namespace bb::stdlib::field_conversion; - auto serialize_to_field_buffer = [](const T& input, std::vector& buffer) { - std::vector input_fields = convert_to_bn254_frs(input); - buffer.insert(buffer.end(), input_fields.begin(), input_fields.end()); - }; - - std::vector elements; - for (const Commitment& commitment : this->get_all()) { - serialize_to_field_buffer(commitment, elements); - } - - return elements; - } - /** * @brief Unused function because vk is hardcoded in recursive verifier, so no transcript hashing is needed. 
* * @param domain_separator * @param transcript */ - FF add_hash_to_transcript([[maybe_unused]] const std::string& domain_separator, - [[maybe_unused]] Transcript& transcript) const override + FF hash_through_transcript([[maybe_unused]] const std::string& domain_separator, + [[maybe_unused]] Transcript& transcript) const override { throw_or_abort("Not intended to be used because vk is hardcoded in circuit."); } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.hpp b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.hpp index 8b559f033d6c..42ef6f07b73c 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.hpp @@ -33,6 +33,8 @@ class ECCVMRecursiveVerifier { StdlibPreIpaProof pre_ipa_proof; StdlibIpaProof ipa_proof; + StdlibProof() = default; + StdlibProof(Builder& builder, const ECCVMProof& eccvm_proof) : pre_ipa_proof(builder, eccvm_proof.pre_ipa_proof) , ipa_proof(builder, eccvm_proof.ipa_proof) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.test.cpp index ad59e61ea5c9..bb40385bd48f 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.test.cpp @@ -37,7 +37,7 @@ class ECCVMRecursiveTests : public ::testing::Test { using OuterFlavor = std::conditional_t, MegaFlavor, UltraFlavor>; using OuterProver = UltraProver_; using OuterVerifier = UltraVerifier_; - using OuterDeciderProvingKey = DeciderProvingKey_; + using OuterProverInstance = ProverInstance_; static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } /** @@ -128,15 +128,21 @@ class ECCVMRecursiveTests : 
public ::testing::Test { // Construct a full proof from the recursive verifier circuit { - auto proving_key = std::make_shared(outer_circuit); - auto verification_key = std::make_shared(proving_key->get_precomputed()); - OuterProver prover(proving_key, verification_key); + auto prover_instance = std::make_shared(outer_circuit); + auto verification_key = std::make_shared(prover_instance->get_precomputed()); + OuterProver prover(prover_instance, verification_key); OuterVerifier verifier(verification_key); auto proof = prover.construct_proof(); bool verified = verifier.template verify_proof(proof).result; ASSERT_TRUE(verified); } + + // Check that the size of the recursive verifier is consistent with historical expectation + uint32_t NUM_GATES_EXPECTED = 216315; + ASSERT_EQ(static_cast(outer_circuit.get_num_finalized_gates()), NUM_GATES_EXPECTED) + << "Ultra-arithmetized ECCVM Recursive verifier gate count changed! Update this value if you are sure this " + "is expected."; } static void test_recursive_verification_failure() @@ -224,7 +230,7 @@ class ECCVMRecursiveTests : public ::testing::Test { auto [opening_claim, ipa_transcript] = verifier.verify_proof(inner_proof); stdlib::recursion::PairingPoints::add_default_to_public_inputs(outer_circuit); - auto outer_proving_key = std::make_shared(outer_circuit); + auto outer_proving_key = std::make_shared(outer_circuit); auto outer_verification_key = std::make_shared(outer_proving_key->get_precomputed()); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/verifier_commitment_key.hpp b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/verifier_commitment_key.hpp index 708f6a4e242e..b9b3051bffc7 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/verifier_commitment_key.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/verifier_commitment_key.hpp @@ -26,7 +26,7 @@ template class VerifierCommitmentKey { * applying the pippenger point table so the values at odd indices 
contain the point {srs[i-1].x * beta, * srs[i-1].y}, where beta is the endomorphism. We retrieve only the original SRS for IPA verification. * - * @details The Grumpkin SRS points will be initialised as constants in the circuit but might be subsequently + * @details The Grumpkin SRS points will be initialized as constants in the circuit but might be subsequently * turned into constant witnesses to make operations in the circuit more efficient. */ VerifierCommitmentKey([[maybe_unused]] Builder* builder, diff --git a/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa.hpp b/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa.hpp index d9accb320c59..63efdb73b40a 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa.hpp @@ -14,40 +14,26 @@ namespace bb::stdlib { template struct ecdsa_signature { stdlib::byte_array r; stdlib::byte_array s; - stdlib::byte_array v; // v is single byte (byte_array of size 1) + + Builder* get_context() const + { + if (r.get_context() != nullptr) { + return r.get_context(); + } + + if (s.get_context() != nullptr) { + return s.get_context(); + } + + return nullptr; + } }; template -bool_t ecdsa_verify_signature(const stdlib::byte_array& message, +bool_t ecdsa_verify_signature(const stdlib::byte_array& hashed_message, const G1& public_key, const ecdsa_signature& sig); -template -bool_t ecdsa_verify_signature_noassert(const stdlib::byte_array& message, - const G1& public_key, - const ecdsa_signature& sig); - -template -bool_t ecdsa_verify_signature_prehashed_message_noassert(const stdlib::byte_array& hashed_message, - const G1& public_key, - const ecdsa_signature& sig); - -template -static ecdsa_signature ecdsa_from_witness(Builder* ctx, const crypto::ecdsa_signature& input) -{ - std::vector r_vec(std::begin(input.r), std::end(input.r)); - std::vector s_vec(std::begin(input.s), std::end(input.s)); - std::vector v_vec = { 
input.v }; // Create single-element vector for v - stdlib::byte_array r(ctx, r_vec); - stdlib::byte_array s(ctx, s_vec); - stdlib::byte_array v(ctx, v_vec); // v is now a byte_array with size 1 - ecdsa_signature out; - out.r = r; - out.s = s; - out.v = v; - return out; -} - template void generate_ecdsa_verification_test_circuit(Builder& builder, size_t num_iterations); } // namespace bb::stdlib diff --git a/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa.test.cpp index 1bc23ef12506..b047e8ee2257 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa.test.cpp @@ -6,198 +6,466 @@ #include "barretenberg/circuit_checker/circuit_checker.hpp" #include "barretenberg/common/test.hpp" #include "ecdsa.hpp" +#include "ecdsa_tests_data.hpp" + +#include + +#include using namespace bb; using namespace bb::crypto; -using Builder = UltraCircuitBuilder; -using curve_ = stdlib::secp256k1; -using curveR1 = stdlib::secp256r1; +template class EcdsaTests : public ::testing::Test { + public: + using Builder = Curve::Builder; + using CurveType = + std::conditional_t; + + // Native Types + using FrNative = Curve::fr; + using FqNative = Curve::fq; + using G1Native = Curve::g1; + + // Stdlib types + using Fr = Curve::bigfr_ct; + using Fq = Curve::fq_ct; + using G1 = Curve::g1_bigfr_ct; + using bool_t = Curve::bool_ct; + + // Reproducible signature + static constexpr FrNative private_key = + FrNative("0xd67abee717b3fc725adf59e2cc8cd916435c348b277dd814a34e3ceb279436c2"); + + enum class TamperingMode : std::uint8_t { + InvalidR, + InvalidS, + HighS, + OutOfBoundsHash, + ZeroR, + ZeroS, + InfinityScalarMul, + InvalidPubKey, + InfinityPubKey, + None + }; -TEST(stdlib_ecdsa, verify_signature) + std::pair, ecdsa_signature> generate_dummy_ecdsa_data(std::string message_string, + bool random_signature) + { + 
ecdsa_key_pair account; + + account.private_key = random_signature ? FrNative::random_element() : private_key; + account.public_key = G1Native::one * account.private_key; + + ecdsa_signature signature = + ecdsa_construct_signature(message_string, account); + + if (random_signature) { + // Logging in case of random signature + info("The private key used generate this signature is: ", private_key); + } + + return { account, signature }; + } + + /** + * @brief Generate valid signature for the message Fr(1) + * + * @return ecdsa_signature + */ + ecdsa_signature generate_signature_out_of_bounds_hash() + { + // Generate signature + ecdsa_signature signature; + + FrNative fr_hash = FrNative::one(); + FrNative k = FrNative::random_element(); + typename G1Native::affine_element R = G1Native::one * k; + FqNative::serialize_to_buffer(R.x, &signature.r[0]); + + FrNative r = FrNative::serialize_from_buffer(&signature.r[0]); + FrNative k_inverse = k.invert(); + FrNative s = k_inverse * (fr_hash + r * private_key); + bool is_s_low = (static_cast(s) < (FrNative::modulus + 1) / 2); + s = is_s_low ? 
s : -s; + FrNative::serialize_to_buffer(s, &signature.s[0]); + + FqNative r_fq(R.x); + bool is_r_finite = (uint256_t(r_fq) == uint256_t(r)); + bool y_parity = uint256_t(R.y).get_bit(0); + bool recovery_bit = y_parity ^ is_s_low; + constexpr uint8_t offset = 27; + + int value = + offset + static_cast(recovery_bit) + (static_cast(2) * static_cast(!is_r_finite)); + BB_ASSERT_LTE(value, UINT8_MAX); + signature.v = static_cast(value); + + // Natively verify signature + FrNative s_inverse = s.invert(); + typename G1Native::affine_element Q = G1Native::one * ((fr_hash * s_inverse) + (r * s_inverse * private_key)); + BB_ASSERT_EQ(static_cast(Q.x), + static_cast(r), + "Signature with out of bounds message failed verification"); + + return signature; + } + + std::string tampering(std::string message_string, + ecdsa_key_pair& account, + ecdsa_signature& signature, + TamperingMode mode) + { + std::string failure_msg; + + switch (mode) { + case TamperingMode::InvalidR: { + // Invalidate the signature by changing r. + FrNative r = FrNative::serialize_from_buffer(&signature.r[0]); + r += FrNative::one(); + + FrNative::serialize_to_buffer(r, &signature.r[0]); + break; + } + case TamperingMode::InvalidS: { + // Invalidate the signature by changing s. + FrNative s = FrNative::serialize_from_buffer(&signature.s[0]); + s += FrNative::one(); + + FrNative::serialize_to_buffer(s, &signature.s[0]); + break; + } + case TamperingMode::HighS: { + // Invalidate the signature by changing s to -s. 
+ FrNative s = FrNative::serialize_from_buffer(&signature.s[0]); + s = -s; + + FrNative::serialize_to_buffer(s, &signature.s[0]); + failure_msg = "ECDSA input validation: the s component of the signature is bigger than Fr::modulus - s.: " + "hi limb."; // The second part of the message is added by the range constraint + break; + } + case TamperingMode::OutOfBoundsHash: { + // Invalidate the circuit by passing a message whose hash is bigger than n + // (the message will be hard-coded in the circuit at a later point) + signature = generate_signature_out_of_bounds_hash(); + + failure_msg = "ECDSA input validation: the hash of the message is bigger than the order of the elliptic " + "curve.: hi limb."; // The second part of the message is added by the range constraint + break; + } + case TamperingMode::ZeroR: { + // Invalidate signature by setting r to 0 + signature.r = std::array{}; + + failure_msg = "ECDSA input validation: the r component of the signature is zero."; + break; + } + case TamperingMode::ZeroS: { + // Invalidate signature by setting s to 0 + signature.s = std::array{}; + + failure_msg = "ECDSA input validation: the s component of the signature is zero."; + break; + } + case TamperingMode::InfinityScalarMul: { + // Invalidate the signature by making making u1 * G + u2 * P return the point at infinity + + // Compute H(m) + std::vector buffer; + std::ranges::copy(message_string, std::back_inserter(buffer)); + auto hash = Sha256Hasher::hash(buffer); + + // Override the public key: new public key is (-hash) * r^{-1} * G + FrNative fr_hash = FrNative::serialize_from_buffer(&hash[0]); + FrNative r = FrNative::serialize_from_buffer(&signature.r[0]); + FrNative r_inverse = r.invert(); + FrNative modified_private_key = r_inverse * (-fr_hash); + account.public_key = G1Native::one * modified_private_key; + + // Verify that the result is the point at infinity + auto P = G1Native::one * fr_hash + account.public_key * r; + BB_ASSERT_EQ(P.is_point_at_infinity(), true); 
+ + failure_msg = "ECDSA validation: the result of the batch multiplication is the point at infinity."; + break; + } + case TamperingMode::InvalidPubKey: { + // Invalidate the circuit by passing a public key which is not on the curve + account.public_key.x = account.public_key.y; + BB_ASSERT_EQ(account.public_key.on_curve(), false); + + failure_msg = "ECDSA input validation: the public key is not a point on the elliptic curve."; + break; + } + case TamperingMode::InfinityPubKey: { + // Invalidate the circuit by passing a public key which is not on the curve + account.public_key.self_set_infinity(); + BB_ASSERT_EQ(account.public_key.is_point_at_infinity(), true); + + failure_msg = "ECDSA input validation: the public key is the point at infinity."; + break; + } + case TamperingMode::None: + break; + } + + // Natively verify that the tampering was successfull + bool is_signature_valid = ecdsa_verify_signature( + message_string, account.public_key, signature); + if (mode == TamperingMode::HighS || mode == TamperingMode::InfinityScalarMul) { + // If either s >= (n+1)/2 or the result of the scalar multiplication is the point at infinity, then the + // verification function raises an error, we treat it as an invalid signature + is_signature_valid = false; + } + + bool expected = mode == TamperingMode::None; + BB_ASSERT_EQ(is_signature_valid, + expected, + "Signature verification returned a different result from the expected one. If the signature was " + "randomly generated, there is a (very) small chance this is not a bug."); + + return failure_msg; + } + + std::pair> create_stdlib_ecdsa_data( + Builder& builder, const ecdsa_key_pair& account, const ecdsa_signature& signature) + { + // We construct the point via its x,y-coordinates to avoid the on curve check of G1::from_witness. 
In this way + // we test the on curve check of the ecdsa verification function + Fq x = Fq::from_witness(&builder, account.public_key.x); + Fq y = Fq::from_witness(&builder, account.public_key.y); + bool_t is_infinity( + stdlib::witness_t(&builder, account.public_key.is_point_at_infinity() ? fr::one() : fr::zero()), + false); + G1 pub_key(x, y, is_infinity); + pub_key.set_free_witness_tag(); + BB_ASSERT_EQ(pub_key.is_point_at_infinity().get_value(), account.public_key.is_point_at_infinity()); + + std::vector rr(signature.r.begin(), signature.r.end()); + std::vector ss(signature.s.begin(), signature.s.end()); + + stdlib::ecdsa_signature sig{ stdlib::byte_array(&builder, rr), + stdlib::byte_array(&builder, ss) }; + + return { pub_key, sig }; + } + + void ecdsa_verification_circuit(Builder& builder, + const stdlib::byte_array& hashed_message, + const ecdsa_key_pair& account, + const ecdsa_signature& signature, + const bool signature_verification_result, + const bool circuit_checker_result, + const std::string failure_msg) + + { + auto [public_key, sig] = create_stdlib_ecdsa_data(builder, account, signature); + + // Verify signature + stdlib::bool_t signature_result = + stdlib::ecdsa_verify_signature(hashed_message, public_key, sig); + + // Enforce verification returns the expected result + signature_result.assert_equal(stdlib::bool_t(signature_verification_result)); + + // Check native values + EXPECT_EQ(signature_result.get_value(), signature_verification_result); + + // Log data + std::cerr << "num gates = " << builder.get_estimated_num_finalized_gates() << std::endl; + benchmark_info(Builder::NAME_STRING, + "ECDSA", + "Signature Verification Test", + "Gate Count", + builder.get_estimated_num_finalized_gates()); + + // Circuit checker + bool is_circuit_satisfied = CircuitChecker::check(builder); + EXPECT_EQ(is_circuit_satisfied, circuit_checker_result); + + // Check the error + EXPECT_EQ(builder.err(), failure_msg); + } + + stdlib::byte_array 
construct_hashed_message(Builder& builder, + std::vector& message_bytes, + TamperingMode mode) + { + stdlib::byte_array message(&builder, message_bytes); + stdlib::byte_array hashed_message; + + if (mode == TamperingMode::OutOfBoundsHash) { + // In this case the message is already hashed, so we mock the hashing constraints for consistency but + // hard-code the message + [[maybe_unused]] stdlib::byte_array _ = + static_cast>(stdlib::SHA256::hash(message)); + + // Hard-coded witness + std::array hashed_message_witness; + + // The hashed message is FrNative::modulus + 1 + FqNative fr_hash = FqNative(FrNative::modulus + 1); + FqNative::serialize_to_buffer(fr_hash, &hashed_message_witness[0]); + + hashed_message = stdlib::byte_array( + &builder, std::vector(hashed_message_witness.begin(), hashed_message_witness.end())); + } else { + hashed_message = static_cast>(stdlib::SHA256::hash(message)); + } + + return hashed_message; + } + + void test_verify_signature(bool random_signature, TamperingMode mode) + { + // Map tampering mode to signature verification result + bool signature_verification_result = + (mode == TamperingMode::None) || (mode == TamperingMode::HighS) || (mode == TamperingMode::OutOfBoundsHash); + // Map tampering mode to circuit checker result + bool circuit_checker_result = + (mode == TamperingMode::None) || (mode == TamperingMode::InvalidR) || (mode == TamperingMode::InvalidS); + + std::string message_string = "Goblin"; + std::vector message_bytes(message_string.begin(), message_string.end()); + + auto [account, signature] = generate_dummy_ecdsa_data(message_string, /*random_signature=*/random_signature); + + // Tamper with the signature + std::string failure_msg = tampering(message_string, account, signature, mode); + + // Create ECDSA verification circuit + Builder builder; + + // Compute H(m) + stdlib::byte_array hashed_message = construct_hashed_message(builder, message_bytes, mode); + + // ECDSA verification + ecdsa_verification_circuit(builder, + 
hashed_message, + account, + signature, + signature_verification_result, + circuit_checker_result, + failure_msg); + } + + /** + * @brief Construct tests based on data fetched from the Wycherproof project + * + * @param tests + */ + void test_wycherproof(std::vector> tests) + { + for (auto test : tests) { + // Keypair + ecdsa_key_pair account; + account.private_key = FrNative::one(); // Dummy value, unused + account.public_key = typename G1Native::affine_element(test.x, test.y); + + // Signature + std::array r; + std::array s; + uint8_t v = 0; // Dummy value, unused + FrNative::serialize_to_buffer(test.r, &r[0]); + FrNative::serialize_to_buffer(test.s, &s[0]); + + // Create ECDSA verification circuit + Builder builder; + + // Compute H(m) + stdlib::byte_array hashed_message = + construct_hashed_message(builder, test.message, TamperingMode::None); + + // ECDSA verification + ecdsa_verification_circuit(builder, + hashed_message, + account, + { r, s, v }, + test.is_valid_signature, + test.is_circuit_satisfied, + test.failure_msg); + } + } +}; + +using Curves = testing::Types, + stdlib::secp256r1, + stdlib::secp256k1, + stdlib::secp256r1>; + +TYPED_TEST_SUITE(EcdsaTests, Curves); + +TYPED_TEST(EcdsaTests, VerifyRandomSignature) { - Builder builder = Builder(); - - // whaaablaghaaglerijgeriij - std::string message_string = "Instructions unclear, ask again later."; - - ecdsa_key_pair account; - account.private_key = curve_::fr::random_element(); - account.public_key = curve_::g1::one * account.private_key; - - ecdsa_signature signature = - ecdsa_construct_signature(message_string, account); - - bool first_result = ecdsa_verify_signature( - message_string, account.public_key, signature); - EXPECT_EQ(first_result, true); - - curve_::g1_bigfr_ct public_key = curve_::g1_bigfr_ct::from_witness(&builder, account.public_key); - - std::vector rr(signature.r.begin(), signature.r.end()); - std::vector ss(signature.s.begin(), signature.s.end()); - std::vector vv = { signature.v }; 
- - stdlib::ecdsa_signature sig{ curve_::byte_array_ct(&builder, rr), - curve_::byte_array_ct(&builder, ss), - curve_::byte_array_ct(&builder, vv) }; - - curve_::byte_array_ct message(&builder, message_string); - - curve_::bool_ct signature_result = - stdlib::ecdsa_verify_signature( - message, public_key, sig); - - EXPECT_EQ(signature_result.get_value(), true); - - std::cerr << "num gates = " << builder.get_estimated_num_finalized_gates() << std::endl; - benchmark_info(Builder::NAME_STRING, - "ECDSA", - "Signature Verification Test", - "Gate Count", - builder.get_estimated_num_finalized_gates()); - bool proof_result = CircuitChecker::check(builder); - EXPECT_EQ(proof_result, true); + TestFixture::test_verify_signature(/*random_signature=*/true, TestFixture::TamperingMode::None); } -TEST(stdlib_ecdsa, verify_r1_signature) +TYPED_TEST(EcdsaTests, VerifySignature) { - Builder builder = Builder(); - - std::string message_string = "Instructions unclear, ask again later."; - - ecdsa_key_pair account; - account.private_key = curveR1::fr::random_element(); - account.public_key = curveR1::g1::one * account.private_key; - - ecdsa_signature signature = - ecdsa_construct_signature(message_string, account); - - bool first_result = ecdsa_verify_signature( - message_string, account.public_key, signature); - EXPECT_EQ(first_result, true); - - curveR1::g1_bigfr_ct public_key = curveR1::g1_bigfr_ct::from_witness(&builder, account.public_key); - - std::vector rr(signature.r.begin(), signature.r.end()); - std::vector ss(signature.s.begin(), signature.s.end()); - std::vector vv = { signature.v }; - - stdlib::ecdsa_signature sig{ curveR1::byte_array_ct(&builder, rr), - curveR1::byte_array_ct(&builder, ss), - curveR1::byte_array_ct(&builder, vv) }; - - curveR1::byte_array_ct message(&builder, message_string); - - curveR1::bool_ct signature_result = - stdlib::ecdsa_verify_signature( - message, public_key, sig); - - EXPECT_EQ(signature_result.get_value(), true); - - std::cerr << "num gates 
= " << builder.get_estimated_num_finalized_gates() << std::endl; - benchmark_info(Builder::NAME_STRING, - "ECDSA", - "Signature Verification Test", - "Gate Count", - builder.get_estimated_num_finalized_gates()); - bool proof_result = CircuitChecker::check(builder); - EXPECT_EQ(proof_result, true); + TestFixture::test_verify_signature(/*random_signature=*/false, TestFixture::TamperingMode::None); } -TEST(stdlib_ecdsa, ecdsa_verify_signature_noassert_succeed) +TYPED_TEST(EcdsaTests, InvalidR) { - Builder builder = Builder(); - - // whaaablaghaaglerijgeriij - std::string message_string = "Instructions unclear, ask again later."; - - ecdsa_key_pair account; - account.private_key = curve_::fr::random_element(); - account.public_key = curve_::g1::one * account.private_key; - - ecdsa_signature signature = - ecdsa_construct_signature(message_string, account); - - bool first_result = ecdsa_verify_signature( - message_string, account.public_key, signature); - EXPECT_EQ(first_result, true); - - curve_::g1_bigfr_ct public_key = curve_::g1_bigfr_ct::from_witness(&builder, account.public_key); - - std::vector rr(signature.r.begin(), signature.r.end()); - std::vector ss(signature.s.begin(), signature.s.end()); - std::vector vv = { signature.v }; - - stdlib::ecdsa_signature sig{ - curve_::byte_array_ct(&builder, rr), - curve_::byte_array_ct(&builder, ss), - curve_::byte_array_ct(&builder, vv), - }; - - curve_::byte_array_ct message(&builder, message_string); - - curve_::bool_ct signature_result = - stdlib::ecdsa_verify_signature_noassert( - message, public_key, sig); - - EXPECT_EQ(signature_result.get_value(), true); - - std::cerr << "num gates = " << builder.get_estimated_num_finalized_gates() << std::endl; - benchmark_info(Builder::NAME_STRING, - "ECDSA", - "Signature Verification Test", - "Gate Count", - builder.get_estimated_num_finalized_gates()); - bool proof_result = CircuitChecker::check(builder); - EXPECT_EQ(proof_result, true); + 
TestFixture::test_verify_signature(/*random_signature=*/false, TestFixture::TamperingMode::InvalidR); } -TEST(stdlib_ecdsa, ecdsa_verify_signature_noassert_fail) +TYPED_TEST(EcdsaTests, InvalidS) { - Builder builder = Builder(); - - // whaaablaghaaglerijgeriij - std::string message_string = "Instructions unclear, ask again later."; - - ecdsa_key_pair account; - account.private_key = curve_::fr::random_element(); - account.public_key = curve_::g1::one * account.private_key; - - ecdsa_signature signature = - ecdsa_construct_signature(message_string, account); - - // tamper w. signature to make fail - signature.r[0] += 1; + TestFixture::test_verify_signature(/*random_signature=*/false, TestFixture::TamperingMode::InvalidS); +} - bool first_result = ecdsa_verify_signature( - message_string, account.public_key, signature); - EXPECT_EQ(first_result, false); +TYPED_TEST(EcdsaTests, HighS) +{ + // Disable asserts because native ecdsa verification raises an error if s >= (n+1)/2 + BB_DISABLE_ASSERTS(); + TestFixture::test_verify_signature(/*random_signature=*/false, TestFixture::TamperingMode::HighS); +} - curve_::g1_bigfr_ct public_key = curve_::g1_bigfr_ct::from_witness(&builder, account.public_key); +TYPED_TEST(EcdsaTests, ZeroR) +{ + TestFixture::test_verify_signature(/*random_signature=*/false, TestFixture::TamperingMode::ZeroR); +} - std::vector rr(signature.r.begin(), signature.r.end()); - std::vector ss(signature.s.begin(), signature.s.end()); - std::vector vv = { 27 }; // Use a valid recovery id +TYPED_TEST(EcdsaTests, ZeroS) +{ + TestFixture::test_verify_signature(/*random_signature=*/false, TestFixture::TamperingMode::ZeroS); +} - stdlib::ecdsa_signature sig{ curve_::byte_array_ct(&builder, rr), - curve_::byte_array_ct(&builder, ss), - curve_::byte_array_ct(&builder, vv) }; +TYPED_TEST(EcdsaTests, InvalidPubKey) +{ + // Disable asserts because `validate_on_curve` raises an error in the `mult_madd` function: + // BB_ASSERT_EQ(remainder_1024.lo, uint512_t(0)) + 
BB_DISABLE_ASSERTS(); + TestFixture::test_verify_signature(/*random_signature=*/false, TestFixture::TamperingMode::InvalidPubKey); +} - curve_::byte_array_ct message(&builder, message_string); +TYPED_TEST(EcdsaTests, InfinityPubKey) +{ + // Disable asserts to avoid errors trying to invert zero + BB_DISABLE_ASSERTS(); + TestFixture::test_verify_signature(/*random_signature=*/false, TestFixture::TamperingMode::InfinityPubKey); +} - curve_::bool_ct signature_result = - stdlib::ecdsa_verify_signature_noassert( - message, public_key, sig); +TYPED_TEST(EcdsaTests, OutOfBoundsHash) +{ + TestFixture::test_verify_signature(/*random_signature=*/false, TestFixture::TamperingMode::OutOfBoundsHash); +} - EXPECT_EQ(signature_result.get_value(), false); +TYPED_TEST(EcdsaTests, InfinityScalarMul) +{ + // Disable asserts because native ecdsa verification raises an error if the result of the scalar multiplication is + // the point at infinity + BB_DISABLE_ASSERTS(); + TestFixture::test_verify_signature(/*random_signature=*/false, TestFixture::TamperingMode::InfinityScalarMul); +} - std::cerr << "num gates = " << builder.get_estimated_num_finalized_gates() << std::endl; - benchmark_info(Builder::NAME_STRING, - "ECDSA", - "Signature Verification Test", - "Gate Count", - builder.get_estimated_num_finalized_gates()); - bool proof_result = CircuitChecker::check(builder); - EXPECT_EQ(proof_result, true); +TYPED_TEST(EcdsaTests, Wycherproof) +{ + if constexpr (TypeParam::type == bb::CurveType::SECP256K1) { + TestFixture::test_wycherproof(stdlib::secp256k1_tests); + } else { + TestFixture::test_wycherproof(stdlib::secp256r1_tests); + } } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa_impl.hpp b/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa_impl.hpp index efeb41fd2f30..3fa0a83ce997 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa_impl.hpp 
@@ -18,221 +18,145 @@ auto& engine = numeric::get_debug_randomness(); } /** - * @brief Verify ECDSA signature. Produces unsatisfiable constraints if signature fails + * @brief Verify ECDSA signature. Returns bool_t(true/false) depending on whether the signature is valid or not. + * + * @details Fix the following notation: + * 1. \f$E\f$ is an elliptic curve over the base field \f$\mathbb{F}_q\f$. + * 2. \f$G\f$ is a generator of the group of points of \f$E\f$, the order of \f$G\f$ is \f$n\f$. + * 3. \f$a \in \mathbb{F}_n^{\ast}\f$ is a private key, and \f$P := aG\f$ is the associated public key + * 4. \f$\mathbf{H}\f$ is a hash function + * + * Given a message \f$m\f$, a couple \f$(r,s)\f$ is a valid signature for the message \f$m\f$ with respect to the public + * key \f$P\f$ if: + * 1. \f$P\f$ is a point on \f$E\f$ + * 2. \f$P\f$ is not the point at infinity + * 3. \f$0 < r < n\f$ + * 4. \f$0 < s < (n+1) / 2\f$ + * 5. Define \f$e := \mathbf{H}(m) \mod n\f$ and \f$Q := e s^{-1} G + r s^{-1} P \f$ + * 6. \f$Q\f$ is not the point at infinity + * 7. \f$Q_x = r \mod n\f$ (note that \f$Q_x \in \mathbb{F}_q\f$) + * + * @note The requirement of step 4. is to avoid signature malleability: if \f$(r,s)\f$ is a valid signature for + * message \f$m\f$ and public key \f$P\f$, so is \f$(r,n-s)\f$. We protect against malleability by enforcing that + * \f$s\f$ is always the lowest of the two possible values. + * + * @note In Ethereum signatures contain also a recovery byte \f$v\f$ which is used to recover the public key for which + * the signature is to be validated. As we receive the public key as part of the inputs to the verification function, we + * do not handle the recovery byte. The signature which is the input to the verification function is given by + * \f$(r,s)\f$. The users of the verification function should handle the recovery byte if that is in their interest. + * + * @note This function verifies that `sig` is a valid signature for the public key `public_key`. 
The function returns + * an in-circuit boolean value which bears witness to whether the signature verification was successfull or not. The + * boolean is NOT constrained to be equal to bool_t(true). + * + * @note The circuit introduces constraints for the following assertions: + * 1. \f$P\f$ is on the curve + * 2. \f$P\f$ is on the point at infinity + * 3. \f$H(m) < n\f$ + * 4. \f$0 < r < n\f$ + * 5. \f$0 < s < (n+1)/2\f$ + * 6. \f$Q := H(m) s^{-1} G + r s^{-1} P\f$ is not the point at infinity + * Therefore, if the witnesses passed to this function do not satisfy these constraints, the resulting circuit + * will be unsatisfied. If a user wants to use the verification inside a in-circuit branch, then they need to supply + * valid data for \f$m, P, r, s\f$, even though \f$(r,s)\f$ doesn't need to be a valid signature. * * @tparam Builder * @tparam Curve * @tparam Fq * @tparam Fr * @tparam G1 - * @param message + * @param hashed_message * @param public_key * @param sig * @return bool_t */ template -bool_t ecdsa_verify_signature(const stdlib::byte_array& message, +bool_t ecdsa_verify_signature(const stdlib::byte_array& hashed_message, const G1& public_key, const ecdsa_signature& sig) { - Builder* ctx = message.get_context() ? message.get_context() : public_key.x.context; - - BB_ASSERT_EQ(sig.v.size(), 1ULL, "ecdsa: v must be a single byte"); - - /** - * Check if recovery id v is either 27 ot 28. - * - * The v in an (r, s, v) ecdsa signature is the 8-bit recovery id s.t. v ∈ {0, 1, 2, 3}. - * It is used to recover signing public key from an ecdsa signature. In practice, the value - * of v is offset by 27 following the convention from the original bitcoin whitepaper. - * - * The value of v depends on the point R = (x, y) s.t. 
r = x % |Fr| - * 0: y is even && x < |Fr| (x = r) - * 1: y is odd && x < |Fr| (x = r) - * 2: y is even && |Fr| <= x < |Fq| (x = r + |Fr|) - * 3: y is odd && |Fr| <= x < |Fq| (x = r + |Fr|) - * - * It is highly unlikely for x be be in [|Fr|, |Fq|) for the secp256k1 curve because: - * P(|Fr| <= x < |Fq|) = 1 - |Fr|/|Fq| ≈ 0. - * Therefore, it is reasonable to assume that the value of v will always be 0 or 1 - * (i.e. 27 or 28 with offset). In fact, the ethereum yellow paper [1] only allows v to be 27 or 28 - * and considers signatures with v ∈ {29, 30} to be non-standard. - * - * TODO(Suyash): EIP-155 allows v > 35 to ensure different v on different chains. - * Do we need to consider that in our circuits? - * - * References: - * [1] Ethereum yellow paper, Appendix E: https://ethereum.github.io/yellowpaper/paper.pdf - * [2] EIP-155: https://eips.ethereum.org/EIPS/eip-155 - * - */ - // Note: This check is also present in the _noassert variation of this method. - sig.v[0].assert_is_in_set({ field_t(27), field_t(28) }, "ecdsa: signature is non-standard"); - - stdlib::byte_array hashed_message = - static_cast>(stdlib::SHA256::hash(message)); - + // Fetch the context + Builder* builder = hashed_message.get_context(); + builder = validate_context(builder, public_key.get_context()); + builder = validate_context(builder, sig.get_context()); + BB_ASSERT_EQ(builder != nullptr, true, "At least one of the inputs should be non-constant."); + + // Turn the hashed message into an element of Fr + // The assertion means that an honest prover has a small probability of not being able to generate a valid proof if + // H(m) >= n. Enforcing this condition introduces a small number of gates, and ensures that signatures cannot be + // forged by finding a collision of H modulo n. While finding such a collision is supposed to be hard even modulo n, + // we protect against this case with this cheap check. 
Fr z(hashed_message); - z.assert_is_in_field(); + z.assert_is_in_field( + "ECDSA input validation: the hash of the message is bigger than the order of the elliptic curve."); - Fr r(sig.r); - // force r to be < secp256k1 group modulus, so we can compare with `result_mod_r` below - r.assert_is_in_field(); + // Step 1. + public_key.validate_on_curve("ECDSA input validation: the public key is not a point on the elliptic curve."); - Fr s(sig.s); + // Step 2. + public_key.is_point_at_infinity().assert_equal(bool_t(false), + "ECDSA input validation: the public key is the point at infinity."); - // r and s should not be zero - r.assert_is_not_equal(Fr::zero()); - s.assert_is_not_equal(Fr::zero()); + // Step 3. + Fr r(sig.r); + r.assert_is_in_field("ECDSA input validation: the r component of the signature is bigger than the order of the " + "elliptic curve."); // r < n + r.assert_is_not_equal(Fr::zero(), "ECDSA input validation: the r component of the signature is zero."); // 0 < r - // s should be less than |Fr| / 2 - // Read more about this at: https://www.derpturkey.com/inherent-malleability-of-ecdsa-signatures/amp/ - s.assert_less_than((Fr::modulus + 1) / 2); + // Step 4. + Fr s(sig.s); + s.assert_less_than( + (Fr::modulus + 1) / 2, + "ECDSA input validation: the s component of the signature is bigger than Fr::modulus - s."); // s < (n+1)/2 + s.assert_is_not_equal(Fr::zero(), "ECDSA input validation: the s component of the signature is zero."); // 0 < s - // We already checked that s is nonzero + // Step 5. Fr u1 = z.div_without_denominator_check(s); Fr u2 = r.div_without_denominator_check(s); - public_key.validate_on_curve(); - G1 result; - // TODO(Cody): Having Plookup should not determine which curve is used. 
- // Use special plookup secp256k1 ECDSA mul if available (this relies on k1 endomorphism, and cannot be used for - // other curves) if constexpr (Curve::type == bb::CurveType::SECP256K1) { result = G1::secp256k1_ecdsa_mul(public_key, u1, u2); } else { - result = G1::batch_mul({ G1::one(ctx), public_key }, { u1, u2 }); + // This error comes from the lookup tables used in batch_mul. We could get rid of it by setting with_edgecase = + // true. However, this would increase the gate count, and it would handle a case that should not appear in + // general: someone using plus or minus the generator as a public key. + if ((public_key.get_value().x == Curve::g1::affine_one.x) && (!builder->failed())) { + builder->failure("ECDSA input validation: the public key is equal to plus or minus the generator point."); + } + result = G1::batch_mul({ G1::one(builder), public_key }, { u1, u2 }); } - result.x.self_reduce(); - - // transfer Fq value x to an Fr element and reduce mod r - Fr result_mod_r(ctx, 0); - result_mod_r.binary_basis_limbs[0].element = result.x.binary_basis_limbs[0].element; - result_mod_r.binary_basis_limbs[1].element = result.x.binary_basis_limbs[1].element; - result_mod_r.binary_basis_limbs[2].element = result.x.binary_basis_limbs[2].element; - result_mod_r.binary_basis_limbs[3].element = result.x.binary_basis_limbs[3].element; - result_mod_r.binary_basis_limbs[0].maximum_value = result.x.binary_basis_limbs[0].maximum_value; - result_mod_r.binary_basis_limbs[1].maximum_value = result.x.binary_basis_limbs[1].maximum_value; - result_mod_r.binary_basis_limbs[2].maximum_value = result.x.binary_basis_limbs[2].maximum_value; - result_mod_r.binary_basis_limbs[3].maximum_value = result.x.binary_basis_limbs[3].maximum_value; - - result_mod_r.prime_basis_limb = result.x.prime_basis_limb; - - result_mod_r.assert_is_in_field(); - - result_mod_r.binary_basis_limbs[0].element.assert_equal(r.binary_basis_limbs[0].element); - 
result_mod_r.binary_basis_limbs[1].element.assert_equal(r.binary_basis_limbs[1].element); - result_mod_r.binary_basis_limbs[2].element.assert_equal(r.binary_basis_limbs[2].element); - result_mod_r.binary_basis_limbs[3].element.assert_equal(r.binary_basis_limbs[3].element); - result_mod_r.prime_basis_limb.assert_equal(r.prime_basis_limb); - return bool_t(ctx, true); -} - -/** - * @brief Verify ECDSA signature. Returns 0 if signature fails (i.e. does not produce unsatisfiable constraints) - * - * @tparam Builder - * @tparam Curve - * @tparam Fq - * @tparam Fr - * @tparam G1 - * @param hashed_message - * @param public_key - * @param sig - * @return bool_t - */ -template -bool_t ecdsa_verify_signature_prehashed_message_noassert(const stdlib::byte_array& hashed_message, - const G1& public_key, - const ecdsa_signature& sig) -{ - Builder* ctx = hashed_message.get_context() ? hashed_message.get_context() : public_key.x.context; - - BB_ASSERT_EQ(sig.v.size(), 1ULL, "ecdsa: v must be a single byte"); - - Fr z(hashed_message); - z.assert_is_in_field(); - Fr r(sig.r); - // force r to be < secp256k1 group modulus, so we can compare with `result_mod_r` below - r.assert_is_in_field(); - - Fr s(sig.s); + // Step 6. + result.is_point_at_infinity().assert_equal( + bool_t(false), "ECDSA validation: the result of the batch multiplication is the point at infinity."); - // r and s should not be zero - r.assert_is_not_equal(Fr::zero()); - s.assert_is_not_equal(Fr::zero()); - - // s should be less than |Fr| / 2 - // Read more about this at: https://www.derpturkey.com/inherent-malleability-of-ecdsa-signatures/amp/ - s.assert_less_than((Fr::modulus + 1) / 2); + // Step 7. + // We reduce result.x to 2^s, where s is the smallest s.t. 2^s > q. 
It is cheap in terms of constraints, and avoids + // possible edge cases + result.x.self_reduce(); - Fr u1 = z / s; - Fr u2 = r / s; + // Transfer Fq value result.x to Fr (this is just moving from a C++ class to another) + Fr result_x_mod_r = Fr::unsafe_construct_from_limbs(result.x.binary_basis_limbs[0].element, + result.x.binary_basis_limbs[1].element, + result.x.binary_basis_limbs[2].element, + result.x.binary_basis_limbs[3].element); + // Copy maximum limb values from Fq to Fr: this is needed by the subtraction happening in the == operator + for (size_t idx = 0; idx < 4; idx++) { + result_x_mod_r.binary_basis_limbs[idx].maximum_value = result.x.binary_basis_limbs[idx].maximum_value; + } - public_key.validate_on_curve(); + // Check result.x = r mod n + bool_t is_signature_valid = result_x_mod_r == r; - G1 result; - // Use special plookup secp256k1 ECDSA mul if available (this relies on k1 endomorphism, and cannot be used for - // other curves) - if constexpr (Curve::type == bb::CurveType::SECP256K1) { - result = G1::secp256k1_ecdsa_mul(public_key, u1, u2); + // Logging + if (is_signature_valid.get_value()) { + vinfo("ECDSA signature verification succeeded."); } else { - result = G1::batch_mul({ G1::one(ctx), public_key }, { u1, u2 }); + vinfo("ECDSA signature verification failed"); } - result.x.self_reduce(); - - // transfer Fq value x to an Fr element and reduce mod r - Fr result_mod_r(ctx, 0); - result_mod_r.binary_basis_limbs[0].element = result.x.binary_basis_limbs[0].element; - result_mod_r.binary_basis_limbs[1].element = result.x.binary_basis_limbs[1].element; - result_mod_r.binary_basis_limbs[2].element = result.x.binary_basis_limbs[2].element; - result_mod_r.binary_basis_limbs[3].element = result.x.binary_basis_limbs[3].element; - result_mod_r.binary_basis_limbs[0].maximum_value = result.x.binary_basis_limbs[0].maximum_value; - result_mod_r.binary_basis_limbs[1].maximum_value = result.x.binary_basis_limbs[1].maximum_value; - 
result_mod_r.binary_basis_limbs[2].maximum_value = result.x.binary_basis_limbs[2].maximum_value; - result_mod_r.binary_basis_limbs[3].maximum_value = result.x.binary_basis_limbs[3].maximum_value; - - result_mod_r.prime_basis_limb = result.x.prime_basis_limb; - - result_mod_r.assert_is_in_field(); - - bool_t output(ctx, true); - output &= result_mod_r.binary_basis_limbs[0].element == (r.binary_basis_limbs[0].element); - output &= result_mod_r.binary_basis_limbs[1].element == (r.binary_basis_limbs[1].element); - output &= result_mod_r.binary_basis_limbs[2].element == (r.binary_basis_limbs[2].element); - output &= result_mod_r.binary_basis_limbs[3].element == (r.binary_basis_limbs[3].element); - output &= result_mod_r.prime_basis_limb == (r.prime_basis_limb); - - sig.v[0].assert_is_in_set({ field_t(27), field_t(28) }, "ecdsa: signature is non-standard"); - - return output; -} -/** - * @brief Verify ECDSA signature. Returns 0 if signature fails (i.e. does not produce unsatisfiable constraints) - * - * @tparam Builder - * @tparam Curve - * @tparam Fq - * @tparam Fr - * @tparam G1 - * @param message - * @param public_key - * @param sig - * @return bool_t - */ -template -bool_t ecdsa_verify_signature_noassert(const stdlib::byte_array& message, - const G1& public_key, - const ecdsa_signature& sig) -{ - stdlib::byte_array hashed_message = - static_cast>(stdlib::SHA256::hash(message)); - - return ecdsa_verify_signature_prehashed_message_noassert( - hashed_message, public_key, sig); + return is_signature_valid; } /** @@ -244,44 +168,51 @@ bool_t ecdsa_verify_signature_noassert(const stdlib::byte_array void generate_ecdsa_verification_test_circuit(Builder& builder, size_t num_iterations) { - using curve = stdlib::secp256k1; - using fr = typename curve::fr; - using fq = typename curve::fq; - using g1 = typename curve::g1; + using Curve = stdlib::secp256k1; + + // Native types + using FrNative = typename Curve::fr; + using FqNative = typename Curve::fq; + using G1Native = 
typename Curve::g1; + + // Stdlib types + using Fr = typename Curve::bigfr_ct; + using Fq = typename Curve::fq_ct; + using G1 = typename Curve::g1_bigfr_ct; std::string message_string = "Instructions unclear, ask again later."; - crypto::ecdsa_key_pair account; + crypto::ecdsa_key_pair account; for (size_t i = 0; i < num_iterations; i++) { // Generate unique signature for each iteration - account.private_key = curve::fr::random_element(&engine); - account.public_key = curve::g1::one * account.private_key; + account.private_key = FrNative::random_element(&engine); + account.public_key = G1Native::one * account.private_key; crypto::ecdsa_signature signature = - crypto::ecdsa_construct_signature(message_string, account); + crypto::ecdsa_construct_signature(message_string, + account); - bool first_result = crypto::ecdsa_verify_signature( + bool native_verification = crypto::ecdsa_verify_signature( message_string, account.public_key, signature); - static_cast(first_result); // TODO(Cody): This is not used anywhere. 
+ BB_ASSERT_EQ(native_verification, true, "Native ECDSA verification failed while generating test circuit."); std::vector rr(signature.r.begin(), signature.r.end()); std::vector ss(signature.s.begin(), signature.s.end()); - std::vector vv = { signature.v }; - typename curve::g1_bigfr_ct public_key = curve::g1_bigfr_ct::from_witness(&builder, account.public_key); + G1 public_key = G1::from_witness(&builder, account.public_key); + + ecdsa_signature sig{ byte_array(&builder, rr), byte_array(&builder, ss) }; - stdlib::ecdsa_signature sig{ typename curve::byte_array_ct(&builder, rr), - typename curve::byte_array_ct(&builder, ss), - typename curve::byte_array_ct(&builder, vv) }; + byte_array message(&builder, message_string); - typename curve::byte_array_ct message(&builder, message_string); + // Compute H(m) + stdlib::byte_array hashed_message = + static_cast>(stdlib::SHA256::hash(message)); // Verify ecdsa signature - stdlib::ecdsa_verify_signature(message, public_key, sig); + bool_t result = + stdlib::ecdsa_verify_signature(hashed_message, public_key, sig); + result.assert_equal(bool_t(true)); } } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa_tests_data.hpp b/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa_tests_data.hpp new file mode 100644 index 000000000000..325521a8d87c --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa_tests_data.hpp @@ -0,0 +1,136 @@ + + +#include +#include +#include +#include +#include + +#include "barretenberg/ecc/curves/secp256k1/secp256k1.hpp" +#include "barretenberg/ecc/curves/secp256r1/secp256r1.hpp" + +namespace bb::stdlib { + +using namespace bb; +using namespace bb::curve; + +template struct WycherproofTest { + using Fr = Curve::ScalarField; + using Fq = Curve::BaseField; + + // Public Key + Fq x; + Fq y; + + // Data + std::vector message; + Fr r; + Fr s; + bool is_valid_signature; + bool is_circuit_satisfied; + std::string comment; + std::string 
failure_msg; +}; + +using WycherproofSecp256k1 = WycherproofTest; +using WycherproofSecp256r1 = WycherproofTest; + +/** + * @brief Test for Secp256k1 ECDSA signatures taken from the Wycherproof project + * + */ +const std::vector secp256k1_tests{ + // Arithmetic error tests + WycherproofSecp256k1{ + .x = WycherproofSecp256k1::Fq("0x02ef4d6d6cfd5a94f1d7784226e3e2a6c0a436c55839619f38fb4472b5f9ee77"), + .y = WycherproofSecp256k1::Fq("0x7eb4acd4eebda5cd72875ffd2a2f26229c2dc6b46500919a432c86739f3ae866"), + .message = { 0x31, 0x32, 0x33, 0x34, 0x30, 0x30 }, + .r = WycherproofSecp256k1::Fr("0x0000000000000000000000000000000000000000000000000000000000000101"), + .s = WycherproofSecp256k1::Fr("0xc58b162c58b162c58b162c58b162c58a1b242973853e16db75c8a1a71da4d39d"), + .is_valid_signature = true, + .is_circuit_satisfied = false, + .comment = "Arithmetic error, s is larger than (n+1)/2", + .failure_msg = + "ECDSA input validation: the s component of the signature is bigger than Fr::modulus - s.: hi limb.", + }, + WycherproofSecp256k1{ + .x = WycherproofSecp256k1::Fq("0xd6ef20be66c893f741a9bf90d9b74675d1c2a31296397acb3ef174fd0b300c65"), + .y = WycherproofSecp256k1::Fq("0x4a0c95478ca00399162d7f0f2dc89efdc2b28a30fbabe285857295a4b0c4e265"), + .message = { 0x31, 0x32, 0x33, 0x34, 0x30, 0x30 }, + .r = WycherproofSecp256k1::Fr("0x00000000000000000000000000000000000000062522bbd3ecbe7c39e93e7c26"), + .s = WycherproofSecp256k1::Fr("0x783266e90f43dafe5cd9b3b0be86de22f9de83677d0f50713a468ec72fcf5d57"), + .is_valid_signature = true, + .is_circuit_satisfied = true, + .comment = "Arithmetic error, r component is small", + .failure_msg = "", + }, + // Point duplication tests + WycherproofSecp256k1{ + .x = WycherproofSecp256k1::Fq("0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798"), + .y = WycherproofSecp256k1::Fq("0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8"), + .message = { 0x31, 0x32, 0x33, 0x34, 0x30, 0x30 }, + .r = 
WycherproofSecp256k1::Fr("0xbb5a52f42f9c9261ed4361f59422a1e30036e7c32b270c8807a419feca605023"), + .s = WycherproofSecp256k1::Fr("0x2492492492492492492492492492492463cfd66a190a6008891e0d81d49a0952"), + .is_valid_signature = false, + .is_circuit_satisfied = true, + .comment = "Point duplication, public key shares x-coordinates with generator", + .failure_msg = "", + }, + // Edge case public key tests + WycherproofSecp256k1{ + .x = WycherproofSecp256k1::Fq("0x6e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40caff"), + .y = WycherproofSecp256k1::Fq("0x00000001060492d5a5673e0f25d8d50fb7e58c49d86d46d4216955e0aa3d40e1"), + .message = { 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65 }, + .r = WycherproofSecp256k1::Fr("0x6d6a4f556ccce154e7fb9f19e76c3deca13d59cc2aeb4ecad968aab2ded45965"), + .s = WycherproofSecp256k1::Fr("0x53b9fa74803ede0fc4441bf683d56c564d3e274e09ccf47390badd1471c05fb7"), + .is_valid_signature = true, + .is_circuit_satisfied = true, + .comment = "Edge case public key, y coordinate is small", + .failure_msg = "", + }, +}; + +/** + * @brief Test for Secp256r1 ECDSA signatures taken from the Wycherproof project + * + */ +const std::vector secp256r1_tests{ + // Arithmetic error test + WycherproofSecp256r1{ + .x = WycherproofSecp256r1::Fq("0x8d3c2c2c3b765ba8289e6ac3812572a25bf75df62d87ab7330c3bdbad9ebfa5c"), + .y = WycherproofSecp256r1::Fq("0x4c6845442d66935b238578d43aec54f7caa1621d1af241d4632e0b780c423f5d"), + .message = { 0x31, 0x32, 0x33, 0x34, 0x30, 0x30 }, + .r = WycherproofSecp256r1::Fr("0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296"), + .s = WycherproofSecp256r1::Fr("0x16a4502e2781e11ac82cbc9d1edd8c981584d13e18411e2f6e0478c34416e3bb"), + .is_valid_signature = true, + .is_circuit_satisfied = true, + .comment = "Arithmetic error", + .failure_msg = "", + }, + // Point duplication test + WycherproofSecp256r1{ + .x = WycherproofSecp256r1::Fq("0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296"), + .y = 
WycherproofSecp256r1::Fq("0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5"), + .message = { 0x31, 0x32, 0x33, 0x34, 0x30, 0x30 }, + .r = WycherproofSecp256r1::Fr("0xbb5a52f42f9c9261ed4361f59422a1e30036e7c32b270c8807a419feca605023"), + .s = WycherproofSecp256r1::Fr("0x249249246db6db6ddb6db6db6db6db6dad4591868595a8ee6bf5f864ff7be0c2"), + .is_valid_signature = false, + .is_circuit_satisfied = + false, // When the public key is equal to ±G, the circuit fails because of the generation of lookup tables + .comment = "Point duplication, public key shares x-coordinates with generator", + .failure_msg = "ECDSA input validation: the public key is equal to plus or minus the generator point.", + }, + // Edge case public key test + WycherproofSecp256r1{ + .x = WycherproofSecp256r1::Fq("0x4f337ccfd67726a805e4f1600ae2849df3807eca117380239fbd816900000000"), + .y = WycherproofSecp256r1::Fq("0xed9dea124cc8c396416411e988c30f427eb504af43a3146cd5df7ea60666d685"), + .message = { 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65 }, + .r = WycherproofSecp256r1::Fr("0x0fe774355c04d060f76d79fd7a772e421463489221bf0a33add0be9b1979110b"), + .s = WycherproofSecp256r1::Fr("0x500dcba1c69a8fbd43fa4f57f743ce124ca8b91a1f325f3fac6181175df55737"), + .is_valid_signature = true, + .is_circuit_satisfied = true, + .comment = "Edge case public key, x-coordinate has many trailing zeros", + .failure_msg = "", + }, +}; +} // namespace bb::stdlib diff --git a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.cpp b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.cpp index 0004676872d9..508f66a5cb94 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.cpp @@ -29,8 +29,8 @@ schnorr_signature_bits schnorr_convert_signature(C* context, const crypto::sc const uint8_t* e_ptr = &signature.e[0]; numeric::read(s_ptr, s_bigint); numeric::read(e_ptr, e_bigint); - 
schnorr_signature_bits sig{ .s = cycle_scalar::from_witness_bitstring(context, s_bigint, 256), - .e = cycle_scalar::from_witness_bitstring(context, e_bigint, 256) }; + schnorr_signature_bits sig{ .s = cycle_scalar::from_u256_witness(context, s_bigint), + .e = cycle_scalar::from_u256_witness(context, e_bigint) }; return sig; } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp index a852624235a7..0a41fecb7df2 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp @@ -123,7 +123,7 @@ TEST(stdlib_schnorr, schnorr_signature_verification_result) stdlib::schnorr_signature_bits sig = stdlib::schnorr_convert_signature(&builder, signature); byte_array_ct message(&builder, longer_string); bool_ct signature_result = schnorr_signature_verification_result(message, pub_key, sig); - EXPECT_EQ(signature_result.witness_bool, true); + EXPECT_EQ(signature_result.get_value(), true); info("num gates = ", builder.get_estimated_num_finalized_gates()); @@ -166,7 +166,7 @@ TEST(stdlib_schnorr, signature_verification_result_failure) stdlib::schnorr_signature_bits sig = stdlib::schnorr_convert_signature(&builder, signature); byte_array_ct message(&builder, message_string); bool_ct signature_result = schnorr_signature_verification_result(message, pub_key2_ct, sig); - EXPECT_EQ(signature_result.witness_bool, false); + EXPECT_EQ(signature_result.get_value(), false); info("num gates = ", builder.get_estimated_num_finalized_gates()); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.cpp index d21dc8cb43d6..bd3246a50a44 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.cpp +++ 
b/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.cpp @@ -16,10 +16,11 @@ namespace bb::stdlib::recursion::honk { * */ GoblinRecursiveVerifierOutput GoblinRecursiveVerifier::verify(const GoblinProof& proof, - const MergeCommitments& merge_commitments) + const MergeCommitments& merge_commitments, + const MergeSettings merge_settings) { StdlibProof stdlib_proof(*builder, proof); - return verify(stdlib_proof, merge_commitments); + return verify(stdlib_proof, merge_commitments, merge_settings); } /** @@ -30,10 +31,11 @@ GoblinRecursiveVerifierOutput GoblinRecursiveVerifier::verify(const GoblinProof& * */ GoblinRecursiveVerifierOutput GoblinRecursiveVerifier::verify(const StdlibProof& proof, - const MergeCommitments& merge_commitments) + const MergeCommitments& merge_commitments, + const MergeSettings merge_settings) { // Verify the final merge step - MergeVerifier merge_verifier{ builder, MergeSettings::PREPEND, transcript }; + MergeVerifier merge_verifier{ builder, merge_settings, transcript }; auto [merge_pairing_points, merged_table_commitments] = merge_verifier.verify_proof(proof.merge_proof, merge_commitments); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.hpp b/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.hpp index e2d567681c73..d29bb18c2a1c 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.hpp @@ -48,6 +48,8 @@ class GoblinRecursiveVerifier { StdlibEccvmProof eccvm_proof; // contains pre-IPA and IPA proofs StdlibHonkProof translator_proof; + StdlibProof() = default; + StdlibProof(Builder& builder, const GoblinProof& goblin_proof) : merge_proof(builder, goblin_proof.merge_proof) , eccvm_proof(builder, @@ -61,12 +63,16 @@ class GoblinRecursiveVerifier { const std::shared_ptr& transcript = 
std::make_shared()) : builder(builder) , verification_keys(verification_keys) - , transcript(transcript){}; + , transcript(transcript) {}; [[nodiscard("IPA claim and Pairing points should be accumulated")]] GoblinRecursiveVerifierOutput verify( - const GoblinProof&, const MergeCommitments& merge_commitments); + const GoblinProof&, + const MergeCommitments& merge_commitments, + const MergeSettings merge_settings = MergeSettings::PREPEND); [[nodiscard("IPA claim and Pairing points should be accumulated")]] GoblinRecursiveVerifierOutput verify( - const StdlibProof&, const MergeCommitments& merge_commitments); + const StdlibProof&, + const MergeCommitments& merge_commitments, + const MergeSettings merge_settings = MergeSettings::PREPEND); private: Builder* builder; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.test.cpp index 56402a0920ee..59da953be31f 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.test.cpp @@ -19,52 +19,79 @@ class GoblinRecursiveVerifierTests : public testing::Test { using OuterFlavor = UltraFlavor; using OuterProver = UltraProver_; using OuterVerifier = UltraVerifier_; - using OuterDeciderProvingKey = DeciderProvingKey_; + using OuterProverInstance = ProverInstance_; using Commitment = MergeVerifier::Commitment; using RecursiveCommitment = GoblinRecursiveVerifier::MergeVerifier::Commitment; using MergeCommitments = MergeVerifier::InputCommitments; using RecursiveMergeCommitments = GoblinRecursiveVerifier::MergeVerifier::InputCommitments; - + using FF = TranslatorFlavor::FF; + using BF = TranslatorFlavor::BF; static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } + // Compute the size of a Translator commitment (in bb::fr's) + 
static constexpr size_t comm_frs = bb::field_conversion::calc_num_bn254_frs(); // 4 + static constexpr size_t eval_frs = bb::field_conversion::calc_num_bn254_frs(); // 1 + + // The `op` wire commitment is currently the second element of the proof, following the + // `accumulated_result` which is a BN254 BaseField element. + static constexpr size_t offset = bb::field_conversion::calc_num_bn254_frs(); + + struct ProverOutput { GoblinProof proof; Goblin::VerificationKey verifier_input; MergeCommitments merge_commitments; RecursiveMergeCommitments recursive_merge_commitments; }; + + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1298): + // Better recursion testing - create more flexible proof tampering tests. + // Modify the `op` commitment which is a part of the Merge protocol. + static void tamper_with_op_commitment(HonkProof& translator_proof) + { + + // Extract `op` fields and convert them to a Commitment object + auto element_frs = std::span{ translator_proof }.subspan(offset, comm_frs); + auto op_commitment = NativeTranscriptParams::template deserialize(element_frs); + // Modify the commitment + op_commitment = op_commitment * FF(2); + // Serialize the tampered commitment into the proof (overwriting the valid one). + auto op_commitment_reserialized = bb::NativeTranscriptParams::serialize(op_commitment); + std::copy(op_commitment_reserialized.begin(), + op_commitment_reserialized.end(), + translator_proof.begin() + static_cast(offset)); + }; + + // Translator proof ends with [..., Libra:quotient_eval, Shplonk:Q, KZG:W]. We invalidate the proof by multiplying + // the eval by 2 (it leads to a Libra consistency check failure).
+ static void tamper_with_libra_eval(HonkProof& translator_proof) + { + // Proof tail size + static constexpr size_t tail_size = 2 * comm_frs + eval_frs; // 2*4 + 1 = 9 + // Index of the target field (one fr) from the beginning + const size_t idx = translator_proof.size() - tail_size; + + // Tamper: multiply by 2 (or tweak however you like) + translator_proof[idx] = translator_proof[idx] + translator_proof[idx]; + }; /** * @brief Create a goblin proof and the VM verification keys needed by the goblin recursive verifier * * @return ProverOutput */ - static ProverOutput create_goblin_prover_output(Builder* outer_builder = nullptr, const size_t NUM_CIRCUITS = 3) + static ProverOutput create_goblin_prover_output(Builder* outer_builder = nullptr, const size_t num_circuits = 5) { Goblin goblin; - // Construct and accumulate multiple circuits - for (size_t idx = 0; idx < NUM_CIRCUITS - 1; ++idx) { - MegaCircuitBuilder builder{ goblin.op_queue }; - GoblinMockCircuits::construct_simple_circuit(builder); - goblin.prove_merge(); - } - - Goblin goblin_final; - goblin_final.op_queue = goblin.op_queue; - MegaCircuitBuilder builder{ goblin_final.op_queue }; - builder.queue_ecc_no_op(); - GoblinMockCircuits::construct_simple_circuit(builder); + GoblinMockCircuits::construct_and_merge_mock_circuits(goblin, num_circuits); // Merge the ecc ops from the newly constructed circuit - goblin_final.op_queue->merge(); - + auto goblin_proof = goblin.prove(MergeSettings::APPEND); // Subtable values and commitments - needed for (Recursive)MergeVerifier MergeCommitments merge_commitments; - auto t_current = goblin_final.op_queue->construct_current_ultra_ops_subtable_columns(); - auto T_prev = goblin_final.op_queue->construct_previous_ultra_ops_table_columns(); - CommitmentKey pcs_commitment_key(goblin_final.op_queue->get_ultra_ops_table_num_rows()); + auto t_current = goblin.op_queue->construct_current_ultra_ops_subtable_columns(); + auto T_prev = 
goblin.op_queue->construct_previous_ultra_ops_table_columns(); + CommitmentKey pcs_commitment_key(goblin.op_queue->get_ultra_ops_table_num_rows()); for (size_t idx = 0; idx < MegaFlavor::NUM_WIRES; idx++) { merge_commitments.t_commitments[idx] = pcs_commitment_key.commit(t_current[idx]); merge_commitments.T_prev_commitments[idx] = pcs_commitment_key.commit(T_prev[idx]); @@ -77,11 +104,15 @@ class GoblinRecursiveVerifierTests : public testing::Test { RecursiveCommitment::from_witness(outer_builder, merge_commitments.t_commitments[idx]); recursive_merge_commitments.T_prev_commitments[idx] = RecursiveCommitment::from_witness(outer_builder, merge_commitments.T_prev_commitments[idx]); + // Removing the free witness tag, since the merge commitments in the full scheme are supposed to + // be fiat-shamirred earlier + recursive_merge_commitments.t_commitments[idx].unset_free_witness_tag(); + recursive_merge_commitments.T_prev_commitments[idx].unset_free_witness_tag(); } } // Output is a goblin proof plus ECCVM/Translator verification keys - return { goblin_final.prove(), + return { goblin_proof, { std::make_shared(), std::make_shared() }, merge_commitments, recursive_merge_commitments }; @@ -98,7 +129,7 @@ TEST_F(GoblinRecursiveVerifierTests, NativeVerification) std::shared_ptr verifier_transcript = std::make_shared(); - EXPECT_TRUE(Goblin::verify(proof, merge_commitments, verifier_transcript)); + EXPECT_TRUE(Goblin::verify(proof, merge_commitments, verifier_transcript, MergeSettings::APPEND)); } /** @@ -113,7 +144,7 @@ TEST_F(GoblinRecursiveVerifierTests, Basic) create_goblin_prover_output(&builder); GoblinRecursiveVerifier verifier{ &builder, verifier_input }; - GoblinRecursiveVerifierOutput output = verifier.verify(proof, recursive_merge_commitments); + GoblinRecursiveVerifierOutput output = verifier.verify(proof, recursive_merge_commitments, MergeSettings::APPEND); output.points_accumulator.set_public(); info("Recursive Verifier: num gates = ", builder.num_gates); @@ 
-124,9 +155,10 @@ TEST_F(GoblinRecursiveVerifierTests, Basic) // Construct and verify a proof for the Goblin Recursive Verifier circuit { - auto proving_key = std::make_shared(builder); - auto verification_key = std::make_shared(proving_key->get_precomputed()); - OuterProver prover(proving_key, verification_key); + auto prover_instance = std::make_shared(builder); + auto verification_key = + std::make_shared(prover_instance->get_precomputed()); + OuterProver prover(prover_instance, verification_key); OuterVerifier verifier(verification_key); auto proof = prover.construct_proof(); bool verified = verifier.template verify_proof(proof).result; @@ -147,25 +179,26 @@ TEST_F(GoblinRecursiveVerifierTests, IndependentVKHash) create_goblin_prover_output(&builder, inner_size); GoblinRecursiveVerifier verifier{ &builder, verifier_input }; - GoblinRecursiveVerifierOutput output = verifier.verify(proof, recursive_merge_commitments); + GoblinRecursiveVerifierOutput output = + verifier.verify(proof, recursive_merge_commitments, MergeSettings::APPEND); output.points_accumulator.set_public(); info("Recursive Verifier: num gates = ", builder.num_gates); // Construct and verify a proof for the Goblin Recursive Verifier circuit - auto proving_key = std::make_shared(builder); + auto prover_instance = std::make_shared(builder); auto outer_verification_key = - std::make_shared(proving_key->get_precomputed()); - OuterProver prover(proving_key, outer_verification_key); + std::make_shared(prover_instance->get_precomputed()); + OuterProver prover(prover_instance, outer_verification_key); OuterVerifier outer_verifier(outer_verification_key); return { builder.blocks, outer_verification_key }; }; - auto [blocks_2, verification_key_2] = get_blocks(2); - auto [blocks_4, verification_key_4] = get_blocks(4); + auto [blocks_5, verification_key_5] = get_blocks(5); + auto [blocks_6, verification_key_6] = get_blocks(6); - compare_ultra_blocks_and_verification_keys({ blocks_2, blocks_4 }, - { 
verification_key_2, verification_key_4 }); + compare_ultra_blocks_and_verification_keys({ blocks_5, blocks_6 }, + { verification_key_5, verification_key_6 }); } /** @@ -214,13 +247,7 @@ TEST_F(GoblinRecursiveVerifierTests, TranslatorFailure) // Tamper with the Translator proof preamble { GoblinProof tampered_proof = proof; - for (auto& val : tampered_proof.translator_proof) { - if (val > 0) { // tamper by finding the first non-zero value and incrementing it by 1 - val += 1; - break; - } - } - + tamper_with_op_commitment(tampered_proof.translator_proof); Builder builder; RecursiveMergeCommitments recursive_merge_commitments; @@ -229,24 +256,19 @@ TEST_F(GoblinRecursiveVerifierTests, TranslatorFailure) RecursiveCommitment::from_witness(&builder, merge_commitments.t_commitments[idx]); recursive_merge_commitments.T_prev_commitments[idx] = RecursiveCommitment::from_witness(&builder, merge_commitments.T_prev_commitments[idx]); + recursive_merge_commitments.t_commitments[idx].fix_witness(); + recursive_merge_commitments.T_prev_commitments[idx].fix_witness(); } GoblinRecursiveVerifier verifier{ &builder, verifier_input }; - [[maybe_unused]] auto goblin_rec_verifier_output = verifier.verify(tampered_proof, recursive_merge_commitments); + [[maybe_unused]] auto goblin_rec_verifier_output = + verifier.verify(tampered_proof, recursive_merge_commitments, MergeSettings::APPEND); EXPECT_FALSE(CircuitChecker::check(builder)); } - // Tamper with the Translator proof non-preamble values + // Tamper with the Translator proof non-preamble values { auto tampered_proof = proof; - int seek = 10; - for (auto& val : tampered_proof.translator_proof) { - if (val > 0) { // tamper by finding the tenth non-zero value and incrementing it by 1 - if (--seek == 0) { - val += 1; - break; - } - } - } + tamper_with_libra_eval(tampered_proof.translator_proof); Builder builder; @@ -256,10 +278,13 @@ TEST_F(GoblinRecursiveVerifierTests, TranslatorFailure) RecursiveCommitment::from_witness(&builder, 
merge_commitments.t_commitments[idx]); recursive_merge_commitments.T_prev_commitments[idx] = RecursiveCommitment::from_witness(&builder, merge_commitments.T_prev_commitments[idx]); + recursive_merge_commitments.t_commitments[idx].fix_witness(); + recursive_merge_commitments.T_prev_commitments[idx].fix_witness(); } GoblinRecursiveVerifier verifier{ &builder, verifier_input }; - [[maybe_unused]] auto goblin_rec_verifier_output = verifier.verify(tampered_proof, recursive_merge_commitments); + [[maybe_unused]] auto goblin_rec_verifier_output = + verifier.verify(tampered_proof, recursive_merge_commitments, MergeSettings::APPEND); EXPECT_FALSE(CircuitChecker::check(builder)); } } @@ -282,7 +307,8 @@ TEST_F(GoblinRecursiveVerifierTests, TranslationEvaluationsFailure) proof.eccvm_proof.pre_ipa_proof[op_limb_index] += 1; GoblinRecursiveVerifier verifier{ &builder, verifier_input }; - [[maybe_unused]] auto goblin_rec_verifier_output = verifier.verify(proof, recursive_merge_commitments); + [[maybe_unused]] auto goblin_rec_verifier_output = + verifier.verify(proof, recursive_merge_commitments, MergeSettings::APPEND); EXPECT_FALSE(CircuitChecker::check(builder)); } @@ -295,9 +321,6 @@ TEST_F(GoblinRecursiveVerifierTests, TranslatorMergeConsistencyFailure) { { - using Commitment = TranslatorFlavor::Commitment; - using FF = TranslatorFlavor::FF; - using BF = TranslatorFlavor::BF; Builder builder; @@ -307,34 +330,14 @@ TEST_F(GoblinRecursiveVerifierTests, TranslatorMergeConsistencyFailure) std::shared_ptr verifier_transcript = std::make_shared(); // Check natively that the proof is correct. - EXPECT_TRUE(Goblin::verify(proof, merge_commitments, verifier_transcript)); - - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1298): - // Better recursion testing - create more flexible proof tampering tests. - // Modify the `op` commitment which a part of the Merge protocol. 
- auto tamper_with_op_commitment = [](HonkProof& translator_proof) { - // Compute the size of a Translator commitment (in bb::fr's) - static constexpr size_t num_frs_comm = bb::field_conversion::calc_num_bn254_frs(); - // The `op` wire commitment is currently the second element of the proof, following the - // `accumulated_result` which is a BN254 BaseField element. - static constexpr size_t offset = bb::field_conversion::calc_num_bn254_frs(); - // Extract `op` fields and convert them to a Commitment object - auto element_frs = std::span{ translator_proof }.subspan(offset, num_frs_comm); - auto op_commitment = NativeTranscriptParams::template deserialize(element_frs); - // Modify the commitment - op_commitment = op_commitment * FF(2); - // Serialize the tampered commitment into the proof (overwriting the valid one). - auto op_commitment_reserialized = bb::NativeTranscriptParams::serialize(op_commitment); - std::copy(op_commitment_reserialized.begin(), - op_commitment_reserialized.end(), - translator_proof.begin() + static_cast(offset)); - }; + EXPECT_TRUE(Goblin::verify(proof, merge_commitments, verifier_transcript, MergeSettings::APPEND)); tamper_with_op_commitment(proof.translator_proof); // Construct and check the Goblin Recursive Verifier circuit GoblinRecursiveVerifier verifier{ &builder, verifier_input }; - [[maybe_unused]] auto goblin_rec_verifier_output = verifier.verify(proof, recursive_merge_commitments); + [[maybe_unused]] auto goblin_rec_verifier_output = + verifier.verify(proof, recursive_merge_commitments, MergeSettings::APPEND); EXPECT_FALSE(CircuitChecker::check(builder)); } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/stdlib/hash/CMakeLists.txt index eed29196d4e8..e0a7c157b8f2 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/CMakeLists.txt @@ -3,4 +3,5 @@ add_subdirectory(blake3s) add_subdirectory(pedersen) 
add_subdirectory(sha256) add_subdirectory(keccak) -add_subdirectory(poseidon2) \ No newline at end of file +add_subdirectory(poseidon2) +add_subdirectory(pfuzzer) \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.cpp index d32ccf59132f..085bf068dcb6 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.cpp @@ -11,8 +11,22 @@ namespace bb::stdlib { using namespace bb; -template -field_t pedersen_hash::hash(const std::vector& inputs, const GeneratorContext context) +/** + * @brief Computes a pedersen hash of the provided inputs + * @details The pedersen hash is computed as the x-coordinate of the point: P = \sum_i inputs[i] * G_i + len * H, where + * G_i and H are generator points of the Grumpkin curve and len is the number of inputs. The len * H term is included to + * avoid the trivial collision that otherwise results from negating all inputs. See crypto::pedersen_hash for more + * details. + * @note: The inputs are elements of the bn254 scalar field but are interpreted as scalars in the Grumpkin scalar field + * (represented by cycle_scalar). 
+ * + * @tparam Builder + * @param inputs The field elements to be hashed + * @param context (optional) context for generator selection/construction + * @return field_t The x-coordinate of the resulting pedersen hash point + */ +template +field_t pedersen_hash::hash(const std::vector& inputs, const GeneratorContext context) { using cycle_scalar = typename cycle_group::cycle_scalar; using Curve = EmbeddedCurve; @@ -23,75 +37,22 @@ field_t pedersen_hash::hash(const std::vector& inputs, const Gen std::vector points; scalars.emplace_back(cycle_scalar::create_from_bn254_scalar(field_ct(inputs.size()))); points.emplace_back(crypto::pedersen_hash_base::length_generator); - for (size_t i = 0; i < inputs.size(); ++i) { - scalars.emplace_back(cycle_scalar::create_from_bn254_scalar(inputs[i])); - // constructs constant cycle_group objects (non-witness) - points.emplace_back(base_points[i]); + for (const auto [point, scalar] : zip_view(base_points, inputs)) { + scalars.emplace_back(cycle_scalar::create_from_bn254_scalar(scalar)); + // Construct circuit-constant cycle_group objects (non-witness) + points.emplace_back(point); } auto result = cycle_group::batch_mul(points, scalars); - return result.x; -} - -template -field_t pedersen_hash::hash_skip_field_validation(const std::vector& inputs, - const GeneratorContext context) -{ - using cycle_scalar = typename cycle_group::cycle_scalar; - using Curve = EmbeddedCurve; - - const auto base_points = context.generators->get(inputs.size(), context.offset, context.domain_separator); - - std::vector scalars; - std::vector points; - scalars.emplace_back(cycle_scalar::create_from_bn254_scalar(field_ct(inputs.size()))); - points.emplace_back(crypto::pedersen_hash_base::length_generator); - for (size_t i = 0; i < inputs.size(); ++i) { - // `true` param = skip primality test when performing a scalar mul - scalars.emplace_back(cycle_scalar::create_from_bn254_scalar(inputs[i], true)); - // constructs constant cycle_group objects (non-witness) - 
points.emplace_back(base_points[i]); + // pedersen hash doesn't use y coordinate of result anymore in the circuit except for hashing + // so we can put result.y in used_witnesses + auto builder_ptr = result.y.get_context(); + if (builder_ptr != nullptr) { + builder_ptr->update_used_witnesses(result.y.witness_index); } - - auto result = cycle_group::batch_mul(points, scalars); return result.x; } -/** - * @brief Hash a byte_array. - * - * TODO(@zac-williamson #2796) Once Poseidon is implemented, replace this method with a more canonical hash algorithm - * (that is less efficient) - */ -template -field_t pedersen_hash::hash_buffer(const stdlib::byte_array& input, GeneratorContext context) -{ - const size_t num_bytes = input.size(); - const size_t bytes_per_element = 31; - size_t num_elements = static_cast(num_bytes % bytes_per_element != 0) + (num_bytes / bytes_per_element); - - std::vector elements; - for (size_t i = 0; i < num_elements; ++i) { - size_t bytes_to_slice = 0; - if (i == num_elements - 1) { - bytes_to_slice = num_bytes - (i * bytes_per_element); - } else { - bytes_to_slice = bytes_per_element; - } - auto element = static_cast(input.slice(i * bytes_per_element, bytes_to_slice)); - elements.emplace_back(element); - } - field_ct hashed; - if (elements.size() < 2) { - hashed = hash(elements, context); - } else { - hashed = hash({ elements[0], elements[1] }, context); - for (size_t i = 2; i < elements.size(); ++i) { - hashed = hash({ hashed, elements[i] }, context); - } - } - return hashed; -} template class pedersen_hash; template class pedersen_hash; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.hpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.hpp index 63c2030014e1..fd922956062d 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.hpp @@ -31,9 +31,6 @@ template class pedersen_hash { public: static field_ct 
hash(const std::vector& in, GeneratorContext context = {}); - // TODO health warnings! - static field_ct hash_skip_field_validation(const std::vector& in, GeneratorContext context = {}); - static field_ct hash_buffer(const stdlib::byte_array& input, GeneratorContext context = {}); }; } // namespace bb::stdlib diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.test.cpp index 3b352db98ba1..121cc811ad48 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.test.cpp @@ -4,10 +4,12 @@ #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" #include "barretenberg/numeric/random/engine.hpp" #include "barretenberg/stdlib/primitives/curves/bn254.hpp" +#include "barretenberg/stdlib/primitives/test_utils.hpp" #include "barretenberg/stdlib_circuit_builders/plookup_tables/fixed_base/fixed_base.hpp" #include "pedersen.hpp" using namespace bb; +using bb::stdlib::test_utils::check_circuit_and_gate_count; namespace { auto& engine = numeric::get_debug_randomness(); } @@ -20,9 +22,10 @@ template class StdlibPedersen : public testing::Test { using witness_ct = typename _curve::witness_ct; using public_witness_ct = typename _curve::public_witness_ct; using pedersen_hash = typename stdlib::pedersen_hash; + using cycle_group = typename stdlib::cycle_group; public: - static void test_pedersen() + static void test_pedersen_two() { Builder builder; @@ -46,13 +49,10 @@ template class StdlibPedersen : public testing::Test { fr_ct out = pedersen_hash::hash({ left, right }); - info("num gates = ", builder.get_estimated_num_finalized_gates()); - - bool result = CircuitChecker::check(builder); - EXPECT_EQ(result, true); - fr hash_native = crypto::pedersen_hash::hash({ left.get_value(), right.get_value() }); EXPECT_EQ(out.get_value(), hash_native); + + check_circuit_and_gate_count(builder, 2897); 
} static void test_pedersen_edge_cases() @@ -77,11 +77,6 @@ template class StdlibPedersen : public testing::Test { fr_ct out_with_zero = pedersen_hash::hash({ out_1_with_zero, out_2 }); fr_ct out_with_r = pedersen_hash::hash({ out_1_with_r, out_2 }); - info("num gates = ", builder.get_estimated_num_finalized_gates()); - - bool result = CircuitChecker::check(builder); - EXPECT_EQ(result, true); - EXPECT_EQ(bool(out_1_with_zero.get_value() == out_1_with_r.get_value()), true); fr hash_native_1_with_zero = crypto::pedersen_hash::hash({ zero.get_value(), one.get_value() }); @@ -96,6 +91,8 @@ template class StdlibPedersen : public testing::Test { EXPECT_EQ(out_with_zero.get_value(), hash_native_with_zero); EXPECT_EQ(out_with_r.get_value(), hash_native_with_r); EXPECT_EQ(hash_native_with_zero, hash_native_with_r); + + check_circuit_and_gate_count(builder, 3482); } static void test_pedersen_large() @@ -120,35 +117,7 @@ template class StdlibPedersen : public testing::Test { builder.set_public_input(left.witness_index); - info("num gates = ", builder.get_estimated_num_finalized_gates()); - - bool result = CircuitChecker::check(builder); - EXPECT_EQ(result, true); - } - - static void test_hash_byte_array() - { - const size_t num_input_bytes = 351; - - Builder builder; - - std::vector input; - input.reserve(num_input_bytes); - for (size_t i = 0; i < num_input_bytes; ++i) { - input.push_back(engine.get_random_uint8()); - } - - fr expected = crypto::pedersen_hash::hash_buffer(input); - - byte_array_ct circuit_input(&builder, input); - auto result = pedersen_hash::hash_buffer(circuit_input); - - EXPECT_EQ(result.get_value(), expected); - - info("num gates = ", builder.get_estimated_num_finalized_gates()); - - bool proof_result = CircuitChecker::check(builder); - EXPECT_EQ(proof_result, true); + check_circuit_and_gate_count(builder, 40379); } static void test_multi_hash() @@ -196,10 +165,13 @@ template class StdlibPedersen : public testing::Test { EXPECT_EQ(result.get_value(), 
expected); } - info("num gates = ", builder.get_estimated_num_finalized_gates()); - - bool proof_result = CircuitChecker::check(builder); - EXPECT_EQ(proof_result, true); + // Note: gate count delta is an illusion due to extra constants added by default in the Mega builder which then + // get resused as ROM indices in the underlying batch mul algorithm (only applies for num_inputs > 2). + if constexpr (std::is_same_v) { + check_circuit_and_gate_count(builder, 9724); + } else { + check_circuit_and_gate_count(builder, 9721); + } } static void test_hash_eight() @@ -220,6 +192,14 @@ template class StdlibPedersen : public testing::Test { auto result = pedersen_hash::hash(witness_inputs, hash_idx); EXPECT_EQ(result.get_value(), expected); + + // Note: gate count delta is an illusion due to extra constants added by default in the Mega builder which then + // get resused as ROM indices in the underlying batch mul algorithm (only applies for num_inputs > 2). + if constexpr (std::is_same_v) { + check_circuit_and_gate_count(builder, 5417); + } else { + check_circuit_and_gate_count(builder, 5414); + } } static void test_hash_constants() @@ -242,10 +222,184 @@ template class StdlibPedersen : public testing::Test { auto result = pedersen_hash::hash(witness_inputs); EXPECT_EQ(result.get_value(), expected); + + // Note: gate count delta is an illusion due to extra constants added by default in the Mega builder which then + // get resused as ROM indices in the underlying batch mul algorithm (only applies for num_inputs > 2). 
+ if constexpr (std::is_same_v) { + check_circuit_and_gate_count(builder, 3997); + } else { + check_circuit_and_gate_count(builder, 3994); + } + } + + static void test_empty_input() + { + Builder builder; + std::vector empty_inputs_native; + std::vector empty_inputs; + + [[maybe_unused]] auto native_result = crypto::pedersen_hash::hash(empty_inputs_native); + auto stdlib_result = pedersen_hash::hash(empty_inputs); + + EXPECT_EQ(stdlib_result.get_value(), fr::zero()); + + // NOTE: Empty input handling differs between native and stdlib implementations because the representation of + // the point at infinity differs + // EXPECT_NE(stdlib_result.get_value(), native_result); // They are different! + + check_circuit_and_gate_count(builder, 0); // Empty input returns 0 + } + + static void test_single_input() + { + Builder builder; + + fr value = fr::random_element(); + fr_ct witness = witness_ct(&builder, value); + + auto result = pedersen_hash::hash({ witness }); + auto expected = crypto::pedersen_hash::hash({ value }); + + EXPECT_EQ(result.get_value(), expected); + + check_circuit_and_gate_count(builder, 2823); + } + + static void test_large_inputs() + { + Builder builder; + std::vector native_inputs; + std::vector witness_inputs; + + constexpr size_t size = 200; + for (size_t i = 0; i < size; ++i) { + native_inputs.push_back(fr::random_element()); + witness_inputs.push_back(witness_ct(&builder, native_inputs.back())); + } + + auto result = pedersen_hash::hash(witness_inputs); + auto expected = crypto::pedersen_hash::hash(native_inputs); + + EXPECT_EQ(result.get_value(), expected); + + // Note: gate count delta is an illusion due to extra constants added by default in the Mega builder which then + // get resused as ROM indices in the underlying batch mul algorithm (only applies for num_inputs > 2). 
+ if constexpr (std::is_same_v) { + check_circuit_and_gate_count(builder, 62376); + } else { + check_circuit_and_gate_count(builder, 62373); + } + } + + static void test_generator_contexts() + { + using GeneratorContext = typename crypto::GeneratorContext; + + Builder builder; + + std::vector inputs; + std::vector witness_inputs; + + for (size_t i = 0; i < 5; ++i) { + inputs.push_back(fr::random_element()); + witness_inputs.push_back(witness_ct(&builder, inputs.back())); + } + + // Test 1: Implicit conversion from size_t to GeneratorContext + // When passing a size_t, it becomes GeneratorContext(offset) with default domain separator + for (size_t hash_idx : { size_t(0), size_t(1), size_t(10), size_t(100), size_t(1000) }) { + // This implicitly creates GeneratorContext(hash_idx) + GeneratorContext ctx{ hash_idx }; // For native comparison + auto result = pedersen_hash::hash(witness_inputs, ctx); + auto expected = crypto::pedersen_hash::hash(inputs, ctx); + + EXPECT_EQ(result.get_value(), expected); + } + + // Test 2: Verify that different offsets produce different results + auto result_offset_0 = pedersen_hash::hash(witness_inputs, 0); + auto result_offset_1 = pedersen_hash::hash(witness_inputs, 1); + EXPECT_NE(result_offset_0.get_value(), result_offset_1.get_value()); + + // Test 3: Explicit GeneratorContext with custom domain separators + // Different domain separators should produce different generators and thus different hashes + GeneratorContext ctx_domain_a(0, "domain_a"); + GeneratorContext ctx_domain_b(0, "domain_b"); + GeneratorContext ctx_default(0); // Uses default domain separator + + auto result_domain_a = pedersen_hash::hash(witness_inputs, ctx_domain_a); + auto result_domain_b = pedersen_hash::hash(witness_inputs, ctx_domain_b); + auto result_default = pedersen_hash::hash(witness_inputs, ctx_default); + + // Verify native implementation matches + auto expected_domain_a = crypto::pedersen_hash::hash(inputs, ctx_domain_a); + auto expected_domain_b = 
crypto::pedersen_hash::hash(inputs, ctx_domain_b); + auto expected_default = crypto::pedersen_hash::hash(inputs, ctx_default); + + EXPECT_EQ(result_domain_a.get_value(), expected_domain_a); + EXPECT_EQ(result_domain_b.get_value(), expected_domain_b); + EXPECT_EQ(result_default.get_value(), expected_default); + + // Different domain separators should produce different results + EXPECT_NE(result_domain_a.get_value(), result_domain_b.get_value()); + EXPECT_NE(result_domain_a.get_value(), result_default.get_value()); + EXPECT_NE(result_domain_b.get_value(), result_default.get_value()); + + // Test 4: Same domain separator with different offsets + GeneratorContext ctx_offset_10(10, "domain_test"); + GeneratorContext ctx_offset_20(20, "domain_test"); + + auto result_offset_10 = pedersen_hash::hash(witness_inputs, ctx_offset_10); + auto result_offset_20 = pedersen_hash::hash(witness_inputs, ctx_offset_20); + + // Different offsets with same domain should produce different results + EXPECT_NE(result_offset_10.get_value(), result_offset_20.get_value()); + + // Note: gate count delta is an illusion due to extra constants added by default in the Mega builder which then + // get resused as ROM indices in the underlying batch mul algorithm (only applies for num_inputs > 2). 
+ if constexpr (std::is_same_v) { + check_circuit_and_gate_count(builder, 21845); + } else { + check_circuit_and_gate_count(builder, 21842); + } + } + + static void test_determinism() + { + Builder builder; + + std::vector inputs; + std::vector witness_inputs; + + for (size_t i = 0; i < 5; ++i) { + inputs.push_back(fr::random_element()); + witness_inputs.push_back(witness_ct(&builder, inputs.back())); + } + + // Hash the same inputs multiple times + auto result1 = pedersen_hash::hash(witness_inputs); + auto result2 = pedersen_hash::hash(witness_inputs); + auto result3 = pedersen_hash::hash(witness_inputs); + + // All should produce the same result + EXPECT_EQ(result1.get_value(), result2.get_value()); + EXPECT_EQ(result2.get_value(), result3.get_value()); + + // Also verify against native implementation + auto expected = crypto::pedersen_hash::hash(inputs); + EXPECT_EQ(result1.get_value(), expected); + + // Note: gate count delta is an illusion due to extra constants added by default in the Mega builder which then + // get resused as ROM indices in the underlying batch mul algorithm (only applies for num_inputs > 2). + if constexpr (std::is_same_v) { + check_circuit_and_gate_count(builder, 6645); + } else { + check_circuit_and_gate_count(builder, 6642); + } } }; -using CircuitTypes = testing::Types; +using CircuitTypes = testing::Types; TYPED_TEST_SUITE(StdlibPedersen, CircuitTypes); @@ -272,13 +426,18 @@ TYPED_TEST(StdlibPedersen, TestHash) EXPECT_EQ(result.get_value(), expected); - bool proof_result = CircuitChecker::check(builder); - EXPECT_EQ(proof_result, true); + // Note: gate count delta is an illusion due to extra constants added by default in the Mega builder which then + // get resused as ROM indices in the underlying batch mul algorithm (only applies for num_inputs > 2). 
+ if constexpr (std::is_same_v) { + check_circuit_and_gate_count(builder, 5565); + } else { + check_circuit_and_gate_count(builder, 5562); + } } TYPED_TEST(StdlibPedersen, Small) { - TestFixture::test_pedersen(); + TestFixture::test_pedersen_two(); }; TYPED_TEST(StdlibPedersen, EdgeCases) @@ -291,11 +450,6 @@ HEAVY_TYPED_TEST(StdlibPedersen, Large) TestFixture::test_pedersen_large(); }; -TYPED_TEST(StdlibPedersen, HashByteArray) -{ - TestFixture::test_hash_byte_array(); -}; - TYPED_TEST(StdlibPedersen, MultiHash) { TestFixture::test_multi_hash(); @@ -310,3 +464,28 @@ TYPED_TEST(StdlibPedersen, HashConstants) { TestFixture::test_hash_constants(); }; + +TYPED_TEST(StdlibPedersen, EmptyInput) +{ + TestFixture::test_empty_input(); +}; + +TYPED_TEST(StdlibPedersen, SingleInput) +{ + TestFixture::test_single_input(); +}; + +TYPED_TEST(StdlibPedersen, LargeInputs) +{ + TestFixture::test_large_inputs(); +}; + +TYPED_TEST(StdlibPedersen, GeneratorContexts) +{ + TestFixture::test_generator_contexts(); +}; + +TYPED_TEST(StdlibPedersen, Determinism) +{ + TestFixture::test_determinism(); +}; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/pfuzzer/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/stdlib/hash/pfuzzer/CMakeLists.txt new file mode 100644 index 000000000000..c9e8d94fdc2a --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/pfuzzer/CMakeLists.txt @@ -0,0 +1,6 @@ +if (FUZZING) +barretenberg_module( + stdlib_pfuzzer + stdlib_poseidon2 + stdlib_pedersen_hash) +endif() \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/pfuzzer/poseidon2_pedersen.fuzzer.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/pfuzzer/poseidon2_pedersen.fuzzer.cpp new file mode 100644 index 000000000000..9ca60f5c6998 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/pfuzzer/poseidon2_pedersen.fuzzer.cpp @@ -0,0 +1,235 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], date: YYYY-MM-DD } +// 
external_1: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } +// ===================== + +/** + * @file poseidon2_pedersen.fuzzer.cpp + * @brief Fuzzer for testing Poseidon2 and Pedersen hash Ultra circuits against native implementations + * + * @details This fuzzer implements differential testing of both Poseidon2 and Pedersen hash function + * circuits by comparing Ultra circuit outputs with native implementation results. The fuzzer: + * + * 1. **Dual Algorithm Testing**: Tests both Poseidon2 and Pedersen hash functions in a single fuzzer + * 2. **Algorithm Selection**: Uses the first bit of input data to select between Poseidon2 (0) and Pedersen (1) + * 3. **Structured Input Format**: Uses FieldVM data for deterministic field element generation + * 4. **Ultra Circuit Testing**: Focuses on UltraCircuitBuilder for comprehensive circuit testing + * 5. **Differential Testing**: Compares circuit output with trusted native implementation + * 6. **Variable Input Lengths**: Tests inputs of any length (controlled by fuzzer configuration) + * 7. **Edge Cases**: Tests zero inputs, repeated inputs, and boundary conditions + * 8. 
**Circuit Verification**: Validates circuit correctness using CircuitChecker + * + * + */ + +#include "barretenberg/circuit_checker/circuit_checker.hpp" +#include "barretenberg/crypto/pedersen_hash/pedersen.hpp" +#include "barretenberg/crypto/poseidon2/poseidon2.hpp" +#include "barretenberg/ecc/fields/field.fuzzer.hpp" +#include "barretenberg/numeric/random/engine.hpp" +#include "barretenberg/numeric/uint256/uint256.hpp" +#include "barretenberg/stdlib/hash/pedersen/pedersen.hpp" +#include "barretenberg/stdlib/hash/poseidon2/poseidon2.hpp" +#include "barretenberg/stdlib/primitives/curves/bn254.hpp" +#include +#include +#include +#include +#include + +using namespace bb; +using Fr = fr; +using native_poseidon2 = crypto::Poseidon2; +using native_pedersen = crypto::pedersen_hash; + +// Input structure constants +static constexpr size_t SINGLE_CHUNK_SIZE = 129; // 1 byte for selection of element + 128 bytes for FieldVM data + +/** + * @brief Parse input structure and generate field elements using FieldVM + * @param data Raw input data + * @param size Data size + * @return Vector of field elements for Poseidon2 + */ +std::vector parse_input_and_generate_elements(const uint8_t* data, size_t size) +{ + std::vector elements; + + // Need at least header size + if (size < SINGLE_CHUNK_SIZE) { + return elements; + } + + // Parse header: first byte is number of elements (0-128) + size_t num_elements = size / SINGLE_CHUNK_SIZE; + + // Create FieldVM instance for field element generation + FieldVM field_vm(false, 65536); // Disable debug, max 65536 steps + + // Disable heavy operations for better performance + field_vm.settings.enable_inv = false; // Disable inversion + field_vm.settings.enable_sqrt = false; // Disable square root + field_vm.settings.enable_batch_invert = false; // Disable batch inversion + field_vm.settings.enable_pow = false; // Disable power operation + field_vm.settings.enable_div = false; // Disable division + field_vm.settings.enable_div_assign = false; // 
Disable division assignment + + // Run FieldVM with data after header (bytes 129+) + size_t fieldvm_data_size = size - num_elements; + if (fieldvm_data_size > 0) { + field_vm.run(data, fieldvm_data_size); + } + + // Extract elements based on indices in header + elements.reserve(num_elements); + for (size_t i = 0; i < num_elements; ++i) { + uint8_t index_byte = data[fieldvm_data_size + i]; // Bytes 1-128 contain indices + + size_t field_index = index_byte % 32; // Wrap around if needed + + // Get element from FieldVM state + Fr element = field_vm.field_internal_state[field_index]; + elements.emplace_back(element); + } + + return elements; +} + +/** + * @brief Test Poseidon2 circuit with specified builder type + * @tparam Builder Circuit builder type + * @param inputs Vector of field elements to hash + * @return true if test passes, false otherwise + */ +template bool test_poseidon2_circuit(const std::vector& inputs) +{ + try { + using field_ct = stdlib::field_t; + using witness_ct = stdlib::witness_t; + + Builder builder; + std::vector circuit_inputs; + circuit_inputs.reserve(inputs.size()); + + // Convert native field elements to circuit witnesses + for (const auto& input : inputs) { + circuit_inputs.emplace_back(field_ct(witness_ct(&builder, input))); + } + + // Compute hash using circuit + auto circuit_result = stdlib::poseidon2::hash(circuit_inputs); + + // Compute hash using native implementation + auto native_result = native_poseidon2::hash(inputs); + + // Compare results + if (circuit_result.get_value() != native_result) { + std::cerr << "Poseidon2 circuit mismatch detected!" 
<< std::endl; + std::cerr << "Input length: " << inputs.size() << std::endl; + std::cerr << "Circuit result: " << circuit_result.get_value() << std::endl; + std::cerr << "Native result: " << native_result << std::endl; + return false; + } + + // Verify circuit correctness + bool circuit_check = CircuitChecker::check(builder); + if (!circuit_check) { + std::cerr << "Poseidon2 circuit check failed!" << std::endl; + std::cerr << "Input length: " << inputs.size() << std::endl; + return false; + } + + return true; + + } catch (const std::exception& e) { + std::cerr << "Exception in Poseidon2 circuit test: " << e.what() << std::endl; + std::cerr << "Input length: " << inputs.size() << std::endl; + return false; + } +} +/** + * @brief Test Pedersen circuit with specified builder type + * @tparam Builder Circuit builder type + * @param inputs Vector of field elements to hash + * @return true if test passes, false otherwise + */ +template bool test_pedersen_circuit(const std::vector& inputs) +{ + try { + using field_ct = stdlib::field_t; + using witness_ct = stdlib::witness_t; + + Builder builder; + std::vector circuit_inputs; + circuit_inputs.reserve(inputs.size()); + + // Convert native field elements to circuit witnesses + for (const auto& input : inputs) { + circuit_inputs.emplace_back(field_ct(witness_ct(&builder, input))); + } + + // Compute hash using circuit + auto circuit_result = stdlib::pedersen_hash::hash(circuit_inputs); + + // Compute hash using native implementation + auto native_result = native_pedersen::hash(inputs); + + // Compare results + if (circuit_result.get_value() != native_result) { + std::cerr << "Pedersen circuit mismatch detected!" 
<< std::endl; + std::cerr << "Input length: " << inputs.size() << std::endl; + std::cerr << "Circuit result: " << circuit_result.get_value() << std::endl; + std::cerr << "Native result: " << native_result << std::endl; + return false; + } + + // Verify circuit correctness + bool circuit_check = CircuitChecker::check(builder); + if (!circuit_check) { + std::cerr << "Pedersen circuit check failed!" << std::endl; + std::cerr << "Input length: " << inputs.size() << std::endl; + return false; + } + + return true; + + } catch (const std::exception& e) { + std::cerr << "Exception in Pedersen circuit test: " << e.what() << std::endl; + std::cerr << "Input length: " << inputs.size() << std::endl; + return false; + } +} + +/** + * @brief Main fuzzer entry point + * @param Data Input data from libfuzzer + * @param Size Size of input data + * @return 0 for success, non-zero for failure + */ +extern "C" int LLVMFuzzerTestOneInput(const uint8_t* Data, size_t Size) +{ + // Security check: Ensure minimum input size + if (Size == 0) { + return 0; // No input data + } + bool is_poseidon2 = Data[0] & 0x01; + + // Parse input structure and generate field elements using FieldVM + auto field_elements = parse_input_and_generate_elements(Data + 1, Size - 1); + + // Security check: Ensure we have valid elements + if (field_elements.empty()) { + return 0; // No valid field elements generated + } + + // Test with Ultra circuit builder only + bool test_result = is_poseidon2 ? 
test_poseidon2_circuit(field_elements) + : test_pedersen_circuit(field_elements); + + if (!test_result) { + abort(); // Circuit test failed + } + + return 0; // Success +} diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/README.md b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/README.md new file mode 100644 index 000000000000..ee780cb08547 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/README.md @@ -0,0 +1,153 @@ + +# stdlib Poseidon2 Hash Implementation + +Poseidon2 is a **SNARK-friendly cryptographic hash** designed to be efficient inside prime-field arithmetic circuits. +It follows the [Poseidon2 paper](https://eprint.iacr.org/2023/323.pdf) and refines the original Poseidon hash. + +This implementation includes: + +- A **sponge construction** over the BN254 scalar field following the (draft) C2SP Poseidon Sponge spec based on the [Duplex Sponge model](https://keccak.team/files/SpongeDuplex.pdf). +- The **Poseidon2 permutation**, i.e.\ the round function used by the sponge. +- **Circuit custom gate relations** that enforce the permutation’s correctness. + + +## The Sponge Construction + +The sponge absorbs input elements into an internal state, applies permutations, and squeezes output elements. + +#### Sponge constants. + - **State size (t)**: 4 field elements + - **Rate (r)**: 3 elements + - **Capacity (c)**: 1 element + + +### Details + +Let the input be +\f[ +\mathbf{a} = (a_0, a_1, \dots, a_{N-1}). +\f] +Partition it into blocks of size \f$r=3\f$: +\f[ +B_j = (a_{3j},, a_{3j+1},, a_{3j+2}) \quad\text{(pad missing entries with 0)},\qquad +m = \left\lceil \frac{N}{3}\right\rceil . +\f] + +### Padding +In Poseidon paper, the padding scheme for variable input length hashing suggests padding with \f$ 10^\ast\f$. 
+ +"Domain Separation for Poseidon" section (see 4.2 in [Poseidon](https://eprint.iacr.org/2019/458.pdf)) suggests using domain separation IV defined as follows +\f[ + \mathrm{IV} = (\texttt{input_length}^{64}) +\f] +Initialize the state: +\f[ + \mathbf{s}^{(0)} = (0,0,0,\mathrm{IV}). +\f] + +Since we only use Poseidon2 sponge with variable length inputs and the length is a part of domain separation, we can pad the inputs with \f$ 0^\ast \f$, which would not lead to collisions (tested \ref StdlibPoseidon2< Builder >::test_padding_collisions "here"). + +Note that we initialize \f$ \mathrm{IV} \f$ as a fixed witness. It ensures that the first invocation of the Poseidon2 permutation leads to a state where all entries are **normalized** witnesses, i.e. they have `multiplicative_constant` equal 1, and `additive_constant` equal 0. + +#### Absorb phase + +For each block \f$j=0,\dots,m-1\f$, +\f[ +\mathbf{s}^{(j+1)} = P\left(\mathbf{s}^{(j)} + (B_j,0)\right), +\f] +where \f$P\f$ is the Poseidon2 permutation and \f$(B_j,0)\f$ is an array of size \f$ 4 \f$ with \f$r\f$ state elements and a \f$0\f$ capacity limb. + +#### Squeeze (single output) + +After absorption, produce one output field element via one duplex step: +\f[ +y_0 = \big(P(\mathbf{s}^{(m)})\big)_0. +\f] + +## The Poseidon2 Permutation + +Each permutation consists of: + +1. **Initial linear layer**: multiply state by external matrix \f$M_E\f$. Corresponds to \ref bb::stdlib::Poseidon2Permutation< Builder >::matrix_multiplication_external "matrix_multiplication_external" method. +2. **4 External rounds (full S-box)**: + - Record the state and the correspoding round constants \f$ c_{0}^{(i)} \f$ into a \ref bb::UltraCircuitBuilder_< FF >::create_poseidon2_external_gate "Poseidon2 External Gate". + - _Natively_ compute the next state. + - Re-write the state with the new witnesses. 
+ - After the final round, \ref bb::stdlib::Poseidon2Permutation< Builder >::record_current_state_into_next_row "record the computed state" in the next row of the Poseidon2 **external** gates block, + as it is required for the custom gate relation. +3. **56 Internal rounds (partial S-box)**: + - Record the state and the correspoding round constants \f$ c_{0}^{(i)} \f$ into a \ref bb::UltraCircuitBuilder_< FF >::create_poseidon2_internal_gate "Poseidon2 Internal Gate". + - _Natively_ compute the next state. + - Re-write the state with the new witnesses. + - After the final round, \ref bb::stdlib::Poseidon2Permutation< Builder >::record_current_state_into_next_row "record the computed state" in the next row of the Poseidon2 **internal** gates block, +4. **Final external rounds** (same as step 2). + +Note that in general, step 1 requires 6 arithmetic gates, the steps 2-4 create total number of rounds + 3 gates. Hence a single invocation of Poseidon2 Permutation results in 73 gates. + +### External Matrix +As proposed in Section 5.1 of [Poseidon2 paper](https://eprint.iacr.org/2023/323.pdf), we set +\f[ +M_E = + \begin{bmatrix} + 5 & 7 & 1 & 3 \\ + 4 & 6 & 1 & 1 \\ + 1 & 3 & 5 & 7 \\ + 1 & 1 & 4 & 6 + \end{bmatrix} +\f] + + +### Internal Matrix + +\f[ +M_I = + \begin{bmatrix} + D_1 & 1 & 1 & 1 \\ + 1 & D_2 & 1 & 1 \\ + 1 & 1 & D_3 & 1 \\ + 1 & 1 & 1 & D_4 + \end{bmatrix} +\f] + +### Constants + +The constants are generated using the sage [script authored by Markus Schofnegger](https://github.com/HorizenLabs/poseidon2/blob/main/poseidon2_rust_params.sage) from Horizen Labs. + +### Security Level +Based on Section 3.2 of [Poseidon2 paper](https://eprint.iacr.org/2023/323.pdf). + +Given \f$ R_P = 56 \f$, \f$ R_F = 8\f$, \f$ d = 5\f$, \f$ \log_2(p) \approx 254 \f$, we get \f$ 128 \f$ bits of security. + +## Custom Gate Relations + +For an external round with state \f$ \mathbf{u}=(u_1,u_2,u_3,u_4) \f$, define \f$ \mathbf{v}=M_E\cdot\mathbf{u}\f$. 
+\ref bb::Poseidon2ExternalRelationImpl< FF_ > "Poseidon2 External Relation" enforces that the permuted values equal the values in the next row (accessed via shifts): +\f[ +v_k = w_{k,\mathrm{shift}} \qquad \text{for } k \in \{1,2,3,4\}. +\f] + +We encode four independent constraints under a selector \f$ q_{\mathrm{poseidon2_external}}\f$ and aggregate them with +independent challenges \f$ \alpha_i = \alpha_{i, Poseidon2_ext}\f$ from `SubrelationSeparators`: +\f[ +q_{\mathrm{poseidon2_external}}\cdot +\Big( +\alpha_0\big(v_1 - w_{1,\mathrm{shift}}\big) + +\alpha_1\big(v_2 - w_{2,\mathrm{shift}}\big) + +\alpha_2\big(v_3 - w_{3,\mathrm{shift}}\big) + +\alpha_3\big(v_4 - w_{4,\mathrm{shift}}\big) +\Big) = 0. +\f] +To ensure that the relation holds point-wise on the hypercube, the equation above is also multiplied by the appropriate +scaling factor arising from \ref bb::GateSeparatorPolynomial< FF > "GateSeparatorPolynomial". + +\ref bb::Poseidon2InternalRelationImpl< FF_ > "Internal rounds" follow the same pattern, using \f$ M_I \f$ and the partial S-box on the first element. + + +## Number of Gates + +Hashing a single field element costs \f$ 73 \f$ gates. As above, let \f$ N > 1\f$ be the input size. Define \f$ m = \lceil N/3 \rceil \f$ and let \f$ N_3 = N\pmod{3} \f$. The number of gates depends on the number of padded fields equal to \f$ N_3 \f$. If \f$ N_3 = 0\f$, we get +\f[ 1 + 73\cdot m + 3\cdot (m - 1) \f] +gates, otherwise we get +\f[ 1 + 73\cdot m + 3\cdot (m - 2) + N_3.\f] + +According to TACEO blog post [Poseidon{2} for Noir](https://core.taceo.io/articles/poseidon2-for-noir/), a single permutation cost for \f$ t = 4 \f$ implemented without Poseidon2 custom gates is \f$ 2313 \f$ gates. 
diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.circuit.failure.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.circuit.failure.test.cpp new file mode 100644 index 000000000000..024aad2f7b18 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.circuit.failure.test.cpp @@ -0,0 +1,185 @@ +#include "barretenberg/flavor/ultra_flavor.hpp" +#include "barretenberg/relations/relation_parameters.hpp" +#include "barretenberg/sumcheck/sumcheck.hpp" + +#include + +using namespace bb; + +class Poseidon2FailureTests : public ::testing::Test { + public: + using Flavor = UltraFlavor; + using ProverInstance = ProverInstance_; + using SumcheckProver = SumcheckProver; + using SumcheckVerifier = SumcheckVerifier; + using FF = Flavor::FF; + using Builder = Flavor::CircuitBuilder; + using Transcript = Flavor::Transcript; + using SubrelationSeparators = Flavor::SubrelationSeparators; + using RelationParameters = RelationParameters; + + void modify_selector(auto& selector) + { + size_t start_idx = selector.start_index(); + size_t end_idx = selector.end_index(); + + // Flip the first non-zero selector value. + for (size_t idx = start_idx; idx < end_idx; idx++) { + if (selector.at(idx) == 1) { + selector.at(idx) = 0; + break; + } + } + } + + void modify_witness(const auto& selector, auto& witness) + { + size_t start_idx = selector.start_index(); + size_t end_idx = selector.end_index(); + + size_t selector_enabled_idx{ 0 }; + // Find the first row index where the selector is enabled. 
+ for (size_t idx = start_idx; idx < end_idx; idx++) { + if (selector.at(idx) == 1) { + selector_enabled_idx = idx; + break; + } + } + // Modify the witness + witness.at(selector_enabled_idx) += 1; + } + void tamper_with_shifts(const auto& selector, auto& witness, bool external) + { + size_t start_idx = selector.start_index(); + size_t end_idx = selector.end_index(); + + size_t selector_enabled_idx{ 0 }; + + for (size_t idx = start_idx; idx < end_idx; idx++) { + if (selector.at(idx) == 1) { + selector_enabled_idx = idx; + break; + } + } + const size_t round_size = external ? 4 : 56; + size_t shift_idx = selector_enabled_idx + round_size; + // The selector must be zero at the row corresponding to the shift. + EXPECT_EQ(selector.at(shift_idx), 0); + // Modify the witness value. As Poseidon2ExternalRelation is comparing this value to the result of applying the + // S-box and M_E to the previous row, this must lead to a sumcheck failure. + witness.at(shift_idx) += 1; + } + + void hash_single_input(Builder& builder) + { + stdlib::field_t random_input(stdlib::witness_t(&builder, fr::random_element())); + random_input.fix_witness(); + [[maybe_unused]] auto hash = stdlib::poseidon2::hash({ random_input }); + } + + void prove_and_verify(const std::shared_ptr& prover_instance, bool expected_result) + { + const size_t virtual_log_n = Flavor::VIRTUAL_LOG_N; + + // Random subrelation separators are needed here to make sure that the sumcheck is failing because of the wrong + // Poseidon2 selector/witness values. 
+ SubrelationSeparators subrelation_separators{}; + for (auto& alpha : subrelation_separators) { + alpha = FF::random_element(); + } + + std::vector gate_challenges(virtual_log_n); + + // Random gate challenges ensure that relations are satisfied at every point of the hypercube + for (auto& beta : gate_challenges) { + beta = FF::random_element(); + } + + RelationParameters relation_parameters; + + for (auto& rel_param : relation_parameters.get_to_fold()) { + rel_param = FF::random_element(); + } + auto prover_transcript = std::make_shared(); + + SumcheckProver sumcheck_prover(prover_instance->dyadic_size(), + prover_instance->polynomials, + prover_transcript, + subrelation_separators, + gate_challenges, + relation_parameters, + virtual_log_n); + auto proof = sumcheck_prover.prove(); + + auto verifier_transcript = std::make_shared(); + verifier_transcript->load_proof(prover_transcript->export_proof()); + + SumcheckVerifier verifier(verifier_transcript, subrelation_separators, virtual_log_n); + auto result = verifier.verify(relation_parameters, gate_challenges, std::vector(virtual_log_n, 1)); + EXPECT_EQ(result.verified, expected_result); + }; +}; + +TEST_F(Poseidon2FailureTests, WrongSelectorValues) +{ + Builder builder; + + // Construct a circuit that hashes a single witness field element. + hash_single_input(builder); + + // Convert circuit to polynomials. 
+ auto prover_instance = std::make_shared>(builder); + { + // Disable Poseidon2 External selector in the first active row + modify_selector(prover_instance->polynomials.q_poseidon2_external); + + // Run sumcheck on the invalidated data + prove_and_verify(prover_instance, false); + } + { + // Disable Poseidon2 Internal selector in the first active row + modify_selector(prover_instance->polynomials.q_poseidon2_internal); + + // Run sumcheck on the invalidated data + prove_and_verify(prover_instance, false); + } +} + +TEST_F(Poseidon2FailureTests, WrongWitnessValues) +{ + Builder builder; + + hash_single_input(builder); + + auto prover_instance = std::make_shared>(builder); + { + modify_witness(prover_instance->polynomials.q_poseidon2_external, prover_instance->polynomials.w_l); + prove_and_verify(prover_instance, false); + } + { + modify_witness(prover_instance->polynomials.q_poseidon2_internal, prover_instance->polynomials.w_r); + prove_and_verify(prover_instance, false); + } +} + +TEST_F(Poseidon2FailureTests, TamperingWithShifts) +{ + Builder builder; + + hash_single_input(builder); + + auto prover_instance = std::make_shared>(builder); + { + bool external_round = true; + tamper_with_shifts( + prover_instance->polynomials.q_poseidon2_external, prover_instance->polynomials.w_l, external_round); + prove_and_verify(prover_instance, false); + } + + { + bool external_round = false; + tamper_with_shifts( + prover_instance->polynomials.q_poseidon2_internal, prover_instance->polynomials.w_l, external_round); + prove_and_verify(prover_instance, false); + } +} diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.cpp index 01ebd680622f..44a02087471e 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.cpp @@ -5,46 +5,20 @@ // ===================== #include 
"barretenberg/stdlib/hash/poseidon2/poseidon2.hpp" -#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" namespace bb::stdlib { -using namespace bb; - /** * @brief Hash a vector of field_ct. */ -template field_t poseidon2::hash(C& builder, const std::vector& inputs) +template field_t poseidon2::hash(const std::vector& inputs) { /* Run the sponge by absorbing all the input and squeezing one output. - * This should just call the sponge variable length hash function * */ - return Sponge::hash_internal(builder, inputs); + return Sponge::hash_internal(inputs); } -/** - * @brief Hash a byte_array. - */ -template field_t poseidon2::hash_buffer(C& builder, const stdlib::byte_array& input) -{ - const size_t num_bytes = input.size(); - const size_t bytes_per_element = 31; // 31 bytes in a fr element - size_t num_elements = static_cast(num_bytes % bytes_per_element != 0) + (num_bytes / bytes_per_element); - - std::vector elements; - for (size_t i = 0; i < num_elements; ++i) { - size_t bytes_to_slice = 0; - if (i == num_elements - 1) { - bytes_to_slice = num_bytes - (i * bytes_per_element); - } else { - bytes_to_slice = bytes_per_element; - } - auto element = static_cast(input.slice(i * bytes_per_element, bytes_to_slice)); - elements.emplace_back(element); - } - return hash(builder, elements); -} template class poseidon2; template class poseidon2; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.hpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.hpp index 8998843311a5..1b84031a4dec 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.hpp @@ -7,11 +7,8 @@ #pragma once #include "barretenberg/crypto/poseidon2/poseidon2_params.hpp" #include "barretenberg/stdlib/hash/poseidon2/sponge/sponge.hpp" -#include "barretenberg/stdlib/primitives/byte_array/byte_array.hpp" #include "barretenberg/stdlib/primitives/field/field.hpp" 
-#include "../../primitives/circuit_builders/circuit_builders.hpp" - namespace bb::stdlib { /** @@ -24,15 +21,12 @@ template class poseidon2 { private: using field_ct = stdlib::field_t; - using bool_ct = stdlib::bool_t; using Params = crypto::Poseidon2Bn254ScalarFieldParams; - using Permutation = Poseidon2Permutation; - // We choose our rate to be t-1 and capacity to be 1. - using Sponge = FieldSponge; + using Permutation = Poseidon2Permutation; + using Sponge = FieldSponge; public: - static field_ct hash(Builder& builder, const std::vector& in); - static field_ct hash_buffer(Builder& builder, const stdlib::byte_array& input); + static field_ct hash(const std::vector& in); }; } // namespace bb::stdlib diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.test.cpp index 6ce5709b5024..d2d60cdd4d66 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.test.cpp @@ -3,7 +3,7 @@ #include "barretenberg/common/test.hpp" #include "barretenberg/crypto/poseidon2/poseidon2.hpp" #include "barretenberg/numeric/random/engine.hpp" -#include "barretenberg/stdlib/primitives/curves/bn254.hpp" +#include "barretenberg/ultra_honk/prover_instance.hpp" using namespace bb; namespace { @@ -14,13 +14,34 @@ template class StdlibPoseidon2 : public testing::Test { using _curve = stdlib::bn254; using byte_array_ct = typename _curve::byte_array_ct; - using fr_ct = typename _curve::ScalarField; + using field_ct = typename _curve::ScalarField; using witness_ct = typename _curve::witness_ct; using public_witness_ct = typename _curve::public_witness_ct; using poseidon2 = typename stdlib::poseidon2; using native_poseidon2 = crypto::Poseidon2; public: + static field_ct w_hex(Builder& builder, const char* hex) { return witness_ct(&builder, uint256_t(hex)); } + static field_ct w_u64(Builder& builder, 
uint64_t v) { return witness_ct(&builder, uint256_t(v)); } + + static std::size_t gate_count(std::size_t N) + { + if (N == 1) { + return 73; + } + const size_t P_cost = 73; + const size_t D_full_adds = 3; + + // Number of Poseidon2 permutation invocations + size_t P_N = (N + 2) / 3; + // Number of extra additions in sqeeze + size_t N_3 = N % 3; + if (N_3 == 0) { + return (1 + P_N * P_cost + (P_N - 1) * D_full_adds); + } else { + return (1 + P_N * P_cost + (P_N - 2) * D_full_adds + N_3); + } + } /** * @brief Call poseidon2 on a vector of inputs * @@ -28,8 +49,6 @@ template class StdlibPoseidon2 : public testing::Test { */ static void test_hash(size_t num_inputs) { - using field_ct = stdlib::field_t; - using witness_ct = stdlib::witness_t; auto builder = Builder(); std::vector inputs; @@ -40,10 +59,13 @@ template class StdlibPoseidon2 : public testing::Test { inputs_native.emplace_back(element); inputs.emplace_back(field_ct(witness_ct(&builder, element))); } + size_t num_gates_start = builder.get_estimated_num_finalized_gates(); - auto result = stdlib::poseidon2::hash(builder, inputs); + auto result = stdlib::poseidon2::hash(inputs); auto expected = crypto::Poseidon2::hash(inputs_native); + EXPECT_EQ(gate_count(num_inputs), builder.get_estimated_num_finalized_gates() - num_gates_start); + EXPECT_EQ(result.get_value(), expected); bool proof_result = CircuitChecker::check(builder); @@ -62,12 +84,12 @@ template class StdlibPoseidon2 : public testing::Test { fr left_in = fr::random_element(); fr right_in = fr::random_element(); - fr_ct left = witness_ct(&builder, left_in); - fr_ct right = witness_ct(&builder, right_in); + field_ct left = witness_ct(&builder, left_in); + field_ct right = witness_ct(&builder, right_in); // num_inputs - 1 iterations since the first hash hashes two elements for (size_t i = 0; i < num_inputs - 1; ++i) { - left = poseidon2::hash(builder, { left, right }); + left = poseidon2::hash({ left, right }); } 
builder.set_public_input(left.witness_index); @@ -77,33 +99,6 @@ template class StdlibPoseidon2 : public testing::Test { bool result = CircuitChecker::check(builder); EXPECT_EQ(result, true); } - /** - * @brief Call poseidon2 hash_buffer on a vector of bytes - * - * @param num_input_bytes - */ - static void test_hash_byte_array(size_t num_input_bytes) - { - Builder builder; - - std::vector input; - input.reserve(num_input_bytes); - for (size_t i = 0; i < num_input_bytes; ++i) { - input.push_back(engine.get_random_uint8()); - } - - fr expected = native_poseidon2::hash_buffer(input); - - byte_array_ct circuit_input(&builder, input); - auto result = poseidon2::hash_buffer(builder, circuit_input); - - EXPECT_EQ(result.get_value(), expected); - - info("num gates = ", builder.get_estimated_num_finalized_gates()); - - bool proof_result = CircuitChecker::check(builder); - EXPECT_EQ(proof_result, true); - } static void test_hash_zeros(size_t num_inputs) { @@ -119,7 +114,7 @@ template class StdlibPoseidon2 : public testing::Test { } fr expected = native_poseidon2::hash(inputs); - auto result = poseidon2::hash(builder, witness_inputs); + auto result = poseidon2::hash(witness_inputs); EXPECT_EQ(result.get_value(), expected); } @@ -129,24 +124,208 @@ template class StdlibPoseidon2 : public testing::Test { Builder builder; std::vector inputs; - std::vector> witness_inputs; + std::vector witness_inputs; for (size_t i = 0; i < 8; ++i) { inputs.push_back(bb::fr::random_element()); if (i % 2 == 1) { witness_inputs.push_back(witness_ct(&builder, inputs[i])); } else { - witness_inputs.push_back(fr_ct(&builder, inputs[i])); + witness_inputs.push_back(field_ct(&builder, inputs[i])); } } - fr expected = native_poseidon2::hash(inputs); - auto result = poseidon2::hash(builder, witness_inputs); + native_poseidon2::hash(inputs); + EXPECT_THROW_OR_ABORT(poseidon2::hash(witness_inputs), ".*Sponge inputs should not be stdlib constants.*"); + } - EXPECT_EQ(result.get_value(), expected); - // 
TODO(https://github.com/AztecProtocol/barretenberg/issues/1413): Investigate why this fails. Sees like we - // don't allow poseidon2 to take in constants. - EXPECT_FALSE(CircuitChecker::check(builder)); + static void test_padding_collisions() + { + Builder builder; + + const field_ct random_input(witness_ct(&builder, fr::random_element())); + const field_ct zero(witness_ct(&builder, 0)); + + std::vector witness_inputs_len_1{ random_input }; + std::vector witness_inputs_len_2{ random_input, zero }; + std::vector witness_inputs_len_3{ random_input, zero, zero }; + std::vector> inputs{ witness_inputs_len_1, witness_inputs_len_2, witness_inputs_len_3 }; + + std::vector hashes(3); + + for (size_t idx = 0; idx < 3; idx++) { + hashes[idx] = poseidon2::hash(inputs[idx]).get_value(); + } + + // The domain separation IV depends on the input size, therefore, the hashes must not coincide. + EXPECT_NE(hashes[1], hashes[2]); + EXPECT_NE(hashes[2], hashes[3]); + EXPECT_NE(hashes[1], hashes[3]); + } + + // Test vectors and the expected values are taken from https://github.com/zemse/poseidon2-evm + static void test_against_independent_values() + { + struct TV { + // inputs given as hex strings + std::vector in_hex; + // expected hash (hex) + const char* expected_hex; + }; + + std::vector vectors = { + // Hash single element + { { "0x0000000000000000000000000000000000000000000000000000000000000000" }, + "0x2710144414c3a5f2354f4c08d52ed655b9fe253b4bf12cb9ad3de693d9b1db11" }, + + { { "0x19f3d19e5dd8b29a42cea4f71ebc6b12a42a5edbafbcb89cf0cddf0995d44e7f" }, + "0x2e1874598412a3b19f824ff246a1949a38b365bcdd58807eedb9206288820232" }, + + { { "0x0049c16ff91dacf0cb573751342f7bfa0042819e3f15fce57d61339f6340b0c1" }, + "0x011b5e5c76aedd3a361d2c72c711425f5709988da3c226bb8536691d81b190ac" }, + + { { "0x02713077725e5498d596be781be4c9a7353dbfe70ff10dc17702e66d0b5d388c" }, + "0x2ee1f0fc41b250f0e26dbaa1e9dc0d8e27e9354baf3c11eca131994c119f5651" }, + + { { 
"0x2a84a08a631f4c391b02f4519720881a80c25eb6ba3b59b2ca8f3c0e22ebeebc" }, + "0x0cf97e83eb7aa42f60e85095836d3b0afe3e88cc5046e1bae7318a64a0d32fd5" }, + + // Length 2 inputs + { { "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" }, + "0x0b63a53787021a4a962a452c2921b3663aff1ffd8d5510540f8e659e782956f1" }, + { { "0x1762d324c2db6a912e607fd09664aaa02dfe45b90711c0dae9627d62a4207788", + "0x1047bd52da536f6bdd26dfe642d25d9092c458e64a78211298648e81414cbf35" }, + "0x303cacb84a267e5f3f46914fd3262dcaa212930c27a2f9de22c080dd9857be35" }, + { { "0x0a529bb6bbbf25ed33a47a4637dc70eb469a29893047482866748ae7f3a5afe1", + "0x1f5189751e18cf788deb219bdb95461e86ca08a882e4af5f8a6ec478e1ec73b4" }, + "0x1f71000626ba87581561f94a17f7b9962be8f1aa8d0c69c6f95348c9ddffe542" }, + { { "0x14ba77172ab2278bdf5a087ca0bd400e936bafe6dfc092c4e7a1b0950f1b6dbe", + "0x195c41f12d4fbac5e194c201536f3094541e73bf27d9f2413f09e731b3838733" }, + "0x06b53c119381e6ccee8e3ac9845970ba96befbce39606fad3686a6e31ac7761e" }, + + { { "0x2079041f0d6becd26db3ec659c54f60464243d86c3982978f1217a5f1413ed3a", + "0x08146641a4e30689442ecd270a7efef725bdb3036bf3d837dff683161a455de1" }, + "0x07512888968a4cfc7e5da7537c7f4448bf04d3679e711c3f16ca86351b0d1e6b" }, + + // Length 3 inputs + { { "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" }, + "0x2a5de47ed300af27b706aaa14762fc468f5cfc16cd8116eb6b09b0f2643ca2b9" }, + + { { "0x300ced31bf248a1a2d4ea02b5e9f302a9e34df3c2109d5f1046ee9f59de6f6f1", + "0x2e6eb409ed7f41949cdb1925ac3ec68132b2443d873589a8afde4c027c3c0b68", + "0x2f08443953fc54fb351e41a46da99bbec1d290dae2907d2baf5174ed28eee9ea" }, + "0x27e4cf07e4bf24219f6a2da9be19cea601313a95f8a1360cf8f15d474826bf49" }, + + { { "0x21c6e3a949ec9c622e7e062efab3d6e1d5ee62763f99366627de157e26d179b7", + 
"0x1369519755b97bf50d10be283f853c5607ed1308f8235cd34c915783cbf7c70d", + "0x00c632d6fe8be14eddb11aee10b63e017e7d1f1a275d01e9d215f4135e950e7d" }, + "0x0d72d2b806a47af41117f7de2147908eb0496f99c0f58fbd6f5e778174132451" }, + + { { "0x26001a9f75eddc947cfc2e7a5524a7fb32d226404468168d58ff2f918e027015", + "0x0080918fd6c6f3867a46a0260e2cc2bc6b0b6bcb4528d33ba2bb57555a12929a", + "0x05217f0ada44bebd06d9fd34358fe7213446bdce940d943916acb59cabdc0001" }, + "0x2015f0a35636e589ae36e65f00b8f8c8a0cab01dcd080c23c8e2274283a57c85" }, + + { { "0x2a7efeb117f6894cfa21375d4496249c43eda35b5d5d267b7154413fdaad44ea", + "0x0bb48b46f770205618e2350c2f9c294524425763fb7ef6156c33c5bc2b8c1cbf", + "0x0206c176b0c66212fd12a580a8dd38689ae29dfa15933ba1982f5d30ed0c2ea1" }, + "0x1c93b16c25fa1e7b4933425b32876fa97c2013d44214792231634d3c96b6cc3f" }, + + // Length 10 inputs + { { "0x187b4757bbf034ce7daaf3fdf2b735544d88da2033dea124500ff79484bce862" }, + "0x0961b57effa18e2dcbf5671e9400d10bc9214fbf39b149cfd8949731f61d2bfa" }, + + { { "0x0143bc5d854ca7c33c27ad96dfe2c2443dd272a30a99f90f8f892ac7bbb4370e", + "0x2eb3d5587861dac05dfed4b7099dbdef57fc644504f297afb890c2df0c7212e7" }, + "0x0d34db78174c82c6a7d60cbf21f0fce80ef4ddc67e65881d6daca4e5ad8cd52d" }, + + { { "0x030341179f0ef88190aa557f9b20bb4aa31f805f214e72741b5d3b3a2bccf5b6", + "0x00b88879f5765438ef691e40ad4a6223d421317c342888de56c7f8b995e27688", + "0x0727b4522492cfbe1b5be97a17ed5672487fd1dc2ad3d09bd033c55d1ba40c70" }, + "0x22024dabdd6a9dfb47eb26f9569fd048968a0db30c60f3f38d8b61274458437e" }, + + { { "0x0891e9efa2b82224dccfee5171614168f84c4c99443c7e6e2753433a978f5955", + "0x01c81114d1f4eb857dfe3a8479760fd0c8e33d9ed6f42f8ee3eef974b85ef937", + "0x03a2238b91de1214a385af17ade25f2e71b6364b4d54dfb6e7ec96fd12be5a65", + "0x24cc93df58f07c156dd648edac3318420325db58ff1cccbc3d9a3cdb529f8469" }, + "0x24f3009e0089df4ae82f5dcde988fd9738ede4a6f51788c11c69b3e43a01b42b" }, + + { { "0x283318d1c946e5a788c59267712f20383a610189d89d39c161d53e2ae22b9bb5", + 
"0x2f51c6b6cc030846b73d449e6f8f9cbe609038bc4ccdb4268dc486e1a0d9694f", + "0x04c4fb583a2d9d9ceb1927de6d251e09fa01950313d6640863de039a37bd376a", + "0x18f1ec5070a8f50dbb71bf03d130fccc420161b8ee5e6c6ffdb676c7a7d33189", + "0x11e539f3dd6bb505dde162c84f22eee58f2a95a62027f230442b26c8dc3f96fc" }, + "0x1cbfcd7746c46fcfa7ae67d32e0cafb6ac348ebcb6f5a5e8c579ec6daa96362b" }, + + { { "0x01c93b681eb77d4d6af59d8f753d49d3a8839208f44471e68d67e52758cc923d", + "0x2a8f0abbca50b48935358452633b55e694d4d03357d7d5bdfddf08a117ada3b9", + "0x0c1a9c0b4e4a5315f111c078e411360bf54af39bfe2ffa2ce094aa6d57aa3343", + "0x12e80d94354e709fdc78c0d646a3763dd4dbeada2c7b27553f23cc6c32823e82", + "0x065929de60742283ec95df48428ca27e72bc8d4d114f172aff17c237b208d056", + "0x23ceb931dc1b76a8915466e0faedf56a5fe2169e650248663d9fecb75e5fa156" }, + "0x145023e2318ab81ba31e50cc62713441762c5132d5e6acbe6e88fd9f816473f1" }, + + { { "0x213bba8cab8dac55a2a86deaabcd4303ad2b0762fd1d11950d54bc54aa056623", + "0x00872f0dd93bd2868b85a7822d07d31d18ccf92228c38167de7804319a019fa3", + "0x198ea672f81fd916f47ae9a5f24d6740c5e5e5c8a389166ce02d85731e71d8af", + "0x0f362421b36759e1364d4ba7e5894381f56009843afe307aee6cb28a58ab4702", + "0x28a750154b9935407757f85a9f2596ddf082ce5f74256fd8c1200fc04a5f6548", + "0x0044d022f6220947659be7ed057a37adebc8468fce1bc365b76b7664595dd31d", + "0x22a2c8eff174ea66dee3d53dda9d45d37b90b3c2d6820f233fb868f4b41fc83c" }, + "0x16f71f10bf199529eadbbf20b0aefd1aae7afdd756c385da64d4e74474b9623c" }, + + { { "0x1d40c16d71a56182ae856c7f69412c62b29f1afd21376c9fe615c6bcd013723d", + "0x03281f89b9ac18f3a8199b7116453790560d43f2d1b70debb10379b6702b5e31", + "0x2391e7ffcf81c4fbe0bd44cd89ad173d6d1d9bf7c4325ca4b195f863b9fb9da9", + "0x0ecbc17f1a32f3163bc8bc704711aebedb0ad69d29f224fc8fb0851e7fc9c8c4", + "0x1ea5f8133edf27df211a66c7ba4304b9131cbde0b1832c2a182d9869a77c491b", + "0x224b14a1040873fd8f154e6d7b65db283ed2c422e5d14aa06ce3ed9d3cc69743", + "0x1bda12ff5af5e9a1b1f7dd8febb87e253ca0a4e43b16cd3b79818007f6f8d1bb", + 
"0x24a873345d569136d18164069fc60749aa57f8930ec8a52adde1f01967afbb7c" }, + "0x1b63be96f9b6bdeb09f103968aabac69252137ec863177a93a516e8120d662c4" }, + + { { "0x04e673b5f868ff850c5db58bb9f32b60c564b09b57261fdcb45c32013379ec21", + "0x0ff51c3255f4bd470e3263e5295b757d091fc015b1348b0b30b8fcfa5cc9b818", + "0x08f7a1ec99bf817777942e1132ccd237980a01f1d3d9914bbaf4d8e74320a1ee", + "0x2f6deadbdaa28308134784b3615596492085de03f479f59e79044e5860c76b27", + "0x141a2f00b0cbc606fe887c806d46659a6dac8a0d15829e36f06e43eec13eb324", + "0x1a0eb99c6f3c44026e61a5e6e36c806a25db6cf674c5f0075210dfdd00122264", + "0x0be48f952b90e3dfad74a2b04ade94e7b02b677702bf32d94389ff1669fa9911", + "0x18de646adb3e2f5e2ac7cd21dbfbf9dbe91d97b9cfb5afc2e7735c6f292d4ffe", + "0x174bfedb2323aecff5c4952313b81d9b3fcda8ff71a4b762bd16bb9779afb731" }, + "0x2a33a41e1e3cca17e7b7a000ac5cfcb7f7783023c0563acb39ae5bfe2e0d3c8e" }, + + { { "0x041035d2043ca613be600dc3643dff43a343548bfeb85f19ab2ff31dfec42fd1", + "0x0233cd5d2fff9f11b3675f38a7fd7f04efbfe9a95f114824c2e9c98e97a52d3e", + "0x2251141b94a8f419455457672f188b942079916217c2e1eefd7eceab022e8ba1", + "0x08d2cfe2bd8e054aa3488c2d8a6f7628503da3f4d450e30d4fea46a8700d4a26", + "0x1eb978a79a501b8df8c6312f3474e1f41d439492fdfed4240cbfef6b0a7b47a2", + "0x05e385c9b9093003379e7111b3d83846694b15a3072355bc1a6df3eeff6e95f4", + "0x0d91626c7f7e0ff655a973452f3eae713893f57ce2e078978e00ef0ffab48f67", + "0x0d51fcc8cdc21676e2d0d44d2caebcbedbd0c7f3149e7a2cb24f0f923c718f50", + "0x23cf8f1eda161dc7114e4774216a96a51430a4ed00bb94a9c22ffaf8158d9331", + "0x2060c8e16eaa344a1eb20bbc4179ad36c6c3d503716f329ce268677ecb76172f" }, + "0x1eab26c4915afff7148c904edac0220dc6b86dca67ee342db5705027c4e489f1" }, + }; + + for (const auto& tv : vectors) { + Builder builder; + // build inputs as witnesses + std::vector ins; + ins.reserve(tv.in_hex.size()); + for (auto* h : tv.in_hex) { + ins.emplace_back(w_hex(builder, h)); + } + + // compute poseidon2 hash (variable-length API) + auto got = poseidon2::hash(ins).get_value(); + fr 
expected(uint256_t(tv.expected_hex)); + + EXPECT_EQ(got, expected); + } } }; @@ -161,7 +340,14 @@ TYPED_TEST(StdlibPoseidon2, TestHashZeros) TYPED_TEST(StdlibPoseidon2, TestHashSmall) { + TestFixture::test_hash(1); + TestFixture::test_hash(6); TestFixture::test_hash(10); + TestFixture::test_hash(16); + TestFixture::test_hash(17); + TestFixture::test_hash(18); + TestFixture::test_hash(23); + TestFixture::test_hash(24); } TYPED_TEST(StdlibPoseidon2, TestHashLarge) @@ -174,17 +360,16 @@ TYPED_TEST(StdlibPoseidon2, TestHashRepeatedPairs) TestFixture::test_hash_repeated_pairs(256); } -TYPED_TEST(StdlibPoseidon2, TestHashByteArraySmall) +TYPED_TEST(StdlibPoseidon2, TestHashConstants) { - TestFixture::test_hash_byte_array(351); + TestFixture::test_hash_constants(); }; - -TYPED_TEST(StdlibPoseidon2, TestHashByteArrayLarge) +TYPED_TEST(StdlibPoseidon2, TestHashPadding) { - TestFixture::test_hash_byte_array(31000); + TestFixture::test_padding_collisions(); }; - -TYPED_TEST(StdlibPoseidon2, TestHashConstants) +TYPED_TEST(StdlibPoseidon2, Consistency) { - TestFixture::test_hash_constants(); -}; + + TestFixture::test_against_independent_values(); +} diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2_permutation.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2_permutation.cpp index 671b3b096948..f1a1a45152c4 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2_permutation.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2_permutation.cpp @@ -7,41 +7,30 @@ #include "poseidon2_permutation.hpp" #include "barretenberg/honk/execution_trace/gate_data.hpp" -#include "barretenberg/stdlib/primitives/circuit_builders/circuit_builders.hpp" -#include "barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp" namespace bb::stdlib { -/** - * @brief Circuit form of Poseidon2 permutation from https://eprint.iacr.org/2023/323 for MegaCircuitBuilder. 
- * @details The permutation consists of one initial linear layer, then a set of external rounds, a set of internal - * rounds, and a set of external rounds. - * @param builder - * @param input - * @return State - */ -template -typename Poseidon2Permutation::State Poseidon2Permutation::permutation( - Builder* builder, const typename Poseidon2Permutation::State& input) +template +typename Poseidon2Permutation::State Poseidon2Permutation::permutation( + Builder* builder, const typename Poseidon2Permutation::State& input) { - // deep copy State current_state(input); NativeState current_native_state; for (size_t i = 0; i < t; ++i) { current_native_state[i] = current_state[i].get_value(); } - // Apply 1st linear layer + // Apply 1st linear layer both natively and in-circuit. NativePermutation::matrix_multiplication_external(current_native_state); - matrix_multiplication_external(builder, current_state); + matrix_multiplication_external(current_state); // First set of external rounds constexpr size_t rounds_f_beginning = rounds_f / 2; for (size_t i = 0; i < rounds_f_beginning; ++i) { - poseidon2_external_gate_ in{ current_state[0].witness_index, - current_state[1].witness_index, - current_state[2].witness_index, - current_state[3].witness_index, + poseidon2_external_gate_ in{ current_state[0].get_witness_index(), + current_state[1].get_witness_index(), + current_state[2].get_witness_index(), + current_state[3].get_witness_index(), i }; builder->create_poseidon2_external_gate(in); // calculate the new witnesses @@ -53,21 +42,15 @@ typename Poseidon2Permutation::State Poseidon2Permutationcreate_dummy_gate(builder->blocks.poseidon2_external, - current_state[0].witness_index, - current_state[1].witness_index, - current_state[2].witness_index, - current_state[3].witness_index); + propagate_current_state_to_next_row(builder, current_state, builder->blocks.poseidon2_external); // Internal rounds const size_t p_end = rounds_f_beginning + rounds_p; for (size_t i = 
rounds_f_beginning; i < p_end; ++i) { - poseidon2_internal_gate_ in{ current_state[0].witness_index, - current_state[1].witness_index, - current_state[2].witness_index, - current_state[3].witness_index, + poseidon2_internal_gate_ in{ current_state[0].get_witness_index(), + current_state[1].get_witness_index(), + current_state[2].get_witness_index(), + current_state[3].get_witness_index(), i }; builder->create_poseidon2_internal_gate(in); current_native_state[0] += round_constants[i][0]; @@ -78,20 +61,14 @@ typename Poseidon2Permutation::State Poseidon2Permutationcreate_dummy_gate(builder->blocks.poseidon2_internal, - current_state[0].witness_index, - current_state[1].witness_index, - current_state[2].witness_index, - current_state[3].witness_index); + propagate_current_state_to_next_row(builder, current_state, builder->blocks.poseidon2_internal); // Remaining external rounds for (size_t i = p_end; i < NUM_ROUNDS; ++i) { - poseidon2_external_gate_ in{ current_state[0].witness_index, - current_state[1].witness_index, - current_state[2].witness_index, - current_state[3].witness_index, + poseidon2_external_gate_ in{ current_state[0].get_witness_index(), + current_state[1].get_witness_index(), + current_state[2].get_witness_index(), + current_state[3].get_witness_index(), i }; builder->create_poseidon2_external_gate(in); // calculate the new witnesses @@ -102,131 +79,47 @@ typename Poseidon2Permutation::State Poseidon2Permutation(builder, current_native_state[j]); } } - // The Poseidon2 permutation is 64 rounds, but needs to be a block of 65 rows, since the result of - // applying a round of Poseidon2 is stored in the next row (the shifted row). As a result, we need this end row to - // compare with the result from the 64th round of Poseidon2. Note that it does not activate any selectors since it - // only serves as a comparison through the shifted wires. 
- builder->create_dummy_gate(builder->blocks.poseidon2_external, - current_state[0].witness_index, - current_state[1].witness_index, - current_state[2].witness_index, - current_state[3].witness_index); + + propagate_current_state_to_next_row(builder, current_state, builder->blocks.poseidon2_external); + return current_state; } /** * @brief Separate function to do just the first linear layer (equivalent to external matrix mul). - * @details We use 6 arithmetic gates to implement: - * gate 1: Compute tmp1 = state[0] + state[1] + 2 * state[3] - * gate 2: Compute tmp2 = 2 * state[1] + state[2] + state[3] - * gate 3: Compute v2 = 4 * state[0] + 4 * state[1] + tmp2 - * gate 4: Compute v1 = v2 + tmp1 - * gate 5: Compute v4 = tmp1 + 4 * state[2] + 4 * state[3] - * gate 6: Compute v3 = v4 + tmp2 - * output state is [v1, v2, v3, v4] - * @param builder - * @param state + * @details Update the state with \f$ M_E \cdot (\text{state}[0], \text{state}[1], \text{state}[2], + * \text{state}[3])^{\top}\f$. Where \f$ M_E \f$ is the external round matrix. See `Poseidon2ExternalRelationImpl`. 
*/ -template -void Poseidon2Permutation::matrix_multiplication_external( - Builder* builder, typename Poseidon2Permutation::State& state) +template +void Poseidon2Permutation::matrix_multiplication_external(typename Poseidon2Permutation::State& state) { + const bb::fr two(2); + const bb::fr four(4); // create the 6 gates for the initial matrix multiplication // gate 1: Compute tmp1 = state[0] + state[1] + 2 * state[3] - field_t tmp1 = - witness_t(builder, state[0].get_value() + state[1].get_value() + FF(2) * state[3].get_value()); - builder->create_big_add_gate({ - .a = state[0].witness_index, - .b = state[1].witness_index, - .c = state[3].witness_index, - .d = tmp1.witness_index, - .a_scaling = 1, - .b_scaling = 1, - .c_scaling = 2, - .d_scaling = -1, - .const_scaling = 0, - }); + field_t tmp1 = state[0].add_two(state[1], state[3] * two); // gate 2: Compute tmp2 = 2 * state[1] + state[2] + state[3] - field_t tmp2 = - witness_t(builder, FF(2) * state[1].get_value() + state[2].get_value() + state[3].get_value()); - builder->create_big_add_gate({ - .a = state[1].witness_index, - .b = state[2].witness_index, - .c = state[3].witness_index, - .d = tmp2.witness_index, - .a_scaling = 2, - .b_scaling = 1, - .c_scaling = 1, - .d_scaling = -1, - .const_scaling = 0, - }); + field_t tmp2 = state[2].add_two(state[1] * two, state[3]); // gate 3: Compute v2 = 4 * state[0] + 4 * state[1] + tmp2 - field_t v2 = - witness_t(builder, FF(4) * state[0].get_value() + FF(4) * state[1].get_value() + tmp2.get_value()); - builder->create_big_add_gate({ - .a = state[0].witness_index, - .b = state[1].witness_index, - .c = tmp2.witness_index, - .d = v2.witness_index, - .a_scaling = 4, - .b_scaling = 4, - .c_scaling = 1, - .d_scaling = -1, - .const_scaling = 0, - }); + state[1] = tmp2.add_two(state[0] * four, state[1] * four); // gate 4: Compute v1 = v2 + tmp1 - field_t v1 = witness_t(builder, v2.get_value() + tmp1.get_value()); - builder->create_big_add_gate({ - .a = v2.witness_index, - .b = 
tmp1.witness_index, - .c = v1.witness_index, - .d = builder->zero_idx, - .a_scaling = 1, - .b_scaling = 1, - .c_scaling = -1, - .d_scaling = 0, - .const_scaling = 0, - }); + state[0] = state[1] + tmp1; // gate 5: Compute v4 = tmp1 + 4 * state[2] + 4 * state[3] - field_t v4 = - witness_t(builder, tmp1.get_value() + FF(4) * state[2].get_value() + FF(4) * state[3].get_value()); - builder->create_big_add_gate({ - .a = tmp1.witness_index, - .b = state[2].witness_index, - .c = state[3].witness_index, - .d = v4.witness_index, - .a_scaling = 1, - .b_scaling = 4, - .c_scaling = 4, - .d_scaling = -1, - .const_scaling = 0, - }); + state[3] = tmp1.add_two(state[2] * four, state[3] * four); // gate 6: Compute v3 = v4 + tmp2 - field_t v3 = witness_t(builder, v4.get_value() + tmp2.get_value()); - builder->create_big_add_gate({ - .a = v4.witness_index, - .b = tmp2.witness_index, - .c = v3.witness_index, - .d = builder->zero_idx, - .a_scaling = 1, - .b_scaling = 1, - .c_scaling = -1, - .d_scaling = 0, - .const_scaling = 0, - }); - - state[0] = v1; - state[1] = v2; - state[2] = v3; - state[3] = v4; + state[2] = state[3] + tmp2; + + // This can only happen if the input contained constant `field_t` elements. 
+ ASSERT(state[0].is_normalized() && state[1].is_normalized() && state[2].is_normalized() && + state[3].is_normalized()); } -template class Poseidon2Permutation; -template class Poseidon2Permutation; +template class Poseidon2Permutation; +template class Poseidon2Permutation; } // namespace bb::stdlib diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2_permutation.hpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2_permutation.hpp index 25122eeae134..d356561b2448 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2_permutation.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2_permutation.hpp @@ -10,24 +10,32 @@ #include #include "barretenberg/crypto/poseidon2/poseidon2_permutation.hpp" -#include "barretenberg/stdlib/primitives/circuit_builders/circuit_builders.hpp" #include "barretenberg/stdlib/primitives/field/field.hpp" namespace bb::stdlib { -template class Poseidon2Permutation { +/** + * @brief Circuit form of Poseidon2 permutation from https://eprint.iacr.org/2023/323. + * @details The permutation consists of one initial linear layer, then a set of external rounds, a set of internal + * rounds, and a set of external rounds. + * + * Note that except for the inital linear layer, we compute the round results natively and record them into Poseidon2 + * custom gates. This allows us to heavily reduce the number of arithmetic gates, that would have been otherwise + * required to perform expensive non-linear S-box operations in-circuit. + * + * The external rounds are constrained via `Poseidon2ExternalRelationImpl`. + * The internal rounds are constrained via `Poseidon2InternalRelationImpl`. 
+ * + */ +template class Poseidon2Permutation { public: + using Params = crypto::Poseidon2Bn254ScalarFieldParams; using NativePermutation = crypto::Poseidon2Permutation; // t = sponge permutation size (in field elements) // t = rate + capacity - // capacity = 1 field element (256 bits) + // capacity = 1 field element // rate = number of field elements that can be compressed per permutation static constexpr size_t t = Params::t; - // d = degree of s-box polynomials. For a given field, `d` is the smallest element of `p` such that gdc(d, p - 1) = - // 1 (excluding 1) For bn254/grumpkin, d = 5 - static constexpr size_t d = Params::d; - // sbox size = number of bits in p - static constexpr size_t sbox_size = Params::sbox_size; // number of full sbox rounds static constexpr size_t rounds_f = Params::rounds_f; // number of partial sbox rounds @@ -53,19 +61,28 @@ template class Poseidon2Permutation { static State permutation(Builder* builder, const State& input); /** - * @brief Separate function to do just the first linear layer (equivalent to external matrix mul). - * @details We use 6 arithmetic gates to implement: - * gate 1: Compute tmp1 = state[0] + state[1] + 2 * state[3] - * gate 2: Compute tmp2 = 2 * state[1] + state[2] + state[3] - * gate 3: Compute v2 = 4 * state[0] + 4 * state[1] + tmp2 - * gate 4: Compute v1 = v2 + tmp1 - * gate 5: Compute v4 = tmp1 + 4 * state[2] + 4 * state[3] - * gate 6: Compute v3 = v4 + tmp2 - * output state is [v1, v2, v3, v4] + * @brief In-circuit method to efficiently multiply the inital state by the external matrix \f$ M_E \f$. Uses 6 + * aritmetic gates. + */ + static void matrix_multiplication_external(State& state); + + /** + * @brief The result of applying a round of Poseidon2 is stored in the next row and is accessed by Poseidon2 + * Internal and External Relations via the shifts mechanism. Note that it does not activate any selectors since it + * only serves to store the values. 
See `Poseidon2ExternalRelationImpl` and `Poseidon2InternalRelationImpl` docs. + * * @param builder - * @param state + * @param state an array of `t` field_t elements + * @param block Either `poseidon2_external` or `poseidon2_internal` block of the Execution Trace */ - static void matrix_multiplication_external(Builder* builder, State& state); + static void propagate_current_state_to_next_row(Builder* builder, const State& state, auto& block) + { + builder->create_unconstrained_gate(block, + state[0].get_witness_index(), + state[1].get_witness_index(), + state[2].get_witness_index(), + state[3].get_witness_index()); + }; }; } // namespace bb::stdlib diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/sponge/sponge.hpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/sponge/sponge.hpp index f970ad9fd284..3c24b29d6e94 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/sponge/sponge.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/sponge/sponge.hpp @@ -13,170 +13,111 @@ #include "barretenberg/numeric/uint256/uint256.hpp" #include "barretenberg/stdlib/hash/poseidon2/poseidon2_permutation.hpp" -#include "barretenberg/stdlib/primitives/field/field.hpp" namespace bb::stdlib { /** * @brief Implements the circuit form of a cryptographic sponge over prime fields. - * Implements the sponge specification from the Community Cryptographic Specification Project - * see https://github.com/C2SP/C2SP/blob/792c1254124f625d459bfe34417e8f6bdd02eb28/poseidon-sponge.md - * (Note: this spec was not accepted into the C2SP repo, we might want to reference something else!) * - * Note: If we ever use this sponge class for more than 1 hash functions, we should move this out of `poseidon2` - * and into its own directory - * @tparam field_t - * @tparam rate - * @tparam capacity - * @tparam t - * @tparam Permutation + * @tparam Builder A circuit builder class. Can be Ultra- or MegaCircuitBuilder. 
*/ -template class FieldSponge { - public: - /** - * @brief Defines what phase of the sponge algorithm we are in. - * - * ABSORB: 'absorbing' field elements into the sponge - * SQUEEZE: compressing the sponge and extracting a field element - * - */ - enum Mode { - ABSORB, - SQUEEZE, - }; +template class FieldSponge { + private: + using Permutation = Poseidon2Permutation; + static constexpr size_t t = crypto::Poseidon2Bn254ScalarFieldParams::t; // = 4 + static constexpr size_t capacity = 1; + static constexpr size_t rate = t - capacity; // = 3 + using field_t = stdlib::field_t; // sponge state. t = rate + capacity. capacity = 1 field element (~256 bits) - std::array state; + std::array state{}; // cached elements that have been absorbed. - std::array cache; + std::array cache{}; size_t cache_size = 0; - Mode mode = Mode::ABSORB; Builder* builder; - FieldSponge(Builder& builder_, field_t domain_iv = 0) - : builder(&builder_) + FieldSponge(Builder* builder_, size_t in_len) + : builder(builder_) { - for (size_t i = 0; i < rate; ++i) { - state[i] = witness_t::create_constant_witness(builder, 0); - } - state[rate] = witness_t::create_constant_witness(builder, domain_iv.get_value()); + // Add the domain separation to the initial state. + field_t iv(static_cast(in_len) << 64); + iv.convert_constant_to_fixed_witness(builder); + state[rate] = iv; } - std::array perform_duplex() + void perform_duplex() { - // zero-pad the cache - for (size_t i = cache_size; i < rate; ++i) { - cache[i] = witness_t::create_constant_witness(builder, 0); - } - // add the cache into sponge state + // Add the cache into sponge state for (size_t i = 0; i < rate; ++i) { state[i] += cache[i]; } + + // Apply Poseidon2 permutation state = Permutation::permutation(builder, state); - // return `rate` number of field elements from the sponge state. 
- std::array output; - for (size_t i = 0; i < rate; ++i) { - output[i] = state[i]; - } - // variables with indices from rate to size of state - 1 won't be used anymore - // after permutation. But they aren't dangerous and needed to put in used witnesses - if constexpr (IsUltraBuilder) { - for (size_t i = rate; i < t; i++) { - builder->update_used_witnesses(state[i].witness_index); - } - } - return output; + + // Reset the cache + cache = {}; } void absorb(const field_t& input) { - if (mode == Mode::ABSORB && cache_size == rate) { + if (cache_size == rate) { // If we're absorbing, and the cache is full, apply the sponge permutation to compress the cache perform_duplex(); cache[0] = input; cache_size = 1; - } else if (mode == Mode::ABSORB && cache_size < rate) { + } else { // If we're absorbing, and the cache is not full, add the input into the cache cache[cache_size] = input; cache_size += 1; - } else if (mode == Mode::SQUEEZE) { - // If we're in squeeze mode, switch to absorb mode and add the input into the cache. - // N.B. I don't think this code path can be reached?! - cache[0] = input; - cache_size = 1; - mode = Mode::ABSORB; } } field_t squeeze() { - if (mode == Mode::SQUEEZE && cache_size == 0) { - // If we're in squeze mode and the cache is empty, there is nothing left to squeeze out of the sponge! - // Switch to absorb mode. - mode = Mode::ABSORB; - cache_size = 0; - } - if (mode == Mode::ABSORB) { - // If we're in absorb mode, apply sponge permutation to compress the cache, populate cache with compressed - // state and switch to squeeze mode. Note: this code block will execute if the previous `if` condition was - // matched - auto new_output_elements = perform_duplex(); - mode = Mode::SQUEEZE; - for (size_t i = 0; i < rate; ++i) { - cache[i] = new_output_elements[i]; - } - cache_size = rate; - } - // By this point, we should have a non-empty cache. Pop one item off the top of the cache and return it. 
- field_t result = cache[0]; - for (size_t i = 1; i < cache_size; ++i) { - cache[i - 1] = cache[i]; - } - cache_size -= 1; - cache[cache_size] = witness_t::create_constant_witness(builder, 0); - return result; + + perform_duplex(); + + return state[0]; } + public: /** - * @brief Use the sponge to hash an input string + * @brief Use the sponge to hash an input vector. * - * @tparam out_len - * @tparam is_variable_length. Distinguishes between hashes where the preimage length is constant/not constant - * @param input - * @return std::array + * @param input Circuit witnesses (a_0, ..., a_{N-1}) + * @return Hash of the input, a single witness field element. */ - template - static std::array hash_internal(Builder& builder, std::span input) + static field_t hash_internal(std::span input) { - size_t in_len = input.size(); - const uint256_t iv = (static_cast(in_len) << 64) + out_len - 1; - FieldSponge sponge(builder, iv); + // Ensure that all inputs belong to the same circuit and extract a pointer to the circuit object. + Builder* builder = validate_context(input); + // Ensure that the pointer is not a `nullptr` + ASSERT(builder); + + // Initialize the sponge state. Input length is used for domain separation. + const size_t in_len = input.size(); + FieldSponge sponge(builder, in_len); + + // Absorb inputs in blocks of size r = 3. Make sure that all inputs are witneesses. for (size_t i = 0; i < in_len; ++i) { + BB_ASSERT_EQ(input[i].is_constant(), false, "Sponge inputs should not be stdlib constants."); sponge.absorb(input[i]); } - std::array output; - for (size_t i = 0; i < out_len; ++i) { - output[i] = sponge.squeeze(); - } - // variables with indices won't be used in the circuit. - // but they aren't dangerous and needed to put in used witnesses - if constexpr (IsUltraBuilder) { - for (const auto& elem : sponge.cache) { - if (elem.witness_index != IS_CONSTANT) { - builder.update_used_witnesses(elem.witness_index); - } - } + // Perform final duplex call. 
At this point, cache contains `m = in_len % 3` input elements and 3 - m constant + // zeroes served as padding. + field_t output = sponge.squeeze(); + + // The final state consists of 4 elements, we only use the first element, which means that the remaining + // 3 witnesses are only used in a single gate. + for (const auto& elem : sponge.state) { + builder->update_used_witnesses(elem.witness_index); } return output; } - - static field_t hash_internal(Builder& builder, std::span input) - { - return hash_internal<1>(builder, input)[0]; - } }; -} // namespace bb::stdlib \ No newline at end of file +} // namespace bb::stdlib diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.cpp index 2efac7dbdbcc..5772a9b4df57 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.cpp @@ -25,17 +25,9 @@ constexpr size_t get_num_blocks(const size_t num_bits) template void SHA256::prepare_constants(std::array, 8>& input) { - constexpr uint64_t init_constants[8]{ 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, - 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 }; - - input[0] = init_constants[0]; - input[1] = init_constants[1]; - input[2] = init_constants[2]; - input[3] = init_constants[3]; - input[4] = init_constants[4]; - input[5] = init_constants[5]; - input[6] = init_constants[6]; - input[7] = init_constants[7]; + for (size_t i = 0; i < 8; i++) { + input[i] = init_constants[i]; + } } template @@ -90,43 +82,28 @@ std::array, 64> SHA256::extend_witness(const std::arra w_right = convert_witness(w_right.normal); } - constexpr fr base(16); - constexpr fr left_multipliers[4]{ - (base.pow(32 - 7) + base.pow(32 - 18)), - (base.pow(32 - 18 + 3) + 1), - (base.pow(32 - 18 + 10) + base.pow(10 - 7) + base.pow(10 - 3)), - (base.pow(18 - 7) + base.pow(18 - 3) + 1), - }; - - constexpr fr right_multipliers[4]{ - base.pow(32 - 17) 
+ base.pow(32 - 19), - base.pow(32 - 17 + 3) + base.pow(32 - 19 + 3), - base.pow(32 - 19 + 10) + fr(1), - base.pow(18 - 17) + base.pow(18 - 10), - }; - - field_pt left[4]{ + std::array left{ w_left.sparse_limbs[0] * left_multipliers[0], w_left.sparse_limbs[1] * left_multipliers[1], w_left.sparse_limbs[2] * left_multipliers[2], w_left.sparse_limbs[3] * left_multipliers[3], }; - field_pt right[4]{ + std::array right{ w_right.sparse_limbs[0] * right_multipliers[0], w_right.sparse_limbs[1] * right_multipliers[1], w_right.sparse_limbs[2] * right_multipliers[2], w_right.sparse_limbs[3] * right_multipliers[3], }; - const auto left_xor_sparse = + const field_pt left_xor_sparse = left[0].add_two(left[1], left[2]).add_two(left[3], w_left.rotated_limbs[1]) * fr(4); - const auto xor_result_sparse = right[0] - .add_two(right[1], right[2]) - .add_two(right[3], w_right.rotated_limbs[2]) - .add_two(w_right.rotated_limbs[3], left_xor_sparse) - .normalize(); + const field_pt xor_result_sparse = right[0] + .add_two(right[1], right[2]) + .add_two(right[3], w_right.rotated_limbs[2]) + .add_two(w_right.rotated_limbs[3], left_xor_sparse) + .normalize(); field_pt xor_result = plookup_read::read_from_1_to_2_table(SHA256_WITNESS_OUTPUT, xor_result_sparse); @@ -261,18 +238,6 @@ std::array, 8> SHA256::sha256_block(const std::array, 16>& input) { typedef field_t field_pt; - - constexpr uint64_t round_constants[64]{ - 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, - 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, - 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, - 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, - 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, - 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, - 
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, - 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 - }; - /** * Initialize round variables with previous block output **/ @@ -340,44 +305,75 @@ std::array, 8> SHA256::sha256_block(const std::array packed_byte_array SHA256::hash(const packed_byte_array& input) +template byte_array SHA256::hash(const byte_array_ct& input) { - typedef field_t field_pt; - Builder* ctx = input.get_context(); + std::vector message_schedule; + const size_t message_length_bytes = input.size(); - packed_byte_array message_schedule(input); + for (size_t idx = 0; idx < message_length_bytes; idx++) { + message_schedule.push_back(input[idx]); + } - const size_t message_bits = message_schedule.size() * 8; - message_schedule.append(field_t(ctx, 128), 1); + message_schedule.push_back(field_ct(ctx, 128)); constexpr size_t bytes_per_block = 64; + // Include message length const size_t num_bytes = message_schedule.size() + 8; const size_t num_blocks = num_bytes / bytes_per_block + (num_bytes % bytes_per_block != 0); const size_t num_total_bytes = num_blocks * bytes_per_block; + // Pad with zeroes to make the number divisible by 64 for (size_t i = num_bytes; i < num_total_bytes; ++i) { - message_schedule.append(field_t(ctx, 0), 1); + message_schedule.push_back(field_ct(ctx, 0)); } - message_schedule.append(field_t(ctx, message_bits), 8); + // Append the message length bits represented as a byte array of length 8. 
+ const size_t message_bits = message_length_bytes * 8; + byte_array_ct message_length_byte_decomposition(field_ct(message_bits), 8); - const std::vector slices = message_schedule.to_unverified_byte_slices(4); + for (size_t idx = 0; idx < 8; idx++) { + message_schedule.push_back(message_length_byte_decomposition[idx]); + } + + // Compute 4-byte slices + std::vector slices; + + for (size_t i = 0; i < message_schedule.size(); i += 4) { + std::vector chunk; + for (size_t j = 0; j < 4; ++j) { + const size_t shift = 8 * (3 - j); + chunk.push_back(message_schedule[i + j] * field_ct(ctx, uint256_t(1) << shift)); + } + slices.push_back(field_ct::accumulate(chunk)); + } constexpr size_t slices_per_block = 16; - std::array rolling_hash; + std::array rolling_hash; prepare_constants(rolling_hash); for (size_t i = 0; i < num_blocks; ++i) { - std::array hash_input; + std::array hash_input; for (size_t j = 0; j < 16; ++j) { hash_input[j] = slices[i * slices_per_block + j]; } rolling_hash = sha256_block(rolling_hash, hash_input); } - std::vector output(rolling_hash.begin(), rolling_hash.end()); - return packed_byte_array(output, 4); + std::vector output; + // Each element of rolling_hash is a 4-byte field_t, decompose rolling hash into bytes. + for (const auto& word : rolling_hash) { + // This constructor constrains + // - word length to be <=4 bytes + // - the element reconstructed from bytes is equal to the given input. 
+ // - each entry to be a byte + byte_array_ct word_byte_decomposition(word, 4); + for (size_t i = 0; i < 4; i++) { + output.push_back(word_byte_decomposition[i]); + } + } + // + return byte_array(ctx, output); } template class SHA256; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.hpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.hpp index b814d5d8558e..7fd7cd380477 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.hpp @@ -5,6 +5,7 @@ // ===================== #pragma once +#include "barretenberg/stdlib/primitives/byte_array/byte_array.hpp" #include "barretenberg/stdlib_circuit_builders/plookup_tables/plookup_tables.hpp" #include @@ -12,28 +13,59 @@ #include "barretenberg/stdlib/primitives/circuit_builders/circuit_builders_fwd.hpp" #include "../../primitives/field/field.hpp" -#include "../../primitives/packed_byte_array/packed_byte_array.hpp" namespace bb::stdlib { template class SHA256 { + + using field_ct = field_t; + using byte_array_ct = byte_array; struct sparse_ch_value { - field_t normal; - field_t sparse; - field_t rot6; - field_t rot11; - field_t rot25; + field_ct normal; + field_ct sparse; + field_ct rot6; + field_ct rot11; + field_ct rot25; }; struct sparse_maj_value { - field_t normal; - field_t sparse; - field_t rot2; - field_t rot13; - field_t rot22; + field_ct normal; + field_ct sparse; + field_ct rot2; + field_ct rot13; + field_ct rot22; + }; + + static constexpr uint64_t init_constants[8]{ 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, + 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 }; + + static constexpr fr base{ 16 }; + + static constexpr std::array left_multipliers{ + (base.pow(32 - 7) + base.pow(32 - 18)), + (base.pow(32 - 18 + 3) + 1), + (base.pow(32 - 18 + 10) + base.pow(10 - 7) + base.pow(10 - 3)), + (base.pow(18 - 7) + base.pow(18 - 3) + 1), + }; + + static constexpr std::array right_multipliers{ + 
base.pow(32 - 17) + base.pow(32 - 19), + base.pow(32 - 17 + 3) + base.pow(32 - 19 + 3), + base.pow(32 - 19 + 10) + fr(1), + base.pow(18 - 17) + base.pow(18 - 10), }; + static constexpr uint64_t round_constants[64]{ + 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, + 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, + 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, + 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, + 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, + 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, + 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, + 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 + }; struct sparse_witness_limbs { - sparse_witness_limbs(const field_t& in = 0) + sparse_witness_limbs(const field_ct& in = 0) { normal = in; has_sparse_limbs = false; @@ -44,21 +76,21 @@ template class SHA256 { sparse_witness_limbs& operator=(const sparse_witness_limbs& other) = default; sparse_witness_limbs& operator=(sparse_witness_limbs&& other) = default; - field_t normal; + field_ct normal; - std::array, 4> sparse_limbs; + std::array sparse_limbs; - std::array, 4> rotated_limbs; + std::array rotated_limbs; bool has_sparse_limbs = false; }; struct sparse_value { - sparse_value(const field_t& in = 0) + sparse_value(const field_ct& in = 0) { normal = in; if (normal.witness_index == IS_CONSTANT) { - sparse = field_t(in.get_context(), - bb::fr(numeric::map_into_sparse_form<16>(uint256_t(in.get_value()).data[0]))); + sparse = field_ct(in.get_context(), + bb::fr(numeric::map_into_sparse_form<16>(uint256_t(in.get_value()).data[0]))); } } @@ -68,27 +100,27 @@ template class SHA256 { 
sparse_value& operator=(const sparse_value& other) = default; sparse_value& operator=(sparse_value&& other) = default; - field_t normal; - field_t sparse; + field_ct normal; + field_ct sparse; }; - static void prepare_constants(std::array, 8>& input); - static sparse_witness_limbs convert_witness(const field_t& w); + static void prepare_constants(std::array& input); + static sparse_witness_limbs convert_witness(const field_ct& w); - static field_t choose(sparse_value& e, const sparse_value& f, const sparse_value& g); + static field_ct choose(sparse_value& e, const sparse_value& f, const sparse_value& g); - static field_t majority(sparse_value& a, const sparse_value& b, const sparse_value& c); - static sparse_value map_into_choose_sparse_form(const field_t& e); - static sparse_value map_into_maj_sparse_form(const field_t& e); + static field_ct majority(sparse_value& a, const sparse_value& b, const sparse_value& c); + static sparse_value map_into_choose_sparse_form(const field_ct& e); + static sparse_value map_into_maj_sparse_form(const field_ct& e); - static field_t add_normalize(const field_t& a, const field_t& b); + static field_ct add_normalize(const field_ct& a, const field_ct& b); public: - static std::array, 8> sha256_block(const std::array, 8>& h_init, - const std::array, 16>& input); + static std::array sha256_block(const std::array& h_init, + const std::array& input); - static std::array, 64> extend_witness(const std::array, 16>& w_in); + static std::array extend_witness(const std::array& w_in); - static packed_byte_array hash(const packed_byte_array& input); + static byte_array hash(const byte_array_ct& input); }; } // namespace bb::stdlib diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.test.cpp index c42ba8ddaf9e..a6134819f43b 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.test.cpp +++ 
b/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.test.cpp @@ -18,10 +18,26 @@ auto& engine = numeric::get_debug_randomness(); using Builder = UltraCircuitBuilder; using byte_array_ct = byte_array; -using packed_byte_array_ct = packed_byte_array; using field_ct = field_t; using witness_ct = witness_t; +/** + * @brief Given a `byte_array` object, slice it into chunks of size `num_bytes_in_chunk` and compute field elements + * reconstructed from these chunks. + */ + +std::vector pack_bytes_into_field_elements(const byte_array_ct& input, size_t num_bytes_in_chunk = 4) +{ + std::vector> result; + const size_t byte_len = input.size(); + + for (size_t i = 0; i < byte_len; i += num_bytes_in_chunk) { + byte_array_ct chunk = input.slice(i, std::min(num_bytes_in_chunk, byte_len - i)); + result.emplace_back(static_cast(chunk)); + } + + return result; +} constexpr uint64_t ror(uint64_t val, uint64_t shift) { return (val >> (shift & 31U)) | (val << (32U - (shift & 31U))); @@ -146,17 +162,15 @@ std::array inner_block(std::array& w) TEST(stdlib_sha256, test_plookup_55_bytes) { - typedef stdlib::field_t field_pt; - typedef stdlib::packed_byte_array packed_byte_array_pt; // 55 bytes is the largest number of bytes that can be hashed in a single block, // accounting for the single padding bit, and the 64 size bits required by the SHA-256 standard. auto builder = UltraCircuitBuilder(); - packed_byte_array_pt input(&builder, "An 8 character password? Snow White and the 7 Dwarves.."); + byte_array_ct input(&builder, "An 8 character password? 
Snow White and the 7 Dwarves.."); - packed_byte_array_pt output_bits = stdlib::SHA256::hash(input); + byte_array_ct output_bytes = stdlib::SHA256::hash(input); - std::vector output = output_bits.to_unverified_byte_slices(4); + std::vector output = pack_bytes_into_field_elements(output_bytes); EXPECT_EQ(uint256_t(output[0].get_value()), 0x51b2529fU); EXPECT_EQ(uint256_t(output[1].get_value()), 0x872e839aU); @@ -177,11 +191,11 @@ TEST(stdlib_sha256, test_55_bytes) // 55 bytes is the largest number of bytes that can be hashed in a single block, // accounting for the single padding bit, and the 64 size bits required by the SHA-256 standard. auto builder = Builder(); - packed_byte_array_ct input(&builder, "An 8 character password? Snow White and the 7 Dwarves.."); + byte_array_ct input(&builder, "An 8 character password? Snow White and the 7 Dwarves.."); - packed_byte_array_ct output_bits = stdlib::SHA256::hash(input); + byte_array_ct output_bytes = stdlib::SHA256::hash(input); - std::vector output = output_bits.to_unverified_byte_slices(4); + std::vector output = pack_bytes_into_field_elements(output_bytes); EXPECT_EQ(output[0].get_value(), fr(0x51b2529fULL)); EXPECT_EQ(output[1].get_value(), fr(0x872e839aULL)); @@ -197,16 +211,14 @@ TEST(stdlib_sha256, test_55_bytes) EXPECT_EQ(proof_result, true); } -TEST(stdlib_sha256, test_NIST_vector_one_packed_byte_array) +TEST(stdlib_sha256, test_NIST_vector_one_byte_array) { - typedef stdlib::field_t field_pt; - typedef stdlib::packed_byte_array packed_byte_array_pt; auto builder = UltraCircuitBuilder(); - packed_byte_array_pt input(&builder, "abc"); - packed_byte_array_pt output_bytes = stdlib::SHA256::hash(input); - std::vector output = output_bytes.to_unverified_byte_slices(4); + byte_array_ct input(&builder, "abc"); + byte_array_ct output_bytes = stdlib::SHA256::hash(input); + std::vector output = pack_bytes_into_field_elements(output_bytes); EXPECT_EQ(uint256_t(output[0].get_value()).data[0], (uint64_t)0xBA7816BFU); 
EXPECT_EQ(uint256_t(output[1].get_value()).data[0], (uint64_t)0x8F01CFEAU); EXPECT_EQ(uint256_t(output[2].get_value()).data[0], (uint64_t)0x414140DEU); @@ -223,16 +235,14 @@ TEST(stdlib_sha256, test_NIST_vector_one_packed_byte_array) TEST(stdlib_sha256, test_NIST_vector_one) { - typedef stdlib::field_t field_pt; - typedef stdlib::packed_byte_array packed_byte_array_pt; auto builder = UltraCircuitBuilder(); - packed_byte_array_pt input(&builder, "abc"); + byte_array_ct input(&builder, "abc"); - packed_byte_array_pt output_bits = stdlib::SHA256::hash(input); + byte_array_ct output_bytes = stdlib::SHA256::hash(input); - std::vector output = output_bits.to_unverified_byte_slices(4); + std::vector output = pack_bytes_into_field_elements(output_bytes); EXPECT_EQ(output[0].get_value(), fr(0xBA7816BFULL)); EXPECT_EQ(output[1].get_value(), fr(0x8F01CFEAULL)); @@ -254,9 +264,9 @@ TEST(stdlib_sha256, test_NIST_vector_two) byte_array_ct input(&builder, "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"); - byte_array_ct output_bits = stdlib::SHA256::hash(input); + byte_array_ct output_bytes = stdlib::SHA256::hash(input); - std::vector output = packed_byte_array_ct(output_bits).to_unverified_byte_slices(4); + std::vector output = pack_bytes_into_field_elements(output_bytes); EXPECT_EQ(output[0].get_value(), 0x248D6A61ULL); EXPECT_EQ(output[1].get_value(), 0xD20638B8ULL); @@ -279,9 +289,9 @@ TEST(stdlib_sha256, test_NIST_vector_three) // one byte, 0xbd byte_array_ct input(&builder, std::vector{ 0xbd }); - byte_array_ct output_bits = stdlib::SHA256::hash(input); + byte_array_ct output_bytes = stdlib::SHA256::hash(input); - std::vector output = packed_byte_array_ct(output_bits).to_unverified_byte_slices(4); + std::vector output = pack_bytes_into_field_elements(output_bytes); EXPECT_EQ(output[0].get_value(), 0x68325720ULL); EXPECT_EQ(output[1].get_value(), 0xaabd7c82ULL); @@ -304,9 +314,9 @@ TEST(stdlib_sha256, test_NIST_vector_four) // 4 bytes, 0xc98c8e55 byte_array_ct 
input(&builder, std::vector{ 0xc9, 0x8c, 0x8e, 0x55 }); - byte_array_ct output_bits = stdlib::SHA256::hash(input); + byte_array_ct output_bytes = stdlib::SHA256::hash(input); - std::vector output = packed_byte_array_ct(output_bits).to_unverified_byte_slices(4); + std::vector output = pack_bytes_into_field_elements(output_bytes); EXPECT_EQ(output[0].get_value(), 0x7abc22c0ULL); EXPECT_EQ(output[1].get_value(), 0xae5af26cULL); @@ -325,12 +335,10 @@ TEST(stdlib_sha256, test_NIST_vector_four) HEAVY_TEST(stdlib_sha256, test_NIST_vector_five) { - typedef stdlib::field_t field_pt; - typedef stdlib::packed_byte_array packed_byte_array_pt; auto builder = UltraCircuitBuilder(); - packed_byte_array_pt input( + byte_array_ct input( &builder, "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" @@ -343,9 +351,9 @@ HEAVY_TEST(stdlib_sha256, test_NIST_vector_five) "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAA"); - packed_byte_array_pt output_bits = stdlib::SHA256::hash(input); + byte_array_ct output_bytes = stdlib::SHA256::hash(input); - std::vector output = output_bits.to_unverified_byte_slices(4); + std::vector output = pack_bytes_into_field_elements(output_bytes); EXPECT_EQ(output[0].get_value(), 0xc2e68682ULL); EXPECT_EQ(output[1].get_value(), 0x3489ced2ULL); @@ -372,9 +380,9 @@ TEST(stdlib_sha256, test_input_len_multiple) auto input_buf = std::vector(inp, 1); byte_array_ct input(&builder, input_buf); - byte_array_ct output_bits = stdlib::SHA256::hash(input); + byte_array_ct output_bytes = stdlib::SHA256::hash(input); - auto circuit_output = output_bits.get_value(); + auto circuit_output = output_bytes.get_value(); auto expected = crypto::sha256(input_buf); @@ -416,9 +424,9 @@ TEST(stdlib_sha256, test_input_str_len_multiple) 
auto input_buf = std::vector(input_str.begin(), input_str.end()); byte_array_ct input(&builder, input_buf); - byte_array_ct output_bits = stdlib::SHA256::hash(input); + byte_array_ct output_bytes = stdlib::SHA256::hash(input); - auto circuit_output = output_bits.get_value(); + auto circuit_output = output_bytes.get_value(); auto expected = crypto::sha256(input_buf); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.cpp index ac108bfed09a..9f3057351e36 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.cpp @@ -44,11 +44,10 @@ DeciderRecursiveVerifier_::PairingPoints DeciderRecursiveVerifier_load_proof(proof); VerifierCommitments commitments{ accumulator->vk_and_hash->vk, accumulator->witness_commitments }; + // DeciderRecursiveVerifier's log circuit size is fixed, hence we are using a trivial `padding_indicator_array`. 
+ std::vector padding_indicator_array(Flavor::VIRTUAL_LOG_N, 1); - const auto padding_indicator_array = - compute_padding_indicator_array(accumulator->vk_and_hash->vk->log_circuit_size); - - Sumcheck sumcheck(transcript, accumulator->alphas, CONST_PROOF_SIZE_LOG_N, accumulator->target_sum); + Sumcheck sumcheck(transcript, accumulator->alphas, Flavor::VIRTUAL_LOG_N, accumulator->target_sum); SumcheckOutput output = sumcheck.verify(accumulator->relation_parameters, accumulator->gate_challenges, padding_indicator_array); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.hpp b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.hpp index 46c171ee6ca6..9c4e725e2e0b 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.hpp @@ -9,7 +9,7 @@ #include "barretenberg/honk/proof_system/types/proof.hpp" #include "barretenberg/stdlib/primitives/pairing_points.hpp" #include "barretenberg/stdlib/proof/proof.hpp" -#include "barretenberg/stdlib/protogalaxy_verifier/recursive_decider_verification_key.hpp" +#include "barretenberg/stdlib/protogalaxy_verifier/recursive_verifier_instance.hpp" #include "barretenberg/stdlib/transcript/transcript.hpp" #include "barretenberg/sumcheck/sumcheck.hpp" @@ -23,15 +23,15 @@ template class DeciderRecursiveVerifier_ { using VerifierCommitmentKey = typename Flavor::VerifierCommitmentKey; using Builder = typename Flavor::CircuitBuilder; using PairingPoints = stdlib::recursion::PairingPoints; - using RecursiveDeciderVK = RecursiveDeciderVerificationKey_; - using NativeDeciderVK = bb::DeciderVerificationKey_; + using RecursiveVerifierInstance = RecursiveVerifierInstance_; + using NativeVerifierInstance = bb::VerifierInstance_; using Transcript = bb::BaseTranscript>; using StdlibProof = bb::stdlib::Proof; public: - explicit 
DeciderRecursiveVerifier_(Builder* builder, std::shared_ptr accumulator) + explicit DeciderRecursiveVerifier_(Builder* builder, std::shared_ptr accumulator) : builder(builder) - , accumulator(std::make_shared(builder, accumulator)){}; + , accumulator(std::make_shared(builder, accumulator)) {}; /** * @brief Construct a decider recursive verifier directly from a stdlib accumulator, returned by a prior iteration @@ -42,14 +42,17 @@ template class DeciderRecursiveVerifier_ { * @param builder * @param accumulator */ - explicit DeciderRecursiveVerifier_(Builder* builder, std::shared_ptr accumulator) + explicit DeciderRecursiveVerifier_(Builder* builder, + std::shared_ptr accumulator, + const std::shared_ptr& transcript) : builder(builder) + , transcript(transcript) { if (this->builder == accumulator->builder) { this->accumulator = std::move(accumulator); } else { - this->accumulator = std::make_shared( - this->builder, std::make_shared(accumulator->get_value())); + this->accumulator = std::make_shared( + this->builder, std::make_shared(accumulator->get_value())); } } @@ -58,7 +61,7 @@ template class DeciderRecursiveVerifier_ { VerifierCommitmentKey pcs_verification_key; Builder* builder; - std::shared_ptr accumulator; + std::shared_ptr accumulator; std::shared_ptr transcript = std::make_shared(); }; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ipa_accumulator.hpp b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ipa_accumulator.hpp index 4382553b16cd..7fb0962b9d17 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ipa_accumulator.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ipa_accumulator.hpp @@ -10,7 +10,9 @@ namespace bb::stdlib::recursion::honk { template struct IpaAccumulator { std::vector u_challenges_inv; // inverses of u challenges that represent the polynomial h; could be an array - typename Curve::Group comm; // commitment to the polynomial h + typename Curve::Group comm; // commitment to the 
polynomial h (a.k.a. the challenge polynomial): ∏_{i ∈ [k]} (1 + + // u-challenges-inv_{len-i}.X^{2^{i-1}}) + bool running_truth_value; // the running truth value of the accumulator (not in-circuit) }; } // namespace bb::stdlib::recursion::honk diff --git a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/oink_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/oink_recursive_verifier.cpp index f2feac9c2eca..ca97b888fa5d 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/oink_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/oink_recursive_verifier.cpp @@ -19,10 +19,10 @@ namespace bb::stdlib::recursion::honk { template OinkRecursiveVerifier_::OinkRecursiveVerifier_(Builder* builder, - const std::shared_ptr& decider_vk, + const std::shared_ptr& verifier_inst, const std::shared_ptr& transcript, std::string domain_separator) - : decider_vk(decider_vk) + : verifier_inst(verifier_inst) , builder(builder) , transcript(transcript) , domain_separator(std::move(domain_separator)) @@ -30,9 +30,9 @@ OinkRecursiveVerifier_::OinkRecursiveVerifier_(Builder* builder, template OinkRecursiveVerifier_::OinkRecursiveVerifier_(Builder* builder, - const std::shared_ptr& decider_vk, + const std::shared_ptr& verifier_inst, std::string domain_separator) - : decider_vk(decider_vk) + : verifier_inst(verifier_inst) , builder(builder) , domain_separator(std::move(domain_separator)) {} @@ -50,14 +50,16 @@ template void OinkRecursiveVerifier_::verify() WitnessCommitments commitments; CommitmentLabels labels; - FF vkey_hash = decider_vk->vk_and_hash->vk->add_hash_to_transcript(domain_separator, *transcript); - vinfo("vk hash in Oink recursive verifier: ", vkey_hash); - vinfo("expected vk hash: ", decider_vk->vk_and_hash->hash); + FF vk_hash = verifier_inst->vk_and_hash->vk->hash_through_transcript(domain_separator, *transcript); + transcript->add_to_hash_buffer(domain_separator + "vk_hash", vk_hash); + 
vinfo("vk hash in Oink recursive verifier: ", vk_hash); + vinfo("expected vk hash: ", verifier_inst->vk_and_hash->hash); // Check that the vk hash matches the hash of the verification key - decider_vk->vk_and_hash->hash.assert_equal(vkey_hash); + verifier_inst->vk_and_hash->hash.assert_equal(vk_hash); size_t num_public_inputs = - static_cast(static_cast(decider_vk->vk_and_hash->vk->num_public_inputs.get_value())); + static_cast(static_cast(verifier_inst->vk_and_hash->vk->num_public_inputs.get_value())); + std::vector public_inputs; for (size_t i = 0; i < num_public_inputs; ++i) { public_inputs.emplace_back( transcript->template receive_from_prover(domain_separator + "public_input_" + std::to_string(i))); @@ -105,11 +107,8 @@ template void OinkRecursiveVerifier_::verify() } } - const FF public_input_delta = compute_public_input_delta(public_inputs, - beta, - gamma, - decider_vk->vk_and_hash->vk->log_circuit_size, - decider_vk->vk_and_hash->vk->pub_inputs_offset); + const FF public_input_delta = compute_public_input_delta( + public_inputs, beta, gamma, verifier_inst->vk_and_hash->vk->pub_inputs_offset); // Get commitment to permutation and lookup grand products commitments.z_perm = transcript->template receive_from_prover(domain_separator + labels.z_perm); @@ -123,10 +122,12 @@ template void OinkRecursiveVerifier_::verify() // It is more efficient to generate an array of challenges than to generate them individually. 
SubrelationSeparators alphas = transcript->template get_challenges(challenge_labels); - decider_vk->relation_parameters = + verifier_inst->relation_parameters = RelationParameters{ eta, eta_two, eta_three, beta, gamma, public_input_delta }; - decider_vk->witness_commitments = std::move(commitments); - decider_vk->alphas = std::move(alphas); + verifier_inst->witness_commitments = std::move(commitments); + verifier_inst->alphas = std::move(alphas); + verifier_inst->public_inputs = std::move(public_inputs); + verifier_inst->is_complete = true; // instance has been completely populated } template class OinkRecursiveVerifier_>; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/oink_recursive_verifier.hpp b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/oink_recursive_verifier.hpp index fdfb309e6dc4..c43d8c33b4a7 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/oink_recursive_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/oink_recursive_verifier.hpp @@ -5,7 +5,7 @@ // ===================== #pragma once -#include "barretenberg/stdlib/protogalaxy_verifier/recursive_decider_verification_key.hpp" +#include "barretenberg/stdlib/protogalaxy_verifier/recursive_verifier_instance.hpp" #include "barretenberg/stdlib/transcript/transcript.hpp" namespace bb::stdlib::recursion::honk { @@ -15,7 +15,7 @@ template class OinkRecursiveVerifier_ { using FF = typename Flavor::FF; using Commitment = typename Flavor::Commitment; using GroupElement = typename Flavor::GroupElement; - using RecursiveDeciderVK = RecursiveDeciderVerificationKey_; + using RecursiveVerifierInstance = RecursiveVerifierInstance_; using VerificationKey = typename Flavor::VerificationKey; using Builder = typename Flavor::CircuitBuilder; using SubrelationSeparators = typename Flavor::SubrelationSeparators; @@ -34,7 +34,7 @@ template class OinkRecursiveVerifier_ { * @param domain_separator string used for differentiating verification_keys in the 
transcript (PG only) */ explicit OinkRecursiveVerifier_(Builder* builder, - const std::shared_ptr& decider_vk, + const std::shared_ptr& verifier_inst, const std::shared_ptr& transcript, std::string domain_separator = ""); @@ -46,7 +46,7 @@ template class OinkRecursiveVerifier_ { * @param domain_separator string used for differentiating verification_keys in the transcript (PG only) */ explicit OinkRecursiveVerifier_(Builder* builder, - const std::shared_ptr& decider_vk, + const std::shared_ptr& verifier_inst, std::string domain_separator = ""); /** @@ -61,8 +61,7 @@ template class OinkRecursiveVerifier_ { */ void verify_proof(const OinkProof& proof); - std::shared_ptr decider_vk; - std::vector public_inputs; + std::shared_ptr verifier_inst; Builder* builder; std::shared_ptr transcript = std::make_shared(); std::string domain_separator; // used in PG to distinguish between verification_keys in transcript diff --git a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.cpp index 6533c68d1f1a..8fd8c16758bd 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.cpp @@ -21,7 +21,7 @@ template UltraRecursiveVerifier_::UltraRecursiveVerifier_(Builder* builder, const std::shared_ptr& vk_and_hash, const std::shared_ptr& transcript) - : key(std::make_shared(builder, vk_and_hash)) + : verifier_instance(std::make_shared(builder, vk_and_hash)) , builder(builder) , transcript(transcript) {} @@ -45,7 +45,8 @@ UltraRecursiveVerifier_::Output UltraRecursiveVerifier_::verify_ using ClaimBatcher = ClaimBatcher_; using ClaimBatch = ClaimBatcher::Batch; - const size_t num_public_inputs = static_cast(key->vk_and_hash->vk->num_public_inputs.get_value()); + const size_t num_public_inputs = + 
static_cast(verifier_instance->vk_and_hash->vk->num_public_inputs.get_value()); BB_ASSERT_EQ(proof.size(), Flavor::NativeFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS() + num_public_inputs); StdlibProof ipa_proof; @@ -64,32 +65,37 @@ UltraRecursiveVerifier_::Output UltraRecursiveVerifier_::verify_ honk_proof = proof; } transcript->load_proof(honk_proof); - OinkVerifier oink_verifier{ builder, key, transcript }; + OinkVerifier oink_verifier{ builder, verifier_instance, transcript }; oink_verifier.verify(); - const std::vector& public_inputs = oink_verifier.public_inputs; + const std::vector& public_inputs = verifier_instance->public_inputs; - VerifierCommitments commitments{ key->vk_and_hash->vk, key->witness_commitments }; - - auto gate_challenges = std::vector(CONST_PROOF_SIZE_LOG_N); - for (size_t idx = 0; idx < CONST_PROOF_SIZE_LOG_N; idx++) { - gate_challenges[idx] = transcript->template get_challenge("Sumcheck:gate_challenge_" + std::to_string(idx)); - } + VerifierCommitments commitments{ verifier_instance->vk_and_hash->vk, verifier_instance->witness_commitments }; + static constexpr size_t VIRTUAL_LOG_N = Flavor::NativeFlavor::VIRTUAL_LOG_N; + // Get the gate challenges for sumcheck computation + verifier_instance->gate_challenges = + transcript->template get_powers_of_challenge("Sumcheck:gate_challenge", VIRTUAL_LOG_N); // Execute Sumcheck Verifier and extract multivariate opening point u = (u_0, ..., u_{d-1}) and purported // multivariate evaluations at u - const auto padding_indicator_array = - compute_padding_indicator_array(key->vk_and_hash->vk->log_circuit_size); + std::vector padding_indicator_array(VIRTUAL_LOG_N, 1); + if constexpr (Flavor::HasZK) { + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1521): ZK Recursive verifiers need to evaluate + // RowDisablingPolynomial, which requires knowing the actual `log_circuit_size`. Can be fixed by reserving the + // first rows of the trace for masking. 
+ padding_indicator_array = + compute_padding_indicator_array(verifier_instance->vk_and_hash->vk->log_circuit_size); + } - Sumcheck sumcheck(transcript, key->alphas, CONST_PROOF_SIZE_LOG_N); + Sumcheck sumcheck(transcript, verifier_instance->alphas, VIRTUAL_LOG_N); // Receive commitments to Libra masking polynomials std::array libra_commitments = {}; if constexpr (Flavor::HasZK) { libra_commitments[0] = transcript->template receive_from_prover("Libra:concatenation_commitment"); } - SumcheckOutput sumcheck_output = - sumcheck.verify(key->relation_parameters, gate_challenges, padding_indicator_array); + SumcheckOutput sumcheck_output = sumcheck.verify( + verifier_instance->relation_parameters, verifier_instance->gate_challenges, padding_indicator_array); // For MegaZKFlavor: the sumcheck output contains claimed evaluations of the Libra polynomials if constexpr (Flavor::HasZK) { diff --git a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.hpp b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.hpp index d2105e78b407..8e69f00ccb84 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.hpp @@ -55,7 +55,7 @@ template class UltraRecursiveVerifier_ { using FF = typename Flavor::FF; using Commitment = typename Flavor::Commitment; using GroupElement = typename Flavor::GroupElement; - using RecursiveDeciderVK = RecursiveDeciderVerificationKey_; + using RecursiveVerifierInstance = RecursiveVerifierInstance_; using VerificationKey = typename Flavor::VerificationKey; using VKAndHash = typename Flavor::VKAndHash; using VerifierCommitmentKey = typename Flavor::VerifierCommitmentKey; @@ -74,8 +74,8 @@ template class UltraRecursiveVerifier_ { [[nodiscard("IPA claim and Pairing points should be accumulated")]] Output verify_proof(const StdlibProof& proof); // 
TODO(https://github.com/AztecProtocol/barretenberg/issues/1364): Improve VKs. Clarify the usage of - // RecursiveDeciderVK here. Seems unnecessary. - std::shared_ptr key; + // RecursiveVerifierInstances here. Seems unnecessary. + std::shared_ptr verifier_instance; VerifierCommitmentKey pcs_verification_key; Builder* builder; std::shared_ptr transcript; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.test.cpp index 523d5922b9f0..6acd5e3214f5 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.test.cpp @@ -38,7 +38,7 @@ template class RecursiveVerifierTest : public testing using InnerProver = UltraProver_; using InnerVerifier = UltraVerifier_; using InnerBuilder = typename InnerFlavor::CircuitBuilder; - using InnerDeciderProvingKey = DeciderProvingKey_; + using InnerProverInstance = ProverInstance_; using InnerCommitment = InnerFlavor::Commitment; using InnerFF = InnerFlavor::FF; using InnerIO = std::conditional_t, @@ -54,7 +54,7 @@ template class RecursiveVerifierTest : public testing std::conditional_t, UltraRollupFlavor, UltraFlavor>>; using OuterProver = UltraProver_; using OuterVerifier = UltraVerifier_; - using OuterDeciderProvingKey = DeciderProvingKey_; + using OuterProverInstance = ProverInstance_; using OuterStdlibProof = bb::stdlib::Proof; using OuterIO = std::conditional_t, bb::stdlib::recursion::honk::RollupIO, // If RecursiveFlavor has IPA, then @@ -127,18 +127,19 @@ template class RecursiveVerifierTest : public testing OuterBuilder outer_circuit; // Compute native verification key - auto proving_key = std::make_shared(inner_circuit); - auto honk_vk = std::make_shared(proving_key->get_precomputed()); + auto prover_instance = std::make_shared(inner_circuit); + auto honk_vk = 
std::make_shared(prover_instance->get_precomputed()); auto stdlib_vk_and_hash = std::make_shared(outer_circuit, honk_vk); // Instantiate the recursive verifier using the native verification key RecursiveVerifier verifier{ &outer_circuit, stdlib_vk_and_hash }; // Spot check some values in the recursive VK to ensure it was constructed correctly - EXPECT_EQ(static_cast(verifier.key->vk_and_hash->vk->log_circuit_size.get_value()), + EXPECT_EQ(static_cast(verifier.verifier_instance->vk_and_hash->vk->log_circuit_size.get_value()), honk_vk->log_circuit_size); - EXPECT_EQ(static_cast(verifier.key->vk_and_hash->vk->num_public_inputs.get_value()), + EXPECT_EQ(static_cast(verifier.verifier_instance->vk_and_hash->vk->num_public_inputs.get_value()), honk_vk->num_public_inputs); - for (auto [vk_poly, native_vk_poly] : zip_view(verifier.key->vk_and_hash->vk->get_all(), honk_vk->get_all())) { + for (auto [vk_poly, native_vk_poly] : + zip_view(verifier.verifier_instance->vk_and_hash->vk->get_all(), honk_vk->get_all())) { EXPECT_EQ(vk_poly.get_value(), native_vk_poly); } } @@ -158,11 +159,11 @@ template class RecursiveVerifierTest : public testing auto inner_circuit = create_inner_circuit(inner_size); // Generate a proof over the inner circuit - auto inner_proving_key = std::make_shared(inner_circuit); + auto inner_prover_instance = std::make_shared(inner_circuit); auto verification_key = - std::make_shared(inner_proving_key->get_precomputed()); - InnerProver inner_prover(inner_proving_key, verification_key); - info("test circuit size: ", inner_proving_key->dyadic_size()); + std::make_shared(inner_prover_instance->get_precomputed()); + InnerProver inner_prover(inner_prover_instance, verification_key); + info("test circuit size: ", inner_prover_instance->dyadic_size()); auto inner_proof = inner_prover.construct_proof(); // Create a recursive verification circuit for the proof of the inner circuit @@ -187,9 +188,9 @@ template class RecursiveVerifierTest : public testing }; 
inputs.set_public(); - auto outer_proving_key = std::make_shared(outer_circuit); + auto outer_prover_instance = std::make_shared(outer_circuit); auto outer_verification_key = - std::make_shared(outer_proving_key->get_precomputed()); + std::make_shared(outer_prover_instance->get_precomputed()); return { outer_circuit.blocks, outer_verification_key }; }; @@ -211,9 +212,10 @@ template class RecursiveVerifierTest : public testing auto inner_circuit = create_inner_circuit(); // Generate a proof over the inner circuit - auto proving_key = std::make_shared(inner_circuit); - auto verification_key = std::make_shared(proving_key->get_precomputed()); - InnerProver inner_prover(proving_key, verification_key); + auto prover_instance = std::make_shared(inner_circuit); + auto verification_key = + std::make_shared(prover_instance->get_precomputed()); + InnerProver inner_prover(prover_instance, verification_key); auto inner_proof = inner_prover.construct_proof(); // Create a recursive verification circuit for the proof of the inner circuit @@ -272,16 +274,16 @@ template class RecursiveVerifierTest : public testing // Check 3: Construct and verify a proof of the recursive verifier circuit { - auto proving_key = std::make_shared(outer_circuit); + auto prover_instance = std::make_shared(outer_circuit); auto verification_key = - std::make_shared(proving_key->get_precomputed()); + std::make_shared(prover_instance->get_precomputed()); info("Recursive Verifier: num gates = ", outer_circuit.get_num_finalized_gates()); - OuterProver prover(proving_key, verification_key); + OuterProver prover(prover_instance, verification_key); auto proof = prover.construct_proof(); if constexpr (HasIPAAccumulator) { VerifierCommitmentKey ipa_verification_key = (1 << CONST_ECCVM_LOG_N); OuterVerifier verifier(verification_key, ipa_verification_key); - bool result = verifier.template verify_proof(proof, proving_key->ipa_proof).result; + bool result = verifier.template verify_proof(proof, 
prover_instance->ipa_proof).result; ASSERT_TRUE(result); } else { OuterVerifier verifier(verification_key); @@ -291,7 +293,7 @@ template class RecursiveVerifierTest : public testing } // Check the size of the recursive verifier if constexpr (std::same_as>) { - uint32_t NUM_GATES_EXPECTED = 873519; + uint32_t NUM_GATES_EXPECTED = 808803; ASSERT_EQ(static_cast(outer_circuit.get_num_finalized_gates()), NUM_GATES_EXPECTED) << "MegaZKHonk Recursive verifier changed in Ultra gate count! Update this value if you " "are sure this is expected."; @@ -310,11 +312,11 @@ template class RecursiveVerifierTest : public testing auto inner_circuit = create_inner_circuit(); // Generate a proof over the inner circuit - auto proving_key = std::make_shared(inner_circuit); + auto prover_instance = std::make_shared(inner_circuit); // Generate the corresponding inner verification key auto inner_verification_key = - std::make_shared(proving_key->get_precomputed()); - InnerProver inner_prover(proving_key, inner_verification_key); + std::make_shared(prover_instance->get_precomputed()); + InnerProver inner_prover(prover_instance, inner_verification_key); auto inner_proof = inner_prover.construct_proof(); // Tamper with the proof to be verified @@ -359,11 +361,11 @@ template class RecursiveVerifierTest : public testing auto inner_circuit = create_inner_circuit(); // Generate a proof over the inner circuit - auto proving_key = std::make_shared(inner_circuit); + auto prover_instance = std::make_shared(inner_circuit); // Generate the corresponding inner verification key auto inner_verification_key = - std::make_shared(proving_key->get_precomputed()); - InnerProver inner_prover(proving_key, inner_verification_key); + std::make_shared(prover_instance->get_precomputed()); + InnerProver inner_prover(prover_instance, inner_verification_key); auto inner_proof = inner_prover.construct_proof(); // Tamper with the proof to be verified diff --git 
a/barretenberg/cpp/src/barretenberg/stdlib/merge_verifier/merge_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/merge_verifier/merge_verifier.test.cpp index b82513b9df7e..dc9e2efcbf07 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/merge_verifier/merge_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/merge_verifier/merge_verifier.test.cpp @@ -27,7 +27,7 @@ template class RecursiveMergeVerifierTest : public test // Define types relevant for inner circuit using InnerFlavor = MegaFlavor; - using InnerDeciderProvingKey = DeciderProvingKey_; + using InnerProverInstance = ProverInstance_; using InnerBuilder = typename InnerFlavor::CircuitBuilder; // Define additional types for testing purposes @@ -99,6 +99,10 @@ template class RecursiveMergeVerifierTest : public test RecursiveMergeVerifier::Commitment::from_witness(&outer_circuit, merge_commitments.t_commitments[idx]); recursive_merge_commitments.T_prev_commitments[idx] = RecursiveMergeVerifier::Commitment::from_witness( &outer_circuit, merge_commitments.T_prev_commitments[idx]); + // Removing the free witness tag, since the merge commitments in the full scheme are supposed to + // be fiat-shamirred earlier + recursive_merge_commitments.t_commitments[idx].unset_free_witness_tag(); + recursive_merge_commitments.T_prev_commitments[idx].unset_free_witness_tag(); } // Create a recursive merge verification circuit for the merge proof diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/address/address.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/address/address.hpp deleted file mode 100644 index b10873350008..000000000000 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/address/address.hpp +++ /dev/null @@ -1,144 +0,0 @@ -// === AUDIT STATUS === -// internal: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } -// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } 
-// ===================== - -#pragma once -#include "barretenberg/numeric/uint256/uint256.hpp" -#include "barretenberg/stdlib/commitment/pedersen/pedersen.hpp" -#include "barretenberg/stdlib/primitives/bool/bool.hpp" -#include "barretenberg/stdlib/primitives/field/field.hpp" -#include "barretenberg/stdlib/primitives/group/cycle_group.hpp" -#include "barretenberg/stdlib/primitives/witness/witness.hpp" - -// TODO(https://github.com/AztecProtocol/barretenberg/issues/376): Establish whether this type should be here at all. -namespace bb::stdlib { - -// Native type -class address { - public: - fr address_; - - address() noexcept { address_ = fr(); } - - address(address const& other) - : address_(other.address_){}; - - address(fr const& address) - : address_(address){}; - - address(uint256_t const& address) - : address_(address){}; - - address(int const& address) - : address_(fr(address)){}; - - operator fr() { return address_; } - - operator fr() const { return address_; } - - constexpr bool operator==(address const& other) const { return this->address_ == other.address_; } - - friend std::ostream& operator<<(std::ostream& os, address const& v) { return os << v.address_; } - - fr to_field() const { return address_; } - - // delegate serialization to field - void msgpack_pack(auto& packer) const { address_.msgpack_pack(packer); } - void msgpack_unpack(auto const& o) { address_.msgpack_unpack(o); } - // help our msgpack schema compiler with this buffer alias (as far as wire representation is concerned) class - void msgpack_schema(auto& packer) const { packer.pack_alias("Address", "bin32"); } -}; - -template void read(B& it, address& addr) -{ - using serialize::read; - fr address_field; - read(it, address_field); - addr = address(address_field); -} - -template void write(B& buf, address const& addr) -{ - using serialize::write; - write(buf, addr.address_); -} - -// Circuit type -template class address_t { - public: - field_t address_; - Builder* context_; - - address_t() = 
default; - - address_t(address_t const& other) - : address_(other.address_) - , context_(other.context_){}; - - address_t(field_t const& address) - : address_(address) - , context_(address.context){}; - - address_t(uint256_t const& address) - : address_(address) - , context_(nullptr){}; - - address_t(int const& address) - : address_(address) - , context_(nullptr){}; - - address_t(witness_t const& witness) - { - address_ = field_t(witness); - context_ = witness.context; - } - - address_t& operator=(const address_t& other) - { - address_ = other.address_; - context_ = other.context_; - return *this; - } - - bool_t operator==(const address_t& other) const { return this->to_field() == other.to_field(); } - - bool_t operator!=(const address_t& other) const { return this->to_field() != other.to_field(); } - - field_t to_field() const { return address_; } - - fr get_value() const { return address_.get_value(); }; - - void assert_equal(const address_t& rhs, std::string const& msg = "address_t::assert_equal") const - { - address_.assert_equal(rhs.address_, msg); - }; - - void assert_is_in_set(const std::vector& set, - std::string const& msg = "address_t::assert_is_in_set") const - { - std::vector> field_set; - for (const auto& e : set) { - field_set.push_back(e.address_); - } - address_.assert_is_in_set(field_set, msg); - } - - static address_t conditional_assign(const bool_t& predicate, const address_t& lhs, const address_t& rhs) - { - return field_t::conditional_assign(predicate, lhs.address_, rhs.address_); - }; - - static address_t derive_from_private_key(field_t const& private_key) - { - // TODO: Dummy logic, for now. Proper derivation undecided. 
- cycle_group public_key = cycle_group(grumpkin::g1::affine_one) * - cycle_group::cycle_scalar::create_from_bn254_scalar(private_key); - return address_t(public_key.x); - } - - friend std::ostream& operator<<(std::ostream& os, address_t const& v) { return os << v.address_; } -}; - -} // namespace bb::stdlib diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/README.md b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/README.md index 28a151db7b5e..5e190284738b 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/README.md +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/README.md @@ -413,22 +413,22 @@ $$ > > $$ > \begin{aligned} -> \textcolor{orange}{D_{\textsf{lo}}} &= (2^{Q} - 1)^2 + 2 \cdot (2^{Q} - 1)^2 \cdot 2^L \ < \ 2^{2Q + L + 1}, \\ -> \textcolor{orange}{D_{\textsf{hi}}} &= 3 \cdot (2^{Q} - 1)^2 + 4 \cdot (2^{Q} - 1)^2 \cdot 2^L \ < \ 2^{2Q + L + 2}. +> \textcolor{orange}{D_{\textsf{lo}}} &= (2^{Q} - 1)^2 + 2 \cdot (2^{Q} - 1)^2 \cdot 2^L \ < \ 2^{2Q + L + 2}, \\ +> \textcolor{orange}{D_{\textsf{hi}}} &= 3 \cdot (2^{Q} - 1)^2 + 4 \cdot (2^{Q} - 1)^2 \cdot 2^L \ < \ 2^{2Q + L + 3}. > \end{aligned} > $$ > > Clearly, maximum value of $\textcolor{orange}{D_{\textsf{hi}}}$ determines the overall maximum of the two outputs of multiplication: > > $$ -> \textsf{max}(\textcolor{orange}{D_{\textsf{lo}}}, \textcolor{orange}{D_{\textsf{hi}}}) < 2^{2Q + L + 2}. +> \textsf{max}(\textcolor{orange}{D_{\textsf{lo}}}, \textcolor{orange}{D_{\textsf{hi}}}) < 2^{2Q + L + 3}. > $$ > > This is the maximum value of the resulting limbs of a single product $d = a \cdot b$. Suppose we have $2^k$ such products, then the maximum value of the limbs of the sum of these products would be: > > $$ > \begin{aligned} -> \sum_{i=1}^{2^k} \textcolor{orange}{D_{\textsf{hi}, i}} & \ < \ 2^{k + 2Q + L + 2}. +> \sum_{i=1}^{2^k} \textcolor{orange}{D_{\textsf{hi}, i}} & \ < \ 2^{k + 2Q + L + 3}. 
> \end{aligned} > $$ > @@ -436,11 +436,11 @@ $$ > > $$ > \begin{aligned} -> 2^{k + 2Q + L + 2} &< n \quad \implies \quad \boxed{Q < \frac{\text{log}_2(n) - L - k - 2}{2}}. +> 2^{k + 2Q + L + 3} &< n \quad \implies \quad \boxed{Q < \frac{\text{log}_2(n) - L - k - 3}{2}}. > \end{aligned} > $$ > -> This means that the maximum limb size that must be allowed is $Q = \left\lfloor\frac{253.5 - 68 - 10 - 2}{2}\right\rfloor = 86$. +> This means that the maximum limb size that must be allowed is $Q = \left\lfloor\frac{253.5 - 68 - 10 - 3}{2}\right\rfloor = 86$. > > . diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.fuzzer.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.fuzzer.hpp index 58893d6f59d9..fc57c5372b62 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.fuzzer.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.fuzzer.hpp @@ -1000,35 +1000,70 @@ template class BigFieldBase { bigfield_t::msub_div( input_left_bf, input_right_bf, divisor.bigfield, to_sub_bf, enable_divisor_nz_check)); } - // assert_equal uses assert_is_in_field in some cases, so we don't need to check that separately + + // assert_equal uses assert_is_in_field in some cases, so we don't need + // to check that separately void assert_equal(ExecutionHandler& other) { - if (other.bf().is_constant()) { - if (this->bf().is_constant()) { - // Assert equal does nothing in this case - return; + if (other.bf().is_constant() && this->bf().is_constant()) { + // Assert equal does nothing in this case + return; + } + + if (!other.bf().is_constant() && !this->bf().is_constant()) { + auto to_add = bigfield_t(this->bf().context, uint256_t(this->base - other.base)); + auto new_el = other.bf() + to_add; + + this->bf().assert_equal(new_el); + } + + bigfield_t lhs, rhs; + if (other.bf().is_constant() && !this->bf().is_constant()) { + auto to_add = bigfield_t(this->bigfield.context, uint256_t(this->base 
- other.base)); + auto new_el = other.bf() + to_add; + ASSERT(new_el.is_constant()); + + lhs = this->bigfield; + rhs = new_el; + } + + if (!other.bf().is_constant() && this->bf().is_constant()) { + auto to_add = bigfield_t(this->bf().context, uint256_t(this->base - other.base)); + auto new_el = other.bf() + to_add; + + lhs = new_el; + rhs = this->bf(); + } + + ASSERT(!lhs.is_constant()); + ASSERT(rhs.is_constant()); + + bool overflow = lhs.get_value() >= bb::fq::modulus; + bool reduce = VarianceRNG.next() & 1; + +#ifdef FUZZING_SHOW_INFORMATION + std::cout << "reduce? " << reduce << std::endl; + std::cout << "overflow? " << overflow << std::endl; +#endif + + if (!reduce) { + if (overflow) { + // In case we overflow the modulus, the assert will fail + // see NOTE(https://github.com/AztecProtocol/barretenberg/issues/998) + circuit_should_fail = true; } else { - /* Operate on this->bigfield rather than this->bf() to prevent - * that assert_is_in_field is called on a different object than - * assert_equal. 
- * - * See also: https://github.com/AztecProtocol/aztec2-internal/issues/1242 - */ - this->bigfield.assert_is_in_field(); - auto to_add = bigfield_t(this->bigfield.context, uint256_t(this->base - other.base)); - this->bigfield.assert_equal(other.bf() + to_add); + // In case we do not overflow, we can be sure that this will pass + lhs.assert_is_in_field(); } } else { - if (this->bf().is_constant()) { - auto to_add = bigfield_t(this->bf().context, uint256_t(this->base - other.base)); - auto new_el = other.bf() + to_add; - new_el.assert_is_in_field(); - this->bf().assert_equal(new_el); - } else { - auto to_add = bigfield_t(this->bf().context, uint256_t(this->base - other.base)); - this->bf().assert_equal(other.bf() + to_add); - } + // otherwise force reduce so we pass it anyway + lhs.reduce_mod_target_modulus(); } + + // swap the sides + if (VarianceRNG.next() & 1) + std::swap(lhs, rhs); + lhs.assert_equal(rhs); } void assert_not_equal(ExecutionHandler& other) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.hpp index f11ca4a36af5..ec7fdb19d558 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.hpp @@ -55,7 +55,7 @@ template class bigfield { } friend std::ostream& operator<<(std::ostream& os, const Limb& a) { - os << "{ " << a.element << " < " << a.maximum_value << " }"; + os << "{ " << a.element << " <= " << a.maximum_value << " }"; return os; } Limb(const Limb& other) = default; @@ -208,14 +208,16 @@ template class bigfield { ctx->range_constrain_two_limbs(result.binary_basis_limbs[0].element.get_normalized_witness_index(), result.binary_basis_limbs[1].element.get_normalized_witness_index(), static_cast(NUM_LIMB_BITS), - static_cast(NUM_LIMB_BITS)); + static_cast(NUM_LIMB_BITS), + "bigfield::construct_from_limbs: limb 0 or 1 too large"); // Range 
constrain the last two limbs to NUM_LIMB_BITS and NUM_LAST_LIMB_BITS const size_t num_last_limb_bits = (can_overflow) ? NUM_LIMB_BITS : NUM_LAST_LIMB_BITS; ctx->range_constrain_two_limbs(result.binary_basis_limbs[2].element.get_normalized_witness_index(), result.binary_basis_limbs[3].element.get_normalized_witness_index(), static_cast(NUM_LIMB_BITS), - static_cast(num_last_limb_bits)); + static_cast(num_last_limb_bits), + "bigfield::construct_from_limbs: limb 2 or 3 too large"); return result; }; @@ -326,29 +328,25 @@ template class bigfield { static constexpr size_t MAXIMUM_SUMMAND_COUNT_LOG = 4; static constexpr size_t MAXIMUM_SUMMAND_COUNT = 1 << MAXIMUM_SUMMAND_COUNT_LOG; - static constexpr uint256_t prime_basis_maximum_limb = - modulus_u512.slice(NUM_LIMB_BITS * (NUM_LIMBS - 1), NUM_LIMB_BITS* NUM_LIMBS).lo; static constexpr Basis prime_basis{ uint512_t(bb::fr::modulus), bb::fr::modulus.get_msb() + 1 }; static constexpr Basis binary_basis{ uint512_t(1) << LOG2_BINARY_MODULUS, LOG2_BINARY_MODULUS }; static constexpr Basis target_basis{ modulus_u512, static_cast(modulus_u512.get_msb() + 1) }; static constexpr bb::fr shift_1 = bb::fr(uint256_t(1) << NUM_LIMB_BITS); static constexpr bb::fr shift_2 = bb::fr(uint256_t(1) << (NUM_LIMB_BITS * 2)); static constexpr bb::fr shift_3 = bb::fr(uint256_t(1) << (NUM_LIMB_BITS * 3)); - static constexpr bb::fr shift_right_1 = bb::fr(1) / shift_1; - static constexpr bb::fr shift_right_2 = bb::fr(1) / shift_2; - static constexpr bb::fr negative_prime_modulus_mod_binary_basis = -bb::fr(uint256_t(modulus_u512)); - static constexpr uint512_t negative_prime_modulus = binary_basis.modulus - target_basis.modulus; - static constexpr std::array neg_modulus_limbs_u256{ - negative_prime_modulus.slice(0, NUM_LIMB_BITS).lo, - negative_prime_modulus.slice(NUM_LIMB_BITS, NUM_LIMB_BITS * 2).lo, - negative_prime_modulus.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 3).lo, - negative_prime_modulus.slice(NUM_LIMB_BITS * 3, NUM_LIMB_BITS * 4).lo, + 
static constexpr bb::fr negative_prime_modulus_mod_native_basis = -bb::fr(uint256_t(target_basis.modulus)); + static constexpr uint512_t negative_prime_modulus_mod_binary_basis = binary_basis.modulus - target_basis.modulus; + static constexpr std::array neg_modulus_mod_binary_basis_limbs_u256{ + negative_prime_modulus_mod_binary_basis.slice(0, NUM_LIMB_BITS).lo, + negative_prime_modulus_mod_binary_basis.slice(NUM_LIMB_BITS, NUM_LIMB_BITS * 2).lo, + negative_prime_modulus_mod_binary_basis.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 3).lo, + negative_prime_modulus_mod_binary_basis.slice(NUM_LIMB_BITS * 3, NUM_LIMB_BITS * 4).lo, }; - static constexpr std::array neg_modulus_limbs{ - bb::fr(negative_prime_modulus.slice(0, NUM_LIMB_BITS).lo), - bb::fr(negative_prime_modulus.slice(NUM_LIMB_BITS, NUM_LIMB_BITS * 2).lo), - bb::fr(negative_prime_modulus.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 3).lo), - bb::fr(negative_prime_modulus.slice(NUM_LIMB_BITS * 3, NUM_LIMB_BITS * 4).lo), + static constexpr std::array neg_modulus_mod_binary_basis_limbs{ + bb::fr(negative_prime_modulus_mod_binary_basis.slice(0, NUM_LIMB_BITS).lo), + bb::fr(negative_prime_modulus_mod_binary_basis.slice(NUM_LIMB_BITS, NUM_LIMB_BITS * 2).lo), + bb::fr(negative_prime_modulus_mod_binary_basis.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 3).lo), + bb::fr(negative_prime_modulus_mod_binary_basis.slice(NUM_LIMB_BITS * 3, NUM_LIMB_BITS * 4).lo), }; /** @@ -409,7 +407,7 @@ template class bigfield { * elements are witnesses. * * @details Naive addition of two bigfield elements would require 5 gates: 4 gates to add the binary basis limbs and - * 1 gate to add the prime basis limbs. However, if both elements are witnesses, we can use an optimised addition + * 1 gate to add the prime basis limbs. However, if both elements are witnesses, we can use an optimized addition * trick that uses 4 gates instead of 5. In this case, we add the prime basis limbs and one of the binary basis * limbs in a single gate. 
* @@ -509,7 +507,7 @@ template class bigfield { * @return bigfield * * @details Costs the same as operator* as it just sets a = b. - * NOTE(https://github.com/AztecProtocol/aztec-packages/issues/15089): Can optimise this further to save a gate. + * NOTE(https://github.com/AztecProtocol/aztec-packages/issues/15089): Can optimize this further to save a gate. */ bigfield sqr() const; @@ -568,7 +566,7 @@ template class bigfield { const std::vector& mul_right, const bigfield& divisor, const std::vector& to_sub, - bool enable_divisor_nz_check = false); + bool enable_divisor_nz_check = true); static bigfield sum(const std::vector& terms); static bigfield internal_div(const std::vector& numerators, @@ -598,10 +596,12 @@ template class bigfield { bool_t operator==(const bigfield& other) const; - void assert_is_in_field() const; - void assert_less_than(const uint256_t& upper_limit) const; - void assert_equal(const bigfield& other) const; - void assert_is_not_equal(const bigfield& other) const; + void assert_is_in_field(std::string const& msg = "bigfield::assert_is_in_field") const; + void assert_less_than(const uint256_t& upper_limit, std::string const& msg = "bigfield::assert_less_than") const; + void reduce_mod_target_modulus() const; + void assert_equal(const bigfield& other, std::string const& msg = "bigfield::assert_equal") const; + void assert_is_not_equal(const bigfield& other, + std::string const& msg = "bigfield: prime limb diff is zero, but expected non-zero") const; void self_reduce() const; @@ -770,7 +770,7 @@ template class bigfield { // // a * b = q * p + r // - // where p is the quotient, r is the remainder, and p is the size of the non-native field. + // where q is the quotient, r is the remainder, and p is the size of the non-native field. 
// The CRT requires that we check that the equation: // (a) holds modulo the size of the native field n, // (b) holds modulo the size of the bigger ring 2^t, @@ -787,7 +787,7 @@ template class bigfield { // Note: We use a further safer bound of 2^((t + m - 1) / 2). We use -1 to stay safer, // because it provides additional space to avoid the overflow, but get_msb() by itself should be enough. uint64_t maximum_product_bits = maximum_product.get_msb() - 1; - return (uint512_t(1) << (maximum_product_bits >> 1)); + return (uint512_t(1) << (maximum_product_bits >> 1)) - uint512_t(1); } // If we encounter this maximum value of a bigfield we stop execution @@ -915,31 +915,46 @@ template class bigfield { // = 2^k * (3 * 2^2Q) + 2^k * 2^L * (4 * 2^2Q) // < 2^k * (2^L + 1) * (4 * 2^2Q) // < n - // ==> 2^k * 2^L * 2^(2Q + 2) < n - // ==> 2Q + 2 < (log(n) - k - L) - // ==> Q < ((log(n) - k - L) - 2) / 2 + // ==> 2^k * 2^L * 2^(2Q + 3) < n + // ==> 2Q + 3 < (log(n) - k - L) + // ==> Q < ((log(n) - k - L) - 3) / 2 // static constexpr uint64_t MAXIMUM_LIMB_SIZE_THAT_WOULDNT_OVERFLOW = - (bb::fr::modulus.get_msb() - MAX_ADDITION_LOG - NUM_LIMB_BITS - 2) / 2; + (bb::fr::modulus.get_msb() - MAX_ADDITION_LOG - NUM_LIMB_BITS - 3) / 2; // If the logarithm of the maximum value of a limb is more than this, we need to reduce. // We allow an element to be added to itself 10 times, so we allow the limb to grow by 10 bits. // Number 10 is arbitrary, there's no actual usecase for adding 1024 elements together. - static constexpr uint64_t MAX_UNREDUCED_LIMB_BITS = NUM_LIMB_BITS + 10; + static constexpr uint64_t MAX_UNREDUCED_LIMB_BITS = NUM_LIMB_BITS + MAX_ADDITION_LOG; // If we reach this size of a limb, we stop execution (as safety measure). This should never reach during addition // as we would reduce the limbs before they reach this size. static constexpr uint64_t PROHIBITED_LIMB_BITS = MAX_UNREDUCED_LIMB_BITS + 5; // If we encounter this maximum value of a bigfield we need to reduce it. 
- static constexpr uint256_t get_maximum_unreduced_limb_value() { return uint256_t(1) << MAX_UNREDUCED_LIMB_BITS; } + static constexpr uint256_t get_maximum_unreduced_limb_value() + { + return ((uint256_t(1) << MAX_UNREDUCED_LIMB_BITS) - uint256_t(1)); + } // If we encounter this maximum value of a limb we stop execution static constexpr uint256_t get_prohibited_limb_value() { return uint256_t(1) << PROHIBITED_LIMB_BITS; } static_assert(PROHIBITED_LIMB_BITS < MAXIMUM_LIMB_SIZE_THAT_WOULDNT_OVERFLOW); + // For testing purposes only + friend class bigfield_test_access; + private: + /** + * @brief Assert that the current bigfield is less than the given upper limit. + * + * @param upper_limit + * @warning This function is UNSAFE as it assumes that the bigfield element is already reduced. + */ + void unsafe_assert_less_than(const uint256_t& upper_limit, + std::string const& msg = "bigfield::unsafe_assert_less_than") const; + /** * @brief Get the witness indices of the (normalized) binary basis limbs * @@ -1123,6 +1138,37 @@ template class bigfield { }; // namespace stdlib +// NOTE: For testing private functions in bigfield +class bigfield_test_access { + public: + template + static void unsafe_assert_less_than(const bigfield& input, const uint256_t& upper_limit) + { + input.unsafe_assert_less_than(upper_limit); + } + + template + static void unsafe_evaluate_multiply_add(const bigfield& input_left, + const bigfield& input_to_mul, + const std::vector& to_add, + const bigfield& input_quotient, + const std::vector& input_remainders) + { + bigfield::unsafe_evaluate_multiply_add(input_left, input_to_mul, to_add, input_quotient, input_remainders); + } + + template + static void unsafe_evaluate_multiple_multiply_add(const std::vector& input_left, + const std::vector& input_right, + const std::vector& to_add, + const bigfield& input_quotient, + const std::vector& input_remainders) + { + bigfield::unsafe_evaluate_multiple_multiply_add( + input_left, input_right, to_add, 
input_quotient, input_remainders); + } +}; + template inline std::ostream& operator<<(std::ostream& os, bigfield const& v) { return os << v.get_value(); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.test.cpp index 4457667712e7..3602b9358954 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.test.cpp @@ -9,8 +9,11 @@ #include "../field/field.hpp" #include "./bigfield.hpp" #include "barretenberg/circuit_checker/circuit_checker.hpp" +#include "barretenberg/numeric/uintx/uintx.hpp" #include "barretenberg/stdlib/primitives/circuit_builders/circuit_builders.hpp" #include "barretenberg/stdlib/primitives/curves/bn254.hpp" +#include "barretenberg/stdlib/primitives/curves/secp256k1.hpp" +#include "barretenberg/stdlib/primitives/curves/secp256r1.hpp" #include "barretenberg/transcript/origin_tag.hpp" #include #include @@ -22,16 +25,43 @@ namespace { auto& engine = numeric::get_debug_randomness(); } -STANDARD_TESTING_TAGS -template class stdlib_bigfield : public testing::Test { +enum struct InputType { + WITNESS, + CONSTANT, +}; + +constexpr InputType operator!(InputType type) +{ + return (type == InputType::WITNESS) ? InputType::CONSTANT : InputType::WITNESS; +} + +// Helper to extract Builder and Params from bigfield +template struct extract_builder; +template struct extract_fq_params; + +template