diff --git a/.custom-gcl.yml b/.custom-gcl.yml new file mode 100644 index 0000000000..0d2860a229 --- /dev/null +++ b/.custom-gcl.yml @@ -0,0 +1,10 @@ +version: v2.5.0 +name: custom-golangci-lint +plugins: + # partitiontest plugin from local source + - module: 'github.com/algorand/go-algorand/cmd/partitiontest_linter' + path: ./cmd/partitiontest_linter + # errortype plugin for error handling consistency + - module: fillmore-labs.com/errortype + import: fillmore-labs.com/errortype/gclplugin + version: v0.0.7 diff --git a/.github/workflows/ci-nightly.yml b/.github/workflows/ci-nightly.yml index 283475e416..c1a821cfbb 100644 --- a/.github/workflows/ci-nightly.yml +++ b/.github/workflows/ci-nightly.yml @@ -119,6 +119,14 @@ jobs: job-type: "Test" build-type: "Nightly Build" details: "• Partition: `${{ matrix.partition_id }}` of ${{ env.PARTITION_TOTAL }}\n• Failed Step: `${{ steps.run_tests.name }}`" + - name: Upload test logs on failure + if: failure() + uses: actions/upload-artifact@v4 + with: + name: test-logs-${{ matrix.platform }}-${{ github.run_id }}-${{ matrix.partition_id }} + path: | + **/*.log + retention-days: 30 - name: Upload test artifacts to GitHub uses: actions/upload-artifact@v4 with: @@ -271,6 +279,12 @@ jobs: CI_KEEP_TEMP_PLATFORM: "ubuntu-24.04" S3_TESTDATA: ${{ secrets.S3_TESTDATA }} steps: + - name: Set CI_E2E_FILENAME for e2e test data publishing + run: | + # Set CI_E2E_FILENAME based on branch name, replacing '/' with '-' - used when publishing e2e test data to S3 (e2e.sh) for indexer tests + BRANCH_NAME="${{ github.event.inputs.branch || github.ref_name }}" + MODIFIED_BRANCH_NAME="${BRANCH_NAME//\//-}" + echo "CI_E2E_FILENAME=${MODIFIED_BRANCH_NAME}" >> $GITHUB_ENV - name: Download workspace archive uses: actions/download-artifact@v4 with: diff --git a/.github/workflows/ci-pr.yml b/.github/workflows/ci-pr.yml index 3856e9a65b..18bccfb797 100644 --- a/.github/workflows/ci-pr.yml +++ b/.github/workflows/ci-pr.yml @@ -64,6 +64,14 @@ jobs: job-type: 
"Test" build-type: "PR Build" details: "• Partition: `${{ matrix.partition_id }}` of ${{ env.PARTITION_TOTAL }}\n• Failed Step: `${{ steps.run_tests.name }}`" + - name: Upload test logs on failure + if: failure() + uses: actions/upload-artifact@v4 + with: + name: test-logs-${{ matrix.platform }}-${{ github.run_id }}-${{ matrix.partition_id }} + path: | + **/*.log + retention-days: 30 - name: Upload test artifacts to GitHub if: ${{ !cancelled() }} uses: actions/upload-artifact@v4 diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml index 81f5564487..bab80f0363 100644 --- a/.github/workflows/reviewdog.yml +++ b/.github/workflows/reviewdog.yml @@ -1,6 +1,6 @@ name: "ReviewDog workflow" env: - GOLANGCI_LINT_VERSION: "v1.62.0" + GOLANGCI_LINT_VERSION: "v2.6.0" on: push: branches: @@ -20,20 +20,42 @@ jobs: with: path: crypto/libs key: libsodium-ubuntu-latest-${{ hashFiles('crypto/libsodium-fork/**') }} - # move go out of the way temporarily to avoid "go list ./..." from installing modules + # move go out of the way temporarily to avoid "go list ./..." 
from installing modules - name: Make libsodium.a run: sudo mv /usr/bin/go /usr/bin/go.bak && make libsodium && sudo mv /usr/bin/go.bak /usr/bin/go - - name: reviewdog-golangci-lint - uses: reviewdog/action-golangci-lint@v2.7.0 - with: - go_version_file: go.mod - golangci_lint_version: ${{ env.GOLANGCI_LINT_VERSION }} - golangci_lint_flags: "-c .golangci.yml --allow-parallel-runners" - reporter: "github-pr-check" - tool_name: "Lint Errors" - level: "error" - fail_level: any - filter_mode: "nofilter" + - name: Add bin to PATH + run: | + echo "$GITHUB_WORKSPACE/bin" >> $GITHUB_PATH + echo "$RUNNER_WORKSPACE/$(basename $GITHUB_REPOSITORY)/bin" >> $GITHUB_PATH + - name: Set up Go + uses: ./.github/actions/setup-go + - name: Install golangci-lint + run: | + go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@${{ env.GOLANGCI_LINT_VERSION }} + golangci-lint --version + - name: Install reviewdog + run: | + curl -sfL https://raw.githubusercontent.com/reviewdog/reviewdog/v0.21.0/install.sh | sh -s -- v0.21.0 + reviewdog --version + - name: Run golangci-lint with reviewdog + env: + REVIEWDOG_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + set -e + + golangci-lint run \ + --output.text.path stdout \ + -c .golangci.yml \ + --issues-exit-code 0 \ + --allow-parallel-runners > temp_golangci-lint-errors.txt + + cat temp_golangci-lint-errors.txt | reviewdog \ + -f=golangci-lint \ + -name="Lint Errors" \ + -reporter=github-pr-check \ + -filter-mode=nofilter \ + -fail-level=any \ + -level=error # Non-Blocking Warnings Section reviewdog-warnings: runs-on: ubuntu-latest @@ -56,47 +78,34 @@ jobs: echo "$RUNNER_WORKSPACE/$(basename $GITHUB_REPOSITORY)/bin" >> $GITHUB_PATH - name: Set up Go uses: ./.github/actions/setup-go - - name: Create folders for golangci-lint - run: mkdir -p cicdtmp/golangci-lint - - name: Check if custom golangci-lint is already built - id: cache-golangci-lint + - name: Check if custom golangci-lint with partitiontest plugin is already built + 
id: cache-custom-golangci-lint uses: actions/cache@v4 with: - path: cicdtmp/golangci-lint/golangci-lint-cgo - key: cicd-golangci-lint-cgo-v0.0.3-${{ env.GO_VERSION }}-${{ env.GOLANGCI_LINT_VERSION }} - - - name: Build custom golangci-lint with CGO_ENABLED - if: steps.cache-golangci-lint.outputs.cache-hit != 'true' + path: custom-golangci-lint + key: custom-golangci-lint-${{ env.GO_VERSION }}-${{ env.GOLANGCI_LINT_VERSION }}-${{ hashFiles('cmd/partitiontest_linter/**', '.custom-gcl.yml') }} + - name: Build custom golangci-lint with partitiontest plugin + if: steps.cache-custom-golangci-lint.outputs.cache-hit != 'true' run: | - cd cicdtmp/golangci-lint - git clone https://github.com/golangci/golangci-lint.git . - git checkout tags/${GOLANGCI_LINT_VERSION} - CGO_ENABLED=true go build -trimpath -o golangci-lint-cgo ./cmd/golangci-lint - ./golangci-lint-cgo --version - cd ../../ + go run github.com/golangci/golangci-lint/v2/cmd/golangci-lint@${{ env.GOLANGCI_LINT_VERSION }} custom -v + ./custom-golangci-lint --version - name: Install reviewdog run: | - curl -sfL https://raw.githubusercontent.com/reviewdog/reviewdog/v0.20.3/install.sh | sh -s -- v0.20.3 + curl -sfL https://raw.githubusercontent.com/reviewdog/reviewdog/v0.21.0/install.sh | sh -s -- v0.21.0 reviewdog --version - - name: Build custom linters - run: | - cd cmd/partitiontest_linter/ - CGO_ENABLED=true go build -buildmode=plugin -trimpath plugin/plugin.go - cd ../../ - ls -la cmd/partitiontest_linter/ - name: Run golangci-lint with reviewdog env: REVIEWDOG_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | set -e - ./cicdtmp/golangci-lint/golangci-lint-cgo run \ - --out-format line-number \ + ./custom-golangci-lint run \ + --output.text.path stdout \ -c .golangci-warnings.yml \ --issues-exit-code 0 \ - --allow-parallel-runners > temp_golangci-lint-cgo.txt + --allow-parallel-runners > temp_golangci-lint-warnings.txt - cat temp_golangci-lint-cgo.txt | reviewdog \ + cat temp_golangci-lint-warnings.txt | 
reviewdog \ -f=golangci-lint \ -name="Lint Warnings" \ -reporter=github-pr-check \ diff --git a/.gitignore b/.gitignore index b553f63bce..2845f0a7f1 100644 --- a/.gitignore +++ b/.gitignore @@ -24,6 +24,9 @@ cmd/algod/algod cmd/goal/goal cmd/updater/updater +# custom golangci-lint binary with plugins +custom-golangci-lint + # Exclude our local temp directory tmp/ diff --git a/.golangci-warnings.yml b/.golangci-warnings.yml index 8ab68f9faf..2bc88696f2 100644 --- a/.golangci-warnings.yml +++ b/.golangci-warnings.yml @@ -1,66 +1,46 @@ +version: "2" run: - timeout: 5m tests: true - linters: - disable-all: true + default: none enable: - gosec - partitiontest - -linters-settings: - gosec: # Go 1.22 makes G601 irrelevant - excludes: [G101, G103, G104, G107, G115, G202, G301, G302, G303, G304, G306, G307, G404, G601] - custom: - partitiontest: - path: cmd/partitiontest_linter/plugin.so - description: This custom linter checks files that end in '_test.go', specifically functions that start with 'Test' and have testing argument, for a line 'partitiontest.ParitionTest()' - original-url: github.com/algorand/go-algorand/cmd/partitiontest_linter - -severity: - default-severity: warning - + - errortype + settings: + gosec: + excludes: [G101, G103, G104, G107, G112, G114, G115, G202, G204, G301, G302, G303, G304, G306, G307, G404] + custom: + partitiontest: + type: "module" + description: This custom linter ensures test functions call 'partitiontest.PartitionTest(t)' + errortype: + type: module + description: "errortype helps prevent subtle bugs in error handling." 
+ original-url: "https://fillmore-labs.com/errortype" + settings: + style-check: false + deep-is-check: true + check-is: true + unchecked-assert: false + check-unused: true + exclusions: + generated: lax + rules: + # be more lenient with test code + - linters: + - gosec + path: _test\.go + - linters: + - partitiontest + path: crypto/secp256k1/secp256_test\.go issues: - # use these new lint checks on code since #2574 - new-from-rev: eb019291beed556ec6ac1ceb4a15114ce4df0c57 - - # Disable default exclude rules listed in `golangci-lint run --help` (selectively re-enable some below) - exclude-use-default: false - # Maximum issues count per one linter. Set to 0 to disable. Default is 50. max-issues-per-linter: 0 - # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. max-same-issues: 0 - - exclude: - # ignore govet false positive fixed in https://github.com/golang/go/issues/45043 - - "sigchanyzer: misuse of unbuffered os.Signal channel as argument to signal.Notify" - # ignore issues about the way we use _struct fields to define encoding settings - - "`_struct` is unused" - - # Enable some golangci-lint default exception rules: - # "EXC0001 errcheck: Almost all programs ignore errors on these functions and in most cases it's ok" - - Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked - # "EXC0005 staticcheck: Developers tend to write in C-style with an explicit 'break' in a 'switch', so it's ok to ignore" - - ineffective break statement. 
Did you mean to break out of the outer loop - - exclude-rules: - # be more lenient with test code - - path: _test\.go - linters: - - deadcode - - gosec - - structcheck - - varcheck - - unused - # Add all linters here -- Comment this block out for testing linters - - path: test/linttest/lintissues\.go - linters: - - deadcode - - structcheck - - varcheck - - unused - - path: crypto/secp256k1/secp256_test\.go - linters: - - partitiontest +severity: + default: warning +formatters: + exclusions: + generated: lax diff --git a/.golangci.yml b/.golangci.yml index 4c09369e1f..50996eed33 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,80 +1,151 @@ +version: "2" run: - timeout: 5m tests: true - + linters: - # default: deadcode, errcheck, gosimple, govet, ineffassign, staticcheck, typecheck, unused, varcheck - disable-all: true + default: none enable: - - errcheck + - asciicheck + - bidichk - copyloopvar - - gofmt - - gosimple + - dupword + - errcheck - govet - ineffassign + - iotamixing - misspell - nilerr + - nilnesserr - nolintlint - paralleltest + - reassign - revive + #- sqlclosecheck - staticcheck - - typecheck + - testifylint - unused + + settings: + dupword: + comments-only: true + ignore: + - long # "long long" is OK + errcheck: + exclude-functions: + # We do this 121 times and never check the error. + - (*github.com/spf13/cobra.Command).MarkFlagRequired + - (*github.com/spf13/pflag.FlagSet).MarkDeprecated + - (*github.com/spf13/pflag.FlagSet).MarkShorthandDeprecated + govet: + # Enables these linters in addition to the default ones. + enable: + - nilness + - reflectvaluecompare + - shadow + - sortslice + - waitgroup + disable: + - buildtag + settings: + printf: + # Comma-separated list of print function names to check (in addition to default, see `go tool vet help printf`). 
+ funcs: + - (github.com/algorand/go-algorand/logging.Logger).Debugf + - (github.com/algorand/go-algorand/logging.Logger).Infof + - (github.com/algorand/go-algorand/logging.Logger).Warnf + - (github.com/algorand/go-algorand/logging.Logger).Errorf + - (github.com/algorand/go-algorand/logging.Logger).Fatalf + - (github.com/algorand/go-algorand/logging.Logger).Panicf + - (github.com/algorand/go-algorand/logging.Logger).Debugln + - (github.com/algorand/go-algorand/logging.Logger).Infoln + - (github.com/algorand/go-algorand/logging.Logger).Warnln + - (github.com/algorand/go-algorand/logging.Logger).Errorln + - (github.com/algorand/go-algorand/logging.Logger).Fatalln + - (github.com/algorand/go-algorand/logging.Logger).Panicln + - (github.com/algorand/go-algorand/logging.Logger).Debug + - (github.com/algorand/go-algorand/logging.Logger).Info + - (github.com/algorand/go-algorand/logging.Logger).Warn + - (github.com/algorand/go-algorand/logging.Logger).Error + - (github.com/algorand/go-algorand/logging.Logger).Fatal + - (github.com/algorand/go-algorand/logging.Logger).Panic + - (github.com/algorand/go-algorand/cmd/goal/main).reportInfof + - (github.com/algorand/go-algorand/cmd/goal/main).reportInfoln + - (github.com/algorand/go-algorand/cmd/goal/main).reportWarnf + - (github.com/algorand/go-algorand/cmd/goal/main).reportWarnln + - (github.com/algorand/go-algorand/cmd/goal/main).reportWarnRawf + - (github.com/algorand/go-algorand/cmd/goal/main).reportWarnRawln + - (github.com/algorand/go-algorand/cmd/goal/main).reportErrorf + - (github.com/algorand/go-algorand/cmd/goal/main).reportErrorln + shadow: + # explanation of strict vs non-strict: + # https://github.com/golang/tools/blob/v0.7.0/go/analysis/passes/shadow/shadow.go#L104-L122 + strict: false + nolintlint: + # require naming a specific linter X using //nolint:X + require-specific: true + # require comments like "//nolint:errcheck // Explanation of why we are ignoring linter here..." 
+ require-explanation: true + staticcheck: + checks: + - all + - '-ST1000' # don't require package comments + - '-ST1003' # don't require currentDBRound vs currentDbRound + - '-ST1016' # OK to have mismatched receiver names + - '-SA3001' # we assign to b.N in several benchmarks + - '-SA1019' # TODO should fix, in Go 1.24 rand.Seed() is a no-op + - '-QF1008' # ignore suggestions to remove embedded fields (e.g., txn.SignedTxn.Txn -> txn.Txn) + - '-ST1005' # ignore "error strings should not be capitalized" + - '-QF1001' # ignore De Morgan's law suggestions + - '-QF1003' # ignore suggestions to replace if/else chain with switch + testifylint: + enable: + - error-is-as + disable-all: true -severity: - default-severity: error + exclusions: + generated: lax + rules: + # exclude all issues from several linters from test code (TODO should fix these issues) + - path: _test\.go + linters: + - errcheck + - ineffassign + - nolintlint + - staticcheck + - unused + - linters: govet + path: _test\.go + text: shadows declaration at line # allow shadowing in test code + - linters: paralleltest # Ignore missing t.Parallel calls in the following packages + path: ^(agreement|catchup|config|crypto|daemon|data|gen|ledger|logging|network|node|protocol|rpcs|stateproof|test|tools|util).*_test\.go -linters-settings: - nolintlint: - # require naming a specific linter X using //nolint:X - require-specific: true - # require comments like "//nolint:errcheck // Explanation of why we are ignoring linter here..." - require-explanation: true - errcheck: - exclude-functions: - # We do this 121 times and never check the error. - - (*github.com/spf13/cobra.Command).MarkFlagRequired - - (*github.com/spf13/pflag.FlagSet).MarkDeprecated - - (*github.com/spf13/pflag.FlagSet).MarkShorthandDeprecated - govet: - # Enables these linters in addition to the default ones. 
- enable: - - shadow - settings: - shadow: - # explanation of strict vs non-strict: - # https://github.com/golang/tools/blob/v0.7.0/go/analysis/passes/shadow/shadow.go#L104-L122 - strict: false - printf: - # Comma-separated list of print function names to check (in addition to default, see `go tool vet help printf`). - # Default: [] - funcs: - - (github.com/algorand/go-algorand/logging.Logger).Debugf - - (github.com/algorand/go-algorand/logging.Logger).Infof - - (github.com/algorand/go-algorand/logging.Logger).Warnf - - (github.com/algorand/go-algorand/logging.Logger).Errorf - - (github.com/algorand/go-algorand/logging.Logger).Fatalf - - (github.com/algorand/go-algorand/logging.Logger).Panicf - - (github.com/algorand/go-algorand/logging.Logger).Debugln - - (github.com/algorand/go-algorand/logging.Logger).Infoln - - (github.com/algorand/go-algorand/logging.Logger).Warnln - - (github.com/algorand/go-algorand/logging.Logger).Errorln - - (github.com/algorand/go-algorand/logging.Logger).Fatalln - - (github.com/algorand/go-algorand/logging.Logger).Panicln - - (github.com/algorand/go-algorand/logging.Logger).Debug - - (github.com/algorand/go-algorand/logging.Logger).Info - - (github.com/algorand/go-algorand/logging.Logger).Warn - - (github.com/algorand/go-algorand/logging.Logger).Error - - (github.com/algorand/go-algorand/logging.Logger).Fatal - - (github.com/algorand/go-algorand/logging.Logger).Panic - - (github.com/algorand/go-algorand/cmd/goal/main).reportInfof - - (github.com/algorand/go-algorand/cmd/goal/main).reportInfoln - - (github.com/algorand/go-algorand/cmd/goal/main).reportWarnf - - (github.com/algorand/go-algorand/cmd/goal/main).reportWarnln - - (github.com/algorand/go-algorand/cmd/goal/main).reportWarnRawf - - (github.com/algorand/go-algorand/cmd/goal/main).reportWarnRawln - - (github.com/algorand/go-algorand/cmd/goal/main).reportErrorf - - (github.com/algorand/go-algorand/cmd/goal/main).reportErrorln + # Enable default golangci-lint exclusion: "Almost all 
programs ignore errors on these functions and in most cases it's ok" + - linters: errcheck + text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked + - linters: revive + path: _test\.go + text: 'dot-imports: should not use dot imports' # dot imports OK for tests + - linters: revive + path: util/ + text: 'var-naming: avoid meaningless package names' # util package is OK + - linters: unused + text: 'field _struct is unused' # we use _struct field tags for msgp/json encoding settings + - linters: revive + text: '^unused-parameter: parameter' + - linters: revive + text: '^package-comments: should have a package comment' + - linters: revive + text: '^unexported-return: exported func .* returns unexported type .*, which can be annoying' + - linters: revive + text: '^redefines-builtin-id: redefinition of the built-in (func|type)' + - linters: revive + text: '^var-declaration: should omit type .* from declaration of var .*; it will be inferred from the right-hand side' + - linters: revive + text: '^var-declaration: should drop .* from declaration of var .*; it is the zero value' + - linters: revive + text: '^empty-block: this block is empty, you can remove it' + - linters: revive + text: '^superfluous-else: if block ends with .* so drop this else and outdent its block' issues: # Work our way back over time to be clean against all these @@ -82,142 +153,18 @@ issues: # run the linter and dig in. new-from-rev: eb019291beed556ec6ac1ceb4a15114ce4df0c57~25 - # Disable default exclude rules listed in `golangci-lint run --help` (selectively re-enable some below) - exclude-use-default: false - # Maximum issues count per one linter. Set to 0 to disable. Default is 50. max-issues-per-linter: 0 # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. 
max-same-issues: 0 +severity: + default: error - exclude: - # ignore govet false positive fixed in https://github.com/golang/go/issues/45043 - - "sigchanyzer: misuse of unbuffered os.Signal channel as argument to signal.Notify" - # ignore golint false positive fixed in https://github.com/golang/lint/pull/487 - - "exported method (.*).Unwrap` should have comment or be unexported" - # ignore issues about the way we use _struct fields to define encoding settings - - "`_struct` is unused" - - # Enable some golangci-lint default exception rules: - # "EXC0001 errcheck: Almost all programs ignore errors on these functions and in most cases it's ok" - - Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked - # "EXC0005 staticcheck: Developers tend to write in C-style with an explicit 'break' in a 'switch', so it's ok to ignore" - - ineffective break statement. Did you mean to break out of the outer loop - # revive: irrelevant error about naming - - "^var-naming: don't use leading k in Go names" - # revive: ignore unused-paramter, package-comments, unexported-return, redefines-builtin-id, var-declaration, empty-block, superfluous-else - - "^unused-parameter: parameter" - - "^package-comments: should have a package comment" - - "^unexported-return: " - - "^redefines-builtin-id: redefinition of" - - "^var-declaration: should" - - "^empty-block: this block is empty, you can remove it" - - "^superfluous-else: if block ends with" - - exclude-rules: - - path: cmd/algofix/ - linters: unused - - path: cmd/algocfg/ - linters: unused - - path: cmd/catchpointdump/ - linters: unused - - path: tools/ - linters: unused - - path: daemon/kmd/lib/kmdapi/ - linters: unused - - path: _test\.go - linters: - - errcheck - # - gofmt - - gosimple - # - govet - - ineffassign - - misspell - # - nilerr - - nolintlint - # - revive - # - staticcheck - - typecheck - - unused - - path: _test\.go - linters: - - staticcheck - text: 
"SA4006: this value" # of X is never used - - linters: - - staticcheck - text: "(SA3001|SA1019):" - - path: _test\.go - linters: - - revive - text: "dot-imports: should not use dot imports" - - linters: - - staticcheck - text: "SA1019: rand*" - # allow shadowing in test code - - path: _test\.go - linters: - - govet - text: "shadows declaration at line" - # Ignore missing parallel tests in existing packages - - path: ^agreement.*_test\.go - linters: - - paralleltest - - path: ^catchup.*_test\.go - linters: - - paralleltest - - path: ^config.*_test\.go - linters: - - paralleltest - - path: ^crypto.*_test\.go - linters: - - paralleltest - - path: ^daemon.*_test\.go - linters: - - paralleltest - - path: ^data.*_test\.go - linters: - - paralleltest - - path: ^gen.*_test\.go - linters: - - paralleltest - - path: ^ledger.*_test\.go - linters: - - paralleltest - - path: ^logging.*_test\.go - linters: - - paralleltest - - path: ^network.*_test\.go - linters: - - paralleltest - - path: ^node.*_test\.go - linters: - - paralleltest - - path: ^protocol.*_test\.go - linters: - - paralleltest - - path: ^rpcs.*_test\.go - linters: - - paralleltest - - path: ^stateproof.*_test\.go - linters: - - paralleltest - - path: ^test.*_test\.go - linters: - - paralleltest - - path: ^tools.*_test\.go - linters: - - paralleltest - - path: ^util.*_test\.go - linters: - - paralleltest - # Add all linters here -- Comment this block out for testing linters - - path: test/linttest/lintissues\.go - linters: - - errcheck - - gofmt - - revive - - govet - - ineffassign - - misspell - - unused +formatters: + enable: + - gofmt + exclusions: + generated: lax + paths: + - test/linttest/lintissues\.go diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 461f9eebe3..67d9737903 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -23,7 +23,7 @@ The core development team monitors the Algorand [discord community](https://disc ## Pull Requests -All changes are are made via pull requests. 
+All changes are made via pull requests. Small changes are easier to review and merge than large ones, so the more focused a PR the better. If a feature requires refactoring, the refactoring should be a separate PR. If refactoring uncovers a bug, the fix should be a separate PR. These are not strict rules, but generally speaking, they make things easier to review which speeds up the PR process. diff --git a/Dockerfile b/Dockerfile index 04750adc5e..e63cce2972 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ FROM ubuntu:24.04 AS builder -ARG GO_VERSION="1.23.9" +ARG GO_VERSION="1.25.3" ARG CHANNEL ARG URL diff --git a/Makefile b/Makefile index 759e13d5ca..cd8f077027 100644 --- a/Makefile +++ b/Makefile @@ -106,6 +106,9 @@ fmt: fix: build $(GOBIN)/algofix */ +modernize: + GOTOOLCHAIN=auto go run golang.org/x/tools/go/analysis/passes/modernize/cmd/modernize@latest -any=false -bloop=false -rangeint=false -fmtappendf=false -waitgroup=false -stringsbuilder=false -omitzero=false -fix ./... + lint: deps $(GOBIN)/golangci-lint run -c .golangci.yml @@ -129,7 +132,7 @@ tidy: check_go_version check_shell: find . 
-type f -name "*.sh" -exec shellcheck {} + -sanity: fix lint fmt tidy +sanity: fix lint fmt tidy modernize cover: go test $(GOTAGS) -coverprofile=cover.out $(UNIT_TEST_SOURCES) @@ -299,7 +302,7 @@ build-e2e: check-go-version crypto/libs/$(OS_TYPE)/$(ARCH)/lib/libsodium.a @mkdir -p $(GOBIN)-race # Build regular binaries (kmd, algod, goal) and race binaries in parallel $(GO_INSTALL) -trimpath $(GOTAGS) $(GOBUILDMODE) -ldflags="$(GOLDFLAGS)" ./cmd/kmd ./cmd/algod ./cmd/goal & \ - GOBIN=$(GOBIN)-race go install -trimpath $(GOTAGS) -race -ldflags="$(GOLDFLAGS)" ./cmd/goal ./cmd/algod ./cmd/algoh ./cmd/tealdbg ./cmd/msgpacktool ./cmd/algokey ./tools/teal/algotmpl ./test/e2e-go/cli/tealdbg/cdtmock & \ + GOBIN=$(GOBIN)-race go install -trimpath $(GOTAGS) -race -ldflags="$(GOLDFLAGS)" ./cmd/goal ./cmd/algod ./cmd/algoh ./cmd/tealdbg ./cmd/msgpacktool ./cmd/algokey ./cmd/pingpong ./tools/teal/algotmpl ./test/e2e-go/cli/tealdbg/cdtmock & \ wait cp $(GOBIN)/kmd $(GOBIN)-race @@ -412,7 +415,7 @@ dump: $(addprefix gen/,$(addsuffix /genesis.dump, $(NETWORKS))) install: build scripts/dev_install.sh -p $(GOBIN) -.PHONY: default fmt lint check_shell sanity cover prof deps build build-race build-e2e test fulltest shorttest clean cleango deploy node_exporter install %gen gen NONGO_BIN check-go-version rebuild_kmd_swagger universal libsodium +.PHONY: default fmt lint check_shell sanity cover prof deps build build-race build-e2e test fulltest shorttest clean cleango deploy node_exporter install %gen gen NONGO_BIN check-go-version rebuild_kmd_swagger universal libsodium modernize ###### TARGETS FOR CICD PROCESS ###### include ./scripts/release/mule/Makefile.mule @@ -421,5 +424,5 @@ archive: aws s3 cp tmp/node_pkgs s3://algorand-internal/channel/$(CHANNEL)/$(FULLBUILDNUMBER) --recursive --exclude "*" --include "*$(FULLBUILDNUMBER)*" build_custom_linters: - cd $(SRCPATH)/cmd/partitiontest_linter/ && go build -buildmode=plugin -trimpath plugin/plugin.go && ls plugin.so - cd $(SRCPATH) + 
golangci-lint custom -v + ./custom-golangci-lint --version diff --git a/README.md b/README.md index 2b5ad446ea..31a4299094 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ Algorand is a permissionless, pure proof-of-stake blockchain that delivers decen ## Getting Started -Visit our [developer website](https://developer.algorand.org/) for the most up-to-date information about using and installing the Algorand platform. +Visit our [developer website](https://dev.algorand.co/) for the most up-to-date information about using and installing the Algorand platform. ## Building from Source diff --git a/agreement/credentialArrivalHistory.go b/agreement/credentialArrivalHistory.go index 321ec5b3a4..b79237b547 100644 --- a/agreement/credentialArrivalHistory.go +++ b/agreement/credentialArrivalHistory.go @@ -17,7 +17,7 @@ package agreement import ( - "sort" + "slices" "time" ) @@ -78,6 +78,6 @@ func (history *credentialArrivalHistory) orderStatistics(idx int) time.Duration // the linear time order statistics algorithm. 
sortedArrivals := make([]time.Duration, len(history.history)) copy(sortedArrivals[:], history.history[:]) - sort.Slice(sortedArrivals, func(i, j int) bool { return sortedArrivals[i] < sortedArrivals[j] }) + slices.Sort(sortedArrivals) return sortedArrivals[idx] } diff --git a/agreement/fuzzer/ledger_test.go b/agreement/fuzzer/ledger_test.go index efe21958ad..6fb752a565 100644 --- a/agreement/fuzzer/ledger_test.go +++ b/agreement/fuzzer/ledger_test.go @@ -19,6 +19,7 @@ package fuzzer import ( "context" "fmt" + "maps" "math/rand" "github.com/algorand/go-algorand/agreement" @@ -152,11 +153,8 @@ func makeTestLedger(state map[basics.Address]basics.AccountData, sync testLedger l.certs = make(map[basics.Round]agreement.Certificate) l.nextRound = 1 - // deep copy of state l.state = make(map[basics.Address]basics.AccountData) - for k, v := range state { - l.state[k] = v - } + maps.Copy(l.state, state) l.notifications = make(map[basics.Round]signal) l.EnsuringDigestStartCh = make(chan struct{}) diff --git a/agreement/fuzzer/networkFacade_test.go b/agreement/fuzzer/networkFacade_test.go index 47ba719b5a..1d0d7d622a 100644 --- a/agreement/fuzzer/networkFacade_test.go +++ b/agreement/fuzzer/networkFacade_test.go @@ -332,7 +332,7 @@ func (n *NetworkFacade) pushPendingReceivedMessage() bool { case network.Broadcast: n.broadcast(storedMsg.tag, storedMsg.data, -1, "NetworkFacade service-%v Broadcast-Action %v %v\n") default: - panic(nil) // not handled; agreement doesn't currently use this one. 
+ panic(fmt.Sprintf("unhandled network action %v", outMsg.Action)) } if n.debugMessages { diff --git a/agreement/fuzzer/tests_test.go b/agreement/fuzzer/tests_test.go index 5970d55b2e..408feeeadc 100644 --- a/agreement/fuzzer/tests_test.go +++ b/agreement/fuzzer/tests_test.go @@ -492,10 +492,7 @@ func TestNetworkBandwidth(t *testing.T) { deadlock.Opts.Disable = true rnd := rand.New(rand.NewSource(0)) - k := 4 // outgoing connections - if k > relayCounts { - k = relayCounts - } + k := min(4, relayCounts) // outgoing connections statConf := &TrafficStatisticsFilterConfig{ OutputFormat: 2, } @@ -563,10 +560,7 @@ func TestUnstakedNetworkLinearGrowth(t *testing.T) { relayMaxBandwidth := []int{} - k := 4 // outgoing connections - if k > relayCount { - k = relayCount - } + k := min(4, relayCount) // outgoing connections statConf := &TrafficStatisticsFilterConfig{ OutputFormat: 0, } @@ -674,10 +668,7 @@ func TestStakedNetworkQuadricGrowth(t *testing.T) { totalRelayedMessages := []int{} deadlock.Opts.Disable = true - k := 2 // outgoing connections - if k > relayCount { - k = relayCount - } + k := min(2, relayCount) // outgoing connections statConf := &TrafficStatisticsFilterConfig{ OutputFormat: 0, } @@ -784,10 +775,7 @@ func TestRegossipinngElimination(t *testing.T) { nodeCount := 20 deadlock.Opts.Disable = true rnd := rand.New(rand.NewSource(0)) - k := 4 // outgoing connections - if k > relayCounts { - k = relayCounts - } + k := min(4, relayCounts) // outgoing connections statConf := &TrafficStatisticsFilterConfig{ OutputFormat: 2, } @@ -880,10 +868,7 @@ func BenchmarkNetworkPerformance(b *testing.B) { // disable deadlock checking code deadlock.Opts.Disable = true - k := 4 // outgoing connections - if k > relayCount { - k = relayCount - } + k := min(4, relayCount) // outgoing connections statConf := &TrafficStatisticsFilterConfig{ OutputFormat: 0, } diff --git a/agreement/fuzzer/voteFilter_test.go b/agreement/fuzzer/voteFilter_test.go index 23c1bcb3be..c0fcbaf7cf 100644 
--- a/agreement/fuzzer/voteFilter_test.go +++ b/agreement/fuzzer/voteFilter_test.go @@ -90,7 +90,7 @@ func MakeVoteFilter(voteFilterConfig *VoteFilterConfig) *VoteFilter { } } func (n *VoteFilter) Eval(tag protocol.Tag, data []byte, direction string) bool { - msgDecoder := n.fuzzer.facades[n.nodeID].GetFilterByType(reflect.TypeOf(&MessageDecoderFilter{})).(*MessageDecoderFilter) + msgDecoder := n.fuzzer.facades[n.nodeID].GetFilterByType(reflect.TypeFor[*MessageDecoderFilter]()).(*MessageDecoderFilter) if msgDecoder == nil { return true } diff --git a/agreement/gossip/networkFull_test.go b/agreement/gossip/networkFull_test.go index d2f0886b4d..a48e9ddd55 100644 --- a/agreement/gossip/networkFull_test.go +++ b/agreement/gossip/networkFull_test.go @@ -322,10 +322,7 @@ func testNetworkImplRebroadcast(t *testing.T, nodesCount int, cfg config.Local) nets, counters := spinNetwork(t, nodesCount, cfg) defer shutdownNetwork(nets, counters) - rebroadcastNodes := nodesCount - if rebroadcastNodes > 3 { - rebroadcastNodes = 3 - } + rebroadcastNodes := min(nodesCount, 3) for i := byte(0); i < byte(rebroadcastNodes); i++ { ok := nets[i].Broadcast(protocol.AgreementVoteTag, []byte{i, i + 1}) assert.NoError(t, ok) diff --git a/agreement/message.go b/agreement/message.go index e372b06fbe..e62dd63b0f 100644 --- a/agreement/message.go +++ b/agreement/message.go @@ -18,7 +18,6 @@ package agreement import ( "github.com/algorand/go-algorand/protocol" - "github.com/algorand/msgp/msgp" ) // A message represents an internal message which is passed between components @@ -26,9 +25,6 @@ import ( type message struct { _struct struct{} `codec:","` - // this field is for backwards compatibility with crash state serialized using go-codec prior to explicit unexport. - // should be removed after the next consensus update. 
- MessageHandle msgp.Raw `codec:"MessageHandle,omitempty"` // explicitly unexport this field since we can't define serializers for interface{} type // the only implementation of this is gossip.messageMetadata which doesn't have exported fields to serialize. messageHandle MessageHandle diff --git a/agreement/message_test.go b/agreement/message_test.go index a71a29830b..4c7712c591 100644 --- a/agreement/message_test.go +++ b/agreement/message_test.go @@ -17,7 +17,6 @@ package agreement import ( - "encoding/base64" "testing" "github.com/stretchr/testify/require" @@ -25,9 +24,7 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/committee" - "github.com/algorand/go-algorand/network" "github.com/algorand/go-algorand/protocol" - "github.com/algorand/go-algorand/test/partitiontest" ) var poolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} @@ -83,52 +80,3 @@ func BenchmarkVoteDecoding(b *testing.B) { decodeVote(msgBytes) } } - -// TestMessageBackwardCompatibility ensures MessageHandle field can be -// properly decoded from message. -// This test is only needed for agreement state serialization switch from reflection to msgp. 
-func TestMessageBackwardCompatibility(t *testing.T) { - partitiontest.PartitionTest(t) - - type messageMetadata struct { - raw network.IncomingMessage - } - - encoded, err := base64.StdEncoding.DecodeString("iaZCdW5kbGWAr0NvbXBvdW5kTWVzc2FnZYKoUHJvcG9zYWyApFZvdGWArU1lc3NhZ2VIYW5kbGWAqFByb3Bvc2FsgKNUYWeiUFC1VW5hdXRoZW50aWNhdGVkQnVuZGxlgLdVbmF1dGhlbnRpY2F0ZWRQcm9wb3NhbICzVW5hdXRoZW50aWNhdGVkVm90ZYCkVm90ZYA=") - require.NoError(t, err) - - // run on master f57a276 to get the encoded data for above - // msg := message{ - // MessageHandle: &messageMetadata{raw: network.IncomingMessage{Tag: protocol.Tag("mytag"), Data: []byte("some data")}}, - // Tag: protocol.ProposalPayloadTag, - // } - - // result := protocol.EncodeReflect(&msg) - // fmt.Println(base64.StdEncoding.EncodeToString(result)) - - // messages for all rounds after this change should not have MessageHandle set so clearing it out and re-encoding/decoding it should yield this - targetMessage := message{ - Tag: protocol.ProposalPayloadTag, - } - - require.Containsf(t, string(encoded), "MessageHandle", "encoded message does not contain MessageHandle field") - var m1, m2, m3, m4 message - // Both msgp and reflection should decode the message containing old MessageHandle successfully - err = protocol.Decode(encoded, &m1) - require.NoError(t, err) - err = protocol.DecodeReflect(encoded, &m2) - require.NoError(t, err) - // after setting MessageHandle to nil both should re-encode and decode to same values - m1.MessageHandle = nil - m2.MessageHandle = nil - e1 := protocol.Encode(&m1) - e2 := protocol.EncodeReflect(&m2) - require.Equal(t, e1, e2) - require.NotContainsf(t, string(e1), "MessageHandle", "encoded message still contains MessageHandle field") - err = protocol.DecodeReflect(e1, &m3) - require.NoError(t, err) - err = protocol.Decode(e2, &m4) - require.NoError(t, err) - require.Equal(t, m3, m4) - require.Equal(t, m3, targetMessage) -} diff --git a/agreement/msgp_gen.go b/agreement/msgp_gen.go index 
16d464d39a..f6921ceb92 100644 --- a/agreement/msgp_gen.go +++ b/agreement/msgp_gen.go @@ -1051,8 +1051,6 @@ func (z *ConsensusVersionView) MsgIsZero() bool { func ConsensusVersionViewMaxSize() (s int) { s = 1 + 4 panic("Unable to determine max size: String type string(*z.Err) is unbounded") - s += 8 + protocol.ConsensusVersionMaxSize() - return } // MarshalMsg implements msgp.Marshaler @@ -1585,7 +1583,6 @@ func BlockAssemblerMaxSize() (s int) { s = 1 + 9 + UnauthenticatedProposalMaxSize() + 7 + msgp.BoolSize + 8 + ProposalMaxSize() + 10 + msgp.BoolSize + 15 // Calculating size of slice: z.Authenticators panic("Slice z.Authenticators is unbounded") - return } // MarshalMsg implements msgp.Marshaler @@ -2252,17 +2249,6 @@ func (z *diskState) MsgIsZero() bool { func DiskStateMaxSize() (s int) { s = 1 + 7 panic("Unable to determine max size: Byteslice type z.Router is unbounded") - s += 7 - panic("Unable to determine max size: Byteslice type z.Player is unbounded") - s += 6 - panic("Unable to determine max size: Byteslice type z.Clock is unbounded") - s += 12 - // Calculating size of slice: z.ActionTypes - panic("Slice z.ActionTypes is unbounded") - s += 8 - // Calculating size of slice: z.Actions - panic("Slice z.Actions is unbounded") - return } // MarshalMsg implements msgp.Marshaler @@ -3081,52 +3067,37 @@ func FreshnessDataMaxSize() (s int) { // MarshalMsg implements msgp.Marshaler func (z *message) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) - // omitempty: check for empty values - zb0001Len := uint32(9) - var zb0001Mask uint16 /* 11 bits */ - if (*z).MessageHandle.MsgIsZero() { - zb0001Len-- - zb0001Mask |= 0x4 - } - // variable map header, size zb0001Len - o = append(o, 0x80|uint8(zb0001Len)) - if zb0001Len != 0 { - // string "Bundle" - o = append(o, 0xa6, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65) - o = (*z).Bundle.MarshalMsg(o) - // string "CompoundMessage" - o = append(o, 0xaf, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x75, 0x6e, 0x64, 0x4d, 0x65, 
0x73, 0x73, 0x61, 0x67, 0x65) - // map header, size 2 - // string "Proposal" - o = append(o, 0x82, 0xa8, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c) - o = (*z).CompoundMessage.Proposal.MarshalMsg(o) - // string "Vote" - o = append(o, 0xa4, 0x56, 0x6f, 0x74, 0x65) - o = (*z).CompoundMessage.Vote.MarshalMsg(o) - if (zb0001Mask & 0x4) == 0 { // if not empty - // string "MessageHandle" - o = append(o, 0xad, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65) - o = (*z).MessageHandle.MarshalMsg(o) - } - // string "Proposal" - o = append(o, 0xa8, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c) - o = (*z).Proposal.MarshalMsg(o) - // string "Tag" - o = append(o, 0xa3, 0x54, 0x61, 0x67) - o = (*z).Tag.MarshalMsg(o) - // string "UnauthenticatedBundle" - o = append(o, 0xb5, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65) - o = (*z).UnauthenticatedBundle.MarshalMsg(o) - // string "UnauthenticatedProposal" - o = append(o, 0xb7, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c) - o = (*z).UnauthenticatedProposal.MarshalMsg(o) - // string "UnauthenticatedVote" - o = append(o, 0xb3, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x56, 0x6f, 0x74, 0x65) - o = (*z).UnauthenticatedVote.MarshalMsg(o) - // string "Vote" - o = append(o, 0xa4, 0x56, 0x6f, 0x74, 0x65) - o = (*z).Vote.MarshalMsg(o) - } + // map header, size 8 + // string "Bundle" + o = append(o, 0x88, 0xa6, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65) + o = (*z).Bundle.MarshalMsg(o) + // string "CompoundMessage" + o = append(o, 0xaf, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x75, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65) + // map header, size 2 + // string "Proposal" + o = append(o, 0x82, 0xa8, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c) + o = (*z).CompoundMessage.Proposal.MarshalMsg(o) + // 
string "Vote" + o = append(o, 0xa4, 0x56, 0x6f, 0x74, 0x65) + o = (*z).CompoundMessage.Vote.MarshalMsg(o) + // string "Proposal" + o = append(o, 0xa8, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c) + o = (*z).Proposal.MarshalMsg(o) + // string "Tag" + o = append(o, 0xa3, 0x54, 0x61, 0x67) + o = (*z).Tag.MarshalMsg(o) + // string "UnauthenticatedBundle" + o = append(o, 0xb5, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65) + o = (*z).UnauthenticatedBundle.MarshalMsg(o) + // string "UnauthenticatedProposal" + o = append(o, 0xb7, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c) + o = (*z).UnauthenticatedProposal.MarshalMsg(o) + // string "UnauthenticatedVote" + o = append(o, 0xb3, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x56, 0x6f, 0x74, 0x65) + o = (*z).UnauthenticatedVote.MarshalMsg(o) + // string "Vote" + o = append(o, 0xa4, 0x56, 0x6f, 0x74, 0x65) + o = (*z).Vote.MarshalMsg(o) return } @@ -3153,14 +3124,6 @@ func (z *message) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o [ err = msgp.WrapError(err) return } - if zb0001 > 0 { - zb0001-- - bts, err = (*z).MessageHandle.UnmarshalMsgWithState(bts, st) - if err != nil { - err = msgp.WrapError(err, "struct-from-array", "MessageHandle") - return - } - } if zb0001 > 0 { zb0001-- bts, err = (*z).Tag.UnmarshalMsgWithState(bts, st) @@ -3312,12 +3275,6 @@ func (z *message) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o [ return } switch string(field) { - case "MessageHandle": - bts, err = (*z).MessageHandle.UnmarshalMsgWithState(bts, st) - if err != nil { - err = msgp.WrapError(err, "MessageHandle") - return - } case "Tag": bts, err = (*z).Tag.UnmarshalMsgWithState(bts, st) if err != nil { @@ -3453,20 +3410,18 @@ func (_ *message) CanUnmarshalMsg(z interface{}) bool { // Msgsize 
returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *message) Msgsize() (s int) { - s = 1 + 14 + (*z).MessageHandle.Msgsize() + 4 + (*z).Tag.Msgsize() + 5 + (*z).Vote.Msgsize() + 9 + (*z).Proposal.Msgsize() + 7 + (*z).Bundle.Msgsize() + 20 + (*z).UnauthenticatedVote.Msgsize() + 24 + (*z).UnauthenticatedProposal.Msgsize() + 22 + (*z).UnauthenticatedBundle.Msgsize() + 16 + 1 + 5 + (*z).CompoundMessage.Vote.Msgsize() + 9 + (*z).CompoundMessage.Proposal.Msgsize() + s = 1 + 4 + (*z).Tag.Msgsize() + 5 + (*z).Vote.Msgsize() + 9 + (*z).Proposal.Msgsize() + 7 + (*z).Bundle.Msgsize() + 20 + (*z).UnauthenticatedVote.Msgsize() + 24 + (*z).UnauthenticatedProposal.Msgsize() + 22 + (*z).UnauthenticatedBundle.Msgsize() + 16 + 1 + 5 + (*z).CompoundMessage.Vote.Msgsize() + 9 + (*z).CompoundMessage.Proposal.Msgsize() return } // MsgIsZero returns whether this is a zero value func (z *message) MsgIsZero() bool { - return ((*z).MessageHandle.MsgIsZero()) && ((*z).Tag.MsgIsZero()) && ((*z).Vote.MsgIsZero()) && ((*z).Proposal.MsgIsZero()) && ((*z).Bundle.MsgIsZero()) && ((*z).UnauthenticatedVote.MsgIsZero()) && ((*z).UnauthenticatedProposal.MsgIsZero()) && ((*z).UnauthenticatedBundle.MsgIsZero()) && (((*z).CompoundMessage.Vote.MsgIsZero()) && ((*z).CompoundMessage.Proposal.MsgIsZero())) + return ((*z).Tag.MsgIsZero()) && ((*z).Vote.MsgIsZero()) && ((*z).Proposal.MsgIsZero()) && ((*z).Bundle.MsgIsZero()) && ((*z).UnauthenticatedVote.MsgIsZero()) && ((*z).UnauthenticatedProposal.MsgIsZero()) && ((*z).UnauthenticatedBundle.MsgIsZero()) && (((*z).CompoundMessage.Vote.MsgIsZero()) && ((*z).CompoundMessage.Proposal.MsgIsZero())) } // MaxSize returns a maximum valid message size for this message type func MessageMaxSize() (s int) { - s = 1 + 14 - panic("Unable to determine max size: MaxSize() not implemented for Raw type") - s += 4 + protocol.TagMaxSize() + 5 + VoteMaxSize() + 9 + ProposalMaxSize() + 7 + BundleMaxSize() + 20 + 
UnauthenticatedVoteMaxSize() + 24 + UnauthenticatedProposalMaxSize() + 22 + UnauthenticatedBundleMaxSize() + 16 + 1 + 5 + UnauthenticatedVoteMaxSize() + 9 + UnauthenticatedProposalMaxSize() + s = 1 + 4 + protocol.TagMaxSize() + 5 + VoteMaxSize() + 9 + ProposalMaxSize() + 7 + BundleMaxSize() + 20 + UnauthenticatedVoteMaxSize() + 24 + UnauthenticatedProposalMaxSize() + 22 + UnauthenticatedBundleMaxSize() + 16 + 1 + 5 + UnauthenticatedVoteMaxSize() + 9 + UnauthenticatedProposalMaxSize() return } @@ -3758,10 +3713,6 @@ func (z *messageEvent) MsgIsZero() bool { func MessageEventMaxSize() (s int) { s = 1 + 2 + msgp.Uint8Size + 6 + MessageMaxSize() + 4 panic("Unable to determine max size: String type string(*z.Err) is unbounded") - s += 10 + msgp.Uint64Size + 5 - s += MessageEventMaxSize() - s += 10 + msgp.BoolSize + 6 + ConsensusVersionViewMaxSize() - return } // MarshalMsg implements msgp.Marshaler @@ -4206,7 +4157,6 @@ func PeriodRouterMaxSize() (s int) { s = 1 + 16 + ProposalTrackerMaxSize() + 18 + VoteTrackerPeriodMaxSize() + 24 + ProposalTrackerContractMaxSize() + 9 s += msgp.MapHeaderSize panic("Map z.Children is unbounded") - return } // MarshalMsg implements msgp.Marshaler @@ -6154,10 +6104,6 @@ func ProposalStoreMaxSize() (s int) { s = 1 + 9 s += msgp.MapHeaderSize panic("Map z.Relevant is unbounded") - s += 7 + ProposalValueMaxSize() + 11 - s += msgp.MapHeaderSize - panic("Map z.Assemblers is unbounded") - return } // MarshalMsg implements msgp.Marshaler @@ -6402,8 +6348,6 @@ func ProposalTableMaxSize() (s int) { s = 1 + 8 s += msgp.MapHeaderSize panic("Map z.Pending is unbounded") - s += 12 + msgp.Uint64Size - return } // MarshalMsg implements msgp.Marshaler @@ -6616,8 +6560,6 @@ func ProposalTrackerMaxSize() (s int) { s = 1 + 10 s += msgp.MapHeaderSize panic("Map z.Duplicate is unbounded") - s += 8 + ProposalSeekerMaxSize() + 8 + ProposalValueMaxSize() - return } // MarshalMsg implements msgp.Marshaler @@ -7168,7 +7110,6 @@ func ProposalVoteCounterMaxSize() 
(s int) { s = 1 + 6 + msgp.Uint64Size + 6 s += msgp.MapHeaderSize panic("Map z.Votes is unbounded") - return } // MarshalMsg implements msgp.Marshaler @@ -7910,7 +7851,6 @@ func RootRouterMaxSize() (s int) { s = 1 + 16 + 1 + 15 + 1 + 9 s += msgp.MapHeaderSize panic("Map z.Children is unbounded") - return } // MarshalMsg implements msgp.Marshaler @@ -8287,7 +8227,6 @@ func RoundRouterMaxSize() (s int) { s = 1 + 14 + ProposalStoreMaxSize() + 17 + 1 + 9 + ThresholdEventMaxSize() + 3 + msgp.BoolSize + 9 s += msgp.MapHeaderSize panic("Map z.Children is unbounded") - return } // MarshalMsg implements msgp.Marshaler @@ -8647,7 +8586,6 @@ func (z serializableError) MsgIsZero() bool { // MaxSize returns a maximum valid message size for this message type func SerializableErrorMaxSize() (s int) { panic("Unable to determine max size: String type string(z) is unbounded") - return } // MarshalMsg implements msgp.Marshaler @@ -13042,14 +12980,6 @@ func VoteTrackerMaxSize() (s int) { s = 1 + 7 s += msgp.MapHeaderSize panic("Map z.Voters is unbounded") - s += 7 - s += msgp.MapHeaderSize - panic("Map z.Counts is unbounded") - s += 13 - s += msgp.MapHeaderSize - panic("Map z.Equivocators is unbounded") - s += 18 + msgp.Uint64Size - return } // MarshalMsg implements msgp.Marshaler diff --git a/agreement/persistence_test.go b/agreement/persistence_test.go index 1a7381aa6a..3380833507 100644 --- a/agreement/persistence_test.go +++ b/agreement/persistence_test.go @@ -182,13 +182,19 @@ func BenchmarkAgreementPersistenceRecovery(b *testing.B) { } } +const randomizedEncodingMaxCollectionLen = 8 // instead of 32 used in codec_tester.go + func randomizeDiskState() (rr rootRouter, p player) { - p2, err := protocol.RandomizeObject(&player{}) + opts := []protocol.RandomizeObjectOption{ + protocol.RandomizeObjectWithMaxCollectionLen(randomizedEncodingMaxCollectionLen), + protocol.RandomizeObjectSilenceAllocWarnings(), + } + p2, err := protocol.RandomizeObject(&player{}, opts...) 
if err != nil { return } - rr2, err := protocol.RandomizeObject(&rootRouter{}) + rr2, err := protocol.RandomizeObject(&rootRouter{}, opts...) if err != nil { return } @@ -200,7 +206,12 @@ func randomizeDiskState() (rr rootRouter, p player) { func TestRandomizedEncodingFullDiskState(t *testing.T) { partitiontest.PartitionTest(t) - for i := 0; i < 5000; i++ { + iterations := 1000 + if testing.Short() { + iterations = 500 + } + + for i := 0; i < iterations; i++ { router, player := randomizeDiskState() a := []action{} clock := timers.MakeMonotonicClock[TimeoutType](time.Date(2015, 1, 2, 5, 6, 7, 8, time.UTC)) diff --git a/agreement/player.go b/agreement/player.go index 1f582137d3..6336df9825 100644 --- a/agreement/player.go +++ b/agreement/player.go @@ -275,7 +275,7 @@ func (p *player) issueFastVote(r routerHandle) (actions []action) { func (p *player) handleCheckpointEvent(r routerHandle, e checkpointEvent) []action { return []action{ - checkpointAction{ //nolint:gosimple // explicit assignment for clarity + checkpointAction{ //nolint:staticcheck // explicit assignment for clarity Round: e.Round, Period: e.Period, Step: e.Step, @@ -331,13 +331,7 @@ func (p *player) calculateFilterTimeout(ver protocol.ConsensusVersion, tracer *t dynamicTimeout := p.lowestCredentialArrivals.orderStatistics(dynamicFilterTimeoutCredentialArrivalHistoryIdx) + dynamicFilterTimeoutGraceInterval // Make sure the dynamic filter timeout is not too small nor too large - clampedTimeout := dynamicTimeout - if clampedTimeout < dynamicFilterTimeoutLowerBound { - clampedTimeout = dynamicFilterTimeoutLowerBound - } - if clampedTimeout > defaultTimeout { - clampedTimeout = defaultTimeout - } + clampedTimeout := min(max(dynamicTimeout, dynamicFilterTimeoutLowerBound), defaultTimeout) tracer.log.Debugf("round %d, period %d: dynamicTimeout = %d, clamped timeout = %d", p.Round, p.Period, dynamicTimeout, clampedTimeout) // store dynamicFilterTimeout on the player for debugging & reporting 
p.dynamicFilterTimeout = dynamicTimeout diff --git a/agreement/proposalTable_test.go b/agreement/proposalTable_test.go deleted file mode 100644 index 172a70b633..0000000000 --- a/agreement/proposalTable_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (C) 2019-2025 Algorand, Inc. -// This file is part of go-algorand -// -// go-algorand is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as -// published by the Free Software Foundation, either version 3 of the -// License, or (at your option) any later version. -// -// go-algorand is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with go-algorand. If not, see . - -package agreement - -import ( - "encoding/base64" - "testing" - - "github.com/algorand/go-algorand/network" - "github.com/algorand/go-algorand/protocol" - "github.com/algorand/go-algorand/test/partitiontest" - "github.com/stretchr/testify/require" -) - -// This test is only necessary for transition to msgp encoding -// of the player state machine for agreement persistence -func TestProposalTableMsgpEncoding(t *testing.T) { - partitiontest.PartitionTest(t) - - type messageMetadata struct { - raw network.IncomingMessage - } - encoded, err := base64.StdEncoding.DecodeString("gqdQZW5kaW5ngQGHqUNhbmNlbGxlZMKjRXJywKVJbnB1dImmQnVuZGxlgK9Db21wb3VuZE1lc3NhZ2WCqFByb3Bvc2FsgKRWb3RlgK1NZXNzYWdlSGFuZGxlgKhQcm9wb3NhbICjVGFnolBQtVVuYXV0aGVudGljYXRlZEJ1bmRsZYC3VW5hdXRoZW50aWNhdGVkUHJvcG9zYWyAs1VuYXV0aGVudGljYXRlZFZvdGWApFZvdGWApVByb3RvgqNFcnLAp1ZlcnNpb26goVQApFRhaWzAqVRhc2tJbmRleD+rUGVuZGluZ05leHQB") - require.NoError(t, err) - - // run on master a3e90ad to get the encoded data for above - // pt := proposalTable{} - // msg := 
messageEvent{ - // Input: message{ - // Tag: protocol.ProposalPayloadTag, - // MessageHandle: &messageMetadata{raw: network.IncomingMessage{Tag: protocol.Tag("mytag"), Data: []byte("some data")}}, - // }, - // TaskIndex: 63} - // pt.push(&msg) - // result := protocol.EncodeReflect(&pt) - // fmt.Println(base64.StdEncoding.EncodeToString(result)) - - var ptMsgp, ptReflect proposalTable - err = protocol.Decode(encoded, &ptMsgp) - require.NoError(t, err) - err = protocol.DecodeReflect(encoded, &ptReflect) - require.NoError(t, err) - - msgMsgp := ptMsgp.pop(ptMsgp.PendingNext) - msgReflect := ptReflect.pop(ptReflect.PendingNext) - - // After setting MessageHandle to nil they should be the same - msgMsgp.Input.MessageHandle = nil - msgReflect.Input.MessageHandle = nil - require.Equal(t, msgMsgp, msgReflect) - // Check that the other fields we have manually set are still the same - require.Equal(t, msgMsgp.Input.Tag, protocol.ProposalPayloadTag) - require.Equal(t, msgMsgp.TaskIndex, uint64(63)) - -} diff --git a/agreement/pseudonode.go b/agreement/pseudonode.go index 54c9fde7df..c0699c6ccb 100644 --- a/agreement/pseudonode.go +++ b/agreement/pseudonode.go @@ -415,23 +415,25 @@ func (t pseudonodeVotesTask) execute(verifier *AsyncVoteVerifier, quit chan stru for _, result := range verifiedResults { totalWeight += result.v.Cred.Weight } - if t.node.log.IsLevelEnabled(logging.Info) { + if t.node.log.IsLevelEnabled(logging.Info) || t.node.log.GetTelemetryEnabled() { for _, result := range verifiedResults { vote := result.v - logEvent := logspec.AgreementEvent{ - Type: logspec.VoteBroadcast, - Sender: vote.R.Sender.String(), - Hash: vote.R.Proposal.BlockDigest.String(), - Round: uint64(t.round), - Period: uint64(t.period), - Step: uint64(t.step), - ObjectRound: uint64(vote.R.Round), - ObjectPeriod: uint64(vote.R.Period), - ObjectStep: uint64(vote.R.Step), - Weight: vote.Cred.Weight, - WeightTotal: totalWeight, + if t.node.log.IsLevelEnabled(logging.Info) { + logEvent := 
logspec.AgreementEvent{ + Type: logspec.VoteBroadcast, + Sender: vote.R.Sender.String(), + Hash: vote.R.Proposal.BlockDigest.String(), + Round: uint64(t.round), + Period: uint64(t.period), + Step: uint64(t.step), + ObjectRound: uint64(vote.R.Round), + ObjectPeriod: uint64(vote.R.Period), + ObjectStep: uint64(vote.R.Step), + Weight: vote.Cred.Weight, + WeightTotal: totalWeight, + } + t.node.log.with(logEvent).Infof("vote created for broadcast (weight %d, total weight %d)", vote.Cred.Weight, totalWeight) } - t.node.log.with(logEvent).Infof("vote created for broadcast (weight %d, total weight %d)", vote.Cred.Weight, totalWeight) if !t.node.log.GetTelemetryEnabled() { continue } @@ -445,7 +447,9 @@ func (t pseudonodeVotesTask) execute(verifier *AsyncVoteVerifier, quit chan stru // Recovered: false, }) } - t.node.log.Infof("pseudonode.makeVotes: %v votes created for %v at (%v, %v, %v), total weight %v", len(verifiedResults), t.prop, t.round, t.period, t.step, totalWeight) + if t.node.log.IsLevelEnabled(logging.Info) { + t.node.log.Infof("pseudonode.makeVotes: %v votes created for %v at (%v, %v, %v), total weight %v", len(verifiedResults), t.prop, t.round, t.period, t.step, totalWeight) + } } if len(verifiedResults) > 0 { // wait until the persist state is flushed, as we don't want to send any vote unless we've completed flushing it to disk. 
diff --git a/agreement/pseudonode_test.go b/agreement/pseudonode_test.go index fb94ac62fb..f8ae6548e7 100644 --- a/agreement/pseudonode_test.go +++ b/agreement/pseudonode_test.go @@ -19,7 +19,6 @@ package agreement import ( "context" "crypto/sha256" - "errors" "fmt" "strings" "testing" @@ -520,21 +519,21 @@ func TestPseudonodeNonEnqueuedTasks(t *testing.T) { for i := 0; i < pseudonodeVerificationBacklog*2; i++ { ch, err = pb.MakeProposals(context.Background(), startRound, period(i)) if err != nil { - require.ErrorAs(t, errPseudonodeBacklogFull, &err) + require.ErrorIs(t, err, errPseudonodeBacklogFull) break } channels = append(channels, ch) } enqueuedProposals := len(channels) require.Error(t, err, "MakeProposals did not returned an error when being overflowed with requests") - require.True(t, errors.Is(err, errPseudonodeBacklogFull)) + require.ErrorIs(t, err, errPseudonodeBacklogFull) persist := make(chan error) close(persist) for i := 0; i < pseudonodeVerificationBacklog*2; i++ { ch, err = pb.MakeVotes(context.Background(), startRound, period(i), step(i%5), makeProposalValue(period(i), accounts[0].Address()), persist) if err != nil { - require.ErrorAs(t, errPseudonodeBacklogFull, &err) + require.ErrorIs(t, err, errPseudonodeBacklogFull) break } channels = append(channels, ch) diff --git a/agreement/router.go b/agreement/router.go index a219e93ad2..146a3d1250 100644 --- a/agreement/router.go +++ b/agreement/router.go @@ -64,11 +64,7 @@ func init() { // for consistency in analytics we are setting the minimum to be 8 rounds // (equivalent to a dynamicFilterTimeoutLowerBound of 500 ms). 
minCredentialRoundLag := round(8) // round 2*2000ms / 500ms - credentialRoundLag = round(2 * config.Protocol.SmallLambda / dynamicFilterTimeoutLowerBound) - - if credentialRoundLag < minCredentialRoundLag { - credentialRoundLag = minCredentialRoundLag - } + credentialRoundLag = max(round(2*config.Protocol.SmallLambda/dynamicFilterTimeoutLowerBound), minCredentialRoundLag) if credentialRoundLag*round(dynamicFilterTimeoutLowerBound) < round(2*config.Protocol.SmallLambda) { credentialRoundLag++ } diff --git a/agreement/state_machine_test.go b/agreement/state_machine_test.go index 189ae432c7..437ec6799a 100644 --- a/agreement/state_machine_test.go +++ b/agreement/state_machine_test.go @@ -20,6 +20,7 @@ import ( "bytes" "fmt" "os" + "slices" "strings" "github.com/algorand/go-algorand/logging" @@ -158,12 +159,7 @@ func (t ioTrace) CountEvent(b event) (count int) { // for each event, passes it into the given fn; if returns true, returns true. func (t ioTrace) ContainsFn(compareFn func(b event) bool) bool { - for _, ev := range t.events { - if compareFn(ev) { - return true - } - } - return false + return slices.ContainsFunc(t.events, compareFn) } func (t ioTrace) countAction() (count int) { @@ -395,7 +391,7 @@ func (blackhole) Write(data []byte) (int, error) { return len(data), nil } -// deterministicTraceTestCase encapsulates a traditional unit test test case. +// deterministicTraceTestCase encapsulates a traditional unit test case. type determisticTraceTestCase struct { inputs []event expectedOutputs []event diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go index b1a3def2d0..95ad1d9b42 100644 --- a/catchup/catchpointService.go +++ b/catchup/catchpointService.go @@ -525,10 +525,7 @@ func (cs *CatchpointCatchupService) processStageBlocksDownload() (err error) { // 2. replay starts from X-CatchpointLookback+1 // 3. 
transaction evaluation at Y requires block up to MaxTxnLife+DeeperBlockHeaderHistory back from Y proto := config.Consensus[topBlock.CurrentProtocol] - lookback := proto.MaxTxnLife + proto.DeeperBlockHeaderHistory + proto.CatchpointLookback - if lookback < proto.MaxBalLookback { - lookback = proto.MaxBalLookback - } + lookback := max(proto.MaxTxnLife+proto.DeeperBlockHeaderHistory+proto.CatchpointLookback, proto.MaxBalLookback) lookbackForStateProofSupport := lookbackForStateproofsSupport(&topBlock) if lookback < lookbackForStateProofSupport { diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go index 75dd33838d..aee0cb8ea0 100644 --- a/catchup/fetcher_test.go +++ b/catchup/fetcher_test.go @@ -298,24 +298,6 @@ func (p *testUnicastPeer) Respond(ctx context.Context, reqMsg network.IncomingMe return nil } -func (p *testUnicastPeer) Version() string { - return p.version -} - -func (p *testUnicastPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag) error { - ps := p.gn.(*httpTestPeerSource) - var dispather network.MessageHandler - for _, v := range ps.dispatchHandlers { - if v.Tag == tag { - dispather = v.MessageHandler - break - } - } - require.NotNil(p.t, dispather) - dispather.Handle(network.IncomingMessage{Tag: tag, Data: msg, Sender: p, Net: p.gn}) - return nil -} - func makeTestUnicastPeer(gn network.GossipNode, t *testing.T) network.UnicastPeer { return makeTestUnicastPeerWithResponseOverride(gn, t, nil) } diff --git a/catchup/peerSelector_test.go b/catchup/peerSelector_test.go index e2a6418843..8935851fa9 100644 --- a/catchup/peerSelector_test.go +++ b/catchup/peerSelector_test.go @@ -30,7 +30,6 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/network" - "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -52,12 +51,6 @@ type mockUnicastPeer struct { func (d *mockUnicastPeer) GetAddress() string { return d.address } -func (d *mockUnicastPeer) Unicast(ctx 
context.Context, data []byte, tag protocol.Tag) error { - return nil -} -func (d *mockUnicastPeer) Version() string { - return "" -} func (d *mockUnicastPeer) Request(ctx context.Context, tag network.Tag, topics network.Topics) (resp *network.Response, e error) { return nil, nil } diff --git a/catchup/service.go b/catchup/service.go index 0575fda879..88180df457 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -476,17 +476,11 @@ func (s *Service) fetchAndWrite(ctx context.Context, r basics.Round, prevFetchCo // TODO the following code does not handle the following case: seedLookback upgrades during fetch func (s *Service) pipelinedFetch(seedLookback uint64) { - maxParallelRequests := s.parallelBlocks - if maxParallelRequests < seedLookback { - maxParallelRequests = seedLookback - } + maxParallelRequests := max(s.parallelBlocks, seedLookback) minParallelRequests := seedLookback // Start the limited requests at max(1, 'seedLookback') - limitedParallelRequests := uint64(1) - if limitedParallelRequests < seedLookback { - limitedParallelRequests = seedLookback - } + limitedParallelRequests := max(1, seedLookback) completed := make(map[basics.Round]chan bool) var wg sync.WaitGroup diff --git a/catchup/service_test.go b/catchup/service_test.go index e375f354f8..4d28ad92c9 100644 --- a/catchup/service_test.go +++ b/catchup/service_test.go @@ -1088,7 +1088,7 @@ func TestServiceLedgerUnavailable(t *testing.T) { require.Less(t, local.LastRound(), remote.LastRound()) } -// TestServiceNoBlockForRound checks if fetchAndWrite does not repeats 500 times if a block not avaialble +// TestServiceNoBlockForRound checks if fetchAndWrite does not repeats 500 times if a block not available func TestServiceNoBlockForRound(t *testing.T) { partitiontest.PartitionTest(t) diff --git a/catchup/universalFetcher_test.go b/catchup/universalFetcher_test.go index 772f635a4b..9d7c4d3e7e 100644 --- a/catchup/universalFetcher_test.go +++ b/catchup/universalFetcher_test.go @@ -74,7 +74,8 @@ 
func TestUGetBlockWs(t *testing.T) { block, cert, duration, err = fetcher.fetchBlock(context.Background(), next+1, up) require.Error(t, err) - require.Error(t, noBlockForRoundError{}, err) + var noBlockErr noBlockForRoundError + require.ErrorAs(t, err, &noBlockErr) require.Equal(t, next+1, err.(noBlockForRoundError).round) require.Equal(t, next, err.(noBlockForRoundError).latest) require.Nil(t, block) @@ -120,7 +121,8 @@ func TestUGetBlockHTTP(t *testing.T) { block, cert, duration, err = fetcher.fetchBlock(context.Background(), next+1, net.GetPeers()[0]) - require.Error(t, noBlockForRoundError{}, err) + var noBlockErr noBlockForRoundError + require.ErrorAs(t, err, &noBlockErr) require.Equal(t, next+1, err.(noBlockForRoundError).round) require.Equal(t, next, err.(noBlockForRoundError).latest) require.Contains(t, err.Error(), "no block available for given round") diff --git a/cmd/algod/main.go b/cmd/algod/main.go index f9f60a4c09..7f36c866a2 100644 --- a/cmd/algod/main.go +++ b/cmd/algod/main.go @@ -210,6 +210,9 @@ func run() int { log.Fatalf("Unable to load optional consensus protocols file: %v", err) } + // Configure batch verifier implementation based on config + crypto.SetEd25519BatchVerifier(cfg.EnableBatchVerification) + // Enable telemetry hook in daemon to send logs to cloud // If ALGOTEST env variable is set, telemetry is disabled - allows disabling telemetry for tests isTest := os.Getenv("ALGOTEST") != "" diff --git a/cmd/algofix/deadlock_test.go b/cmd/algofix/deadlock_test.go index d4874d23f1..1f9103015b 100644 --- a/cmd/algofix/deadlock_test.go +++ b/cmd/algofix/deadlock_test.go @@ -144,7 +144,7 @@ func testGoFmt(fset *token.FileSet, node interface{}) (out string, err error) { var buf bytes.Buffer err = format.Node(&buf, fset, node) if err == nil { - out = string(buf.Bytes()) + out = buf.String() } return } diff --git a/cmd/algofix/fix.go b/cmd/algofix/fix.go index 03c828a581..a1a4251de9 100644 --- a/cmd/algofix/fix.go +++ b/cmd/algofix/fix.go @@ -7,13 
+7,9 @@ package main import ( "fmt" "go/ast" - "go/parser" "go/token" - "os" "path" - "reflect" "strconv" - "strings" ) type fix struct { @@ -323,160 +319,12 @@ func declImports(gen *ast.GenDecl, path string) bool { return false } -// isPkgDot reports whether t is the expression "pkg.name" -// where pkg is an imported identifier. -func isPkgDot(t ast.Expr, pkg, name string) bool { - sel, ok := t.(*ast.SelectorExpr) - return ok && isTopName(sel.X, pkg) && sel.Sel.String() == name -} - -// isPtrPkgDot reports whether f is the expression "*pkg.name" -// where pkg is an imported identifier. -func isPtrPkgDot(t ast.Expr, pkg, name string) bool { - ptr, ok := t.(*ast.StarExpr) - return ok && isPkgDot(ptr.X, pkg, name) -} - // isTopName reports whether n is a top-level unresolved identifier with the given name. func isTopName(n ast.Expr, name string) bool { id, ok := n.(*ast.Ident) return ok && id.Name == name && id.Obj == nil } -// isName reports whether n is an identifier with the given name. -func isName(n ast.Expr, name string) bool { - id, ok := n.(*ast.Ident) - return ok && id.String() == name -} - -// isCall reports whether t is a call to pkg.name. -func isCall(t ast.Expr, pkg, name string) bool { - call, ok := t.(*ast.CallExpr) - return ok && isPkgDot(call.Fun, pkg, name) -} - -// If n is an *ast.Ident, isIdent returns it; otherwise isIdent returns nil. -func isIdent(n interface{}) *ast.Ident { - id, _ := n.(*ast.Ident) - return id -} - -// refersTo reports whether n is a reference to the same object as x. -func refersTo(n ast.Node, x *ast.Ident) bool { - id, ok := n.(*ast.Ident) - // The test of id.Name == x.Name handles top-level unresolved - // identifiers, which all have Obj == nil. - return ok && id.Obj == x.Obj && id.Name == x.Name -} - -// isBlank reports whether n is the blank identifier. -func isBlank(n ast.Expr) bool { - return isName(n, "_") -} - -// isEmptyString reports whether n is an empty string literal. 
-func isEmptyString(n ast.Expr) bool { - lit, ok := n.(*ast.BasicLit) - return ok && lit.Kind == token.STRING && len(lit.Value) == 2 -} - -func warn(pos token.Pos, msg string, args ...interface{}) { - if pos.IsValid() { - msg = "%s: " + msg - arg1 := []interface{}{fset.Position(pos).String()} - args = append(arg1, args...) - } - fmt.Fprintf(os.Stderr, msg+"\n", args...) -} - -// countUses returns the number of uses of the identifier x in scope. -func countUses(x *ast.Ident, scope []ast.Stmt) int { - count := 0 - ff := func(n interface{}) { - if n, ok := n.(ast.Node); ok && refersTo(n, x) { - count++ - } - } - for _, n := range scope { - walk(n, ff) - } - return count -} - -// rewriteUses replaces all uses of the identifier x and !x in scope -// with f(x.Pos()) and fnot(x.Pos()). -func rewriteUses(x *ast.Ident, f, fnot func(token.Pos) ast.Expr, scope []ast.Stmt) { - var lastF ast.Expr - ff := func(n interface{}) { - ptr, ok := n.(*ast.Expr) - if !ok { - return - } - nn := *ptr - - // The child node was just walked and possibly replaced. - // If it was replaced and this is a negation, replace with fnot(p). - not, ok := nn.(*ast.UnaryExpr) - if ok && not.Op == token.NOT && not.X == lastF { - *ptr = fnot(nn.Pos()) - return - } - if refersTo(nn, x) { - lastF = f(nn.Pos()) - *ptr = lastF - } - } - for _, n := range scope { - walk(n, ff) - } -} - -// assignsTo reports whether any of the code in scope assigns to or takes the address of x. 
-func assignsTo(x *ast.Ident, scope []ast.Stmt) bool { - assigned := false - ff := func(n interface{}) { - if assigned { - return - } - switch n := n.(type) { - case *ast.UnaryExpr: - // use of &x - if n.Op == token.AND && refersTo(n.X, x) { - assigned = true - return - } - case *ast.AssignStmt: - for _, l := range n.Lhs { - if refersTo(l, x) { - assigned = true - return - } - } - } - } - for _, n := range scope { - if assigned { - break - } - walk(n, ff) - } - return assigned -} - -// newPkgDot returns an ast.Expr referring to "pkg.name" at position pos. -func newPkgDot(pos token.Pos, pkg, name string) ast.Expr { - return &ast.SelectorExpr{ - X: &ast.Ident{ - NamePos: pos, - Name: pkg, - }, - Sel: &ast.Ident{ - NamePos: pos, - Name: name, - }, - } -} - // renameTop renames all references to the top-level name old. // It returns true if it makes any changes. func renameTop(f *ast.File, old, new string) bool { @@ -640,210 +488,3 @@ func addImport(f *ast.File, ipath string) (added bool) { f.Imports = append(f.Imports, newImport) return true } - -// deleteImport deletes the import path from the file f, if present. -func deleteImport(f *ast.File, path string) (deleted bool) { - oldImport := importSpec(f, path) - - // Find the import node that imports path, if any. - for i, decl := range f.Decls { - gen, ok := decl.(*ast.GenDecl) - if !ok || gen.Tok != token.IMPORT { - continue - } - for j, spec := range gen.Specs { - impspec := spec.(*ast.ImportSpec) - if oldImport != impspec { - continue - } - - // We found an import spec that imports path. - // Delete it. - deleted = true - copy(gen.Specs[j:], gen.Specs[j+1:]) - gen.Specs = gen.Specs[:len(gen.Specs)-1] - - // If this was the last import spec in this decl, - // delete the decl, too. 
- if len(gen.Specs) == 0 { - copy(f.Decls[i:], f.Decls[i+1:]) - f.Decls = f.Decls[:len(f.Decls)-1] - } else if len(gen.Specs) == 1 { - gen.Lparen = token.NoPos // drop parens - } - if j > 0 { - // We deleted an entry but now there will be - // a blank line-sized hole where the import was. - // Close the hole by making the previous - // import appear to "end" where this one did. - gen.Specs[j-1].(*ast.ImportSpec).EndPos = impspec.End() - } - break - } - } - - // Delete it from f.Imports. - for i, imp := range f.Imports { - if imp == oldImport { - copy(f.Imports[i:], f.Imports[i+1:]) - f.Imports = f.Imports[:len(f.Imports)-1] - break - } - } - - return -} - -// rewriteImport rewrites any import of path oldPath to path newPath. -func rewriteImport(f *ast.File, oldPath, newPath string) (rewrote bool) { - for _, imp := range f.Imports { - if importPath(imp) == oldPath { - rewrote = true - // record old End, because the default is to compute - // it using the length of imp.Path.Value. - imp.EndPos = imp.End() - imp.Path.Value = strconv.Quote(newPath) - } - } - return -} - -func usesImport(f *ast.File, path string) (used bool) { - spec := importSpec(f, path) - if spec == nil { - return - } - - name := spec.Name.String() - switch name { - case "": - // If the package name is not explicitly specified, - // make an educated guess. This is not guaranteed to be correct. - lastSlash := strings.LastIndex(path, "/") - if lastSlash == -1 { - name = path - } else { - name = path[lastSlash+1:] - } - case "_", ".": - // Not sure if this import is used - err on the side of caution. - return true - } - - walk(f, func(n interface{}) { - sel, ok := n.(*ast.SelectorExpr) - if ok && isTopName(sel.X, name) { - used = true - } - }) - - return -} - -func expr(s string) ast.Expr { - x, err := parser.ParseExpr(s) - if err != nil { - panic("parsing " + s + ": " + err.Error()) - } - // Remove position information to avoid spurious newlines. 
- killPos(reflect.ValueOf(x)) - return x -} - -var posType = reflect.TypeOf(token.Pos(0)) - -func killPos(v reflect.Value) { - switch v.Kind() { - case reflect.Ptr, reflect.Interface: - if !v.IsNil() { - killPos(v.Elem()) - } - case reflect.Slice: - n := v.Len() - for i := 0; i < n; i++ { - killPos(v.Index(i)) - } - case reflect.Struct: - n := v.NumField() - for i := 0; i < n; i++ { - f := v.Field(i) - if f.Type() == posType { - f.SetInt(0) - continue - } - killPos(f) - } - } -} - -// A Rename describes a single renaming. -type rename struct { - OldImport string // only apply rename if this import is present - NewImport string // add this import during rewrite - Old string // old name: p.T or *p.T - New string // new name: p.T or *p.T -} - -func renameFix(tab []rename) func(*ast.File) bool { - return func(f *ast.File) bool { - return renameFixTab(f, tab) - } -} - -func parseName(s string) (ptr bool, pkg, nam string) { - i := strings.Index(s, ".") - if i < 0 { - panic("parseName: invalid name " + s) - } - if strings.HasPrefix(s, "*") { - ptr = true - s = s[1:] - i-- - } - pkg = s[:i] - nam = s[i+1:] - return -} - -func renameFixTab(f *ast.File, tab []rename) bool { - fixed := false - added := map[string]bool{} - check := map[string]bool{} - for _, t := range tab { - if !imports(f, t.OldImport) { - continue - } - optr, opkg, onam := parseName(t.Old) - walk(f, func(n interface{}) { - np, ok := n.(*ast.Expr) - if !ok { - return - } - x := *np - if optr { - p, ok := x.(*ast.StarExpr) - if !ok { - return - } - x = p.X - } - if !isPkgDot(x, opkg, onam) { - return - } - if t.NewImport != "" && !added[t.NewImport] { - addImport(f, t.NewImport) - added[t.NewImport] = true - } - *np = expr(t.New) - check[t.OldImport] = true - fixed = true - }) - } - - for ipath := range check { - if !usesImport(f, ipath) { - deleteImport(f, ipath) - } - } - return fixed -} diff --git a/cmd/algofix/import_test.go b/cmd/algofix/import_test.go deleted file mode 100644 index 
8644e28f85..0000000000 --- a/cmd/algofix/import_test.go +++ /dev/null @@ -1,458 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import "go/ast" - -func init() { - addTestCases(importTests, nil) -} - -var importTests = []testCase{ - { - Name: "import.0", - Fn: addImportFn("os"), - In: `package main - -import ( - "os" -) -`, - Out: `package main - -import ( - "os" -) -`, - }, - { - Name: "import.1", - Fn: addImportFn("os"), - In: `package main -`, - Out: `package main - -import "os" -`, - }, - { - Name: "import.2", - Fn: addImportFn("os"), - In: `package main - -// Comment -import "C" -`, - Out: `package main - -// Comment -import "C" -import "os" -`, - }, - { - Name: "import.3", - Fn: addImportFn("os"), - In: `package main - -// Comment -import "C" - -import ( - "io" - "utf8" -) -`, - Out: `package main - -// Comment -import "C" - -import ( - "io" - "os" - "utf8" -) -`, - }, - { - Name: "import.4", - Fn: deleteImportFn("os"), - In: `package main - -import ( - "os" -) -`, - Out: `package main -`, - }, - { - Name: "import.5", - Fn: deleteImportFn("os"), - In: `package main - -// Comment -import "C" -import "os" -`, - Out: `package main - -// Comment -import "C" -`, - }, - { - Name: "import.6", - Fn: deleteImportFn("os"), - In: `package main - -// Comment -import "C" - -import ( - "io" - "os" - "utf8" -) -`, - Out: `package main - -// Comment -import "C" - -import ( - "io" - "utf8" -) -`, - }, - { - Name: "import.7", - Fn: deleteImportFn("io"), - In: `package main - -import ( - "io" // a - "os" // b - "utf8" // c -) -`, - Out: `package main - -import ( - // a - "os" // b - "utf8" // c -) -`, - }, - { - Name: "import.8", - Fn: deleteImportFn("os"), - In: `package main - -import ( - "io" // a - "os" // b - "utf8" // c -) -`, - Out: `package main - -import ( - "io" // a - // b - "utf8" // c -) -`, - }, - { - Name: "import.9", - Fn: 
deleteImportFn("utf8"), - In: `package main - -import ( - "io" // a - "os" // b - "utf8" // c -) -`, - Out: `package main - -import ( - "io" // a - "os" // b - // c -) -`, - }, - { - Name: "import.10", - Fn: deleteImportFn("io"), - In: `package main - -import ( - "io" - "os" - "utf8" -) -`, - Out: `package main - -import ( - "os" - "utf8" -) -`, - }, - { - Name: "import.11", - Fn: deleteImportFn("os"), - In: `package main - -import ( - "io" - "os" - "utf8" -) -`, - Out: `package main - -import ( - "io" - "utf8" -) -`, - }, - { - Name: "import.12", - Fn: deleteImportFn("utf8"), - In: `package main - -import ( - "io" - "os" - "utf8" -) -`, - Out: `package main - -import ( - "io" - "os" -) -`, - }, - { - Name: "import.13", - Fn: rewriteImportFn("utf8", "encoding/utf8"), - In: `package main - -import ( - "io" - "os" - "utf8" // thanks ken -) -`, - Out: `package main - -import ( - "encoding/utf8" // thanks ken - "io" - "os" -) -`, - }, - { - Name: "import.14", - Fn: rewriteImportFn("asn1", "encoding/asn1"), - In: `package main - -import ( - "asn1" - "crypto" - "crypto/rsa" - _ "crypto/sha1" - "crypto/x509" - "crypto/x509/pkix" - "time" -) - -var x = 1 -`, - Out: `package main - -import ( - "crypto" - "crypto/rsa" - _ "crypto/sha1" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "time" -) - -var x = 1 -`, - }, - { - Name: "import.15", - Fn: rewriteImportFn("url", "net/url"), - In: `package main - -import ( - "bufio" - "net" - "path" - "url" -) - -var x = 1 // comment on x, not on url -`, - Out: `package main - -import ( - "bufio" - "net" - "net/url" - "path" -) - -var x = 1 // comment on x, not on url -`, - }, - { - Name: "import.16", - Fn: rewriteImportFn("http", "net/http", "template", "text/template"), - In: `package main - -import ( - "flag" - "http" - "log" - "template" -) - -var addr = flag.String("addr", ":1718", "http service address") // Q=17, R=18 -`, - Out: `package main - -import ( - "flag" - "log" - "net/http" - "text/template" -) - -var addr = 
flag.String("addr", ":1718", "http service address") // Q=17, R=18 -`, - }, - { - Name: "import.17", - Fn: addImportFn("x/y/z", "x/a/c"), - In: `package main - -// Comment -import "C" - -import ( - "a" - "b" - - "x/w" - - "d/f" -) -`, - Out: `package main - -// Comment -import "C" - -import ( - "a" - "b" - - "x/a/c" - "x/w" - "x/y/z" - - "d/f" -) -`, - }, - { - Name: "import.18", - Fn: addDelImportFn("e", "o"), - In: `package main - -import ( - "f" - "o" - "z" -) -`, - Out: `package main - -import ( - "e" - "f" - "z" -) -`, - }, -} - -func addImportFn(path ...string) func(*ast.File) bool { - return func(f *ast.File) bool { - fixed := false - for _, p := range path { - if !imports(f, p) { - addImport(f, p) - fixed = true - } - } - return fixed - } -} - -func deleteImportFn(path string) func(*ast.File) bool { - return func(f *ast.File) bool { - if imports(f, path) { - deleteImport(f, path) - return true - } - return false - } -} - -func addDelImportFn(p1 string, p2 string) func(*ast.File) bool { - return func(f *ast.File) bool { - fixed := false - if !imports(f, p1) { - addImport(f, p1) - fixed = true - } - if imports(f, p2) { - deleteImport(f, p2) - fixed = true - } - return fixed - } -} - -func rewriteImportFn(oldnew ...string) func(*ast.File) bool { - return func(f *ast.File) bool { - fixed := false - for i := 0; i < len(oldnew); i += 2 { - if imports(f, oldnew[i]) { - rewriteImport(f, oldnew[i], oldnew[i+1]) - fixed = true - } - } - return fixed - } -} diff --git a/cmd/algofix/main.go b/cmd/algofix/main.go index d3bb997c3d..848de29c77 100644 --- a/cmd/algofix/main.go +++ b/cmd/algofix/main.go @@ -70,14 +70,14 @@ func main() { if *allowedRewrites != "" { allowed = make(map[string]bool) - for _, f := range strings.Split(*allowedRewrites, ",") { + for f := range strings.SplitSeq(*allowedRewrites, ",") { allowed[f] = true } } if *forceRewrites != "" { force = make(map[string]bool) - for _, f := range strings.Split(*forceRewrites, ",") { + for f := range 
strings.SplitSeq(*forceRewrites, ",") { force[f] = true } } @@ -212,16 +212,6 @@ func processFile(filename string, useStdin bool) error { return os.WriteFile(f.Name(), newSrc, 0) } -var gofmtBuf bytes.Buffer - -func gofmt(n interface{}) string { - gofmtBuf.Reset() - if err := format.Node(&gofmtBuf, fset, n); err != nil { - return "<" + err.Error() + ">" - } - return gofmtBuf.String() -} - func report(err error) { scanner.PrintError(os.Stderr, err) exitCode = 2 diff --git a/cmd/algofix/main_test.go b/cmd/algofix/main_test.go index 2355214e23..bccc1778d1 100644 --- a/cmd/algofix/main_test.go +++ b/cmd/algofix/main_test.go @@ -22,18 +22,6 @@ type testCase struct { var testCases []testCase -func addTestCases(t []testCase, fn func(*ast.File) bool) { - // Fill in fn to avoid repetition in definitions. - if fn != nil { - for i := range t { - if t[i].Fn == nil { - t[i].Fn = fn - } - } - } - testCases = append(testCases, t...) -} - func fnop(*ast.File) bool { return false } func parseFixPrint(t *testing.T, fn func(*ast.File) bool, desc, in string, mustBeGofmt bool) (out string, fixed, ok bool) { diff --git a/cmd/algofix/typecheck.go b/cmd/algofix/typecheck.go deleted file mode 100644 index e17cbf5963..0000000000 --- a/cmd/algofix/typecheck.go +++ /dev/null @@ -1,797 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "fmt" - "go/ast" - "go/parser" - "go/token" - "maps" - "os" - "os/exec" - "path/filepath" - "reflect" - "runtime" - "strings" -) - -// Partial type checker. -// -// The fact that it is partial is very important: the input is -// an AST and a description of some type information to -// assume about one or more packages, but not all the -// packages that the program imports. The checker is -// expected to do as much as it can with what it has been -// given. 
There is not enough information supplied to do -// a full type check, but the type checker is expected to -// apply information that can be derived from variable -// declarations, function and method returns, and type switches -// as far as it can, so that the caller can still tell the types -// of expression relevant to a particular fix. -// -// TODO(rsc,gri): Replace with go/typechecker. -// Doing that could be an interesting test case for go/typechecker: -// the constraints about working with partial information will -// likely exercise it in interesting ways. The ideal interface would -// be to pass typecheck a map from importpath to package API text -// (Go source code), but for now we use data structures (TypeConfig, Type). -// -// The strings mostly use gofmt form. -// -// A Field or FieldList has as its type a comma-separated list -// of the types of the fields. For example, the field list -// x, y, z int -// has type "int, int, int". - -// The prefix "type " is the type of a type. -// For example, given -// var x int -// type T int -// x's type is "int" but T's type is "type int". -// mkType inserts the "type " prefix. -// getType removes it. -// isType tests for it. - -func mkType(t string) string { - return "type " + t -} - -func getType(t string) string { - if !isType(t) { - return "" - } - return t[len("type "):] -} - -func isType(t string) bool { - return strings.HasPrefix(t, "type ") -} - -// TypeConfig describes the universe of relevant types. -// For ease of creation, the types are all referred to by string -// name (e.g., "reflect.Value"). TypeByName is the only place -// where the strings are resolved. -type TypeConfig struct { - Type map[string]*Type - Var map[string]string - Func map[string]string - - // External maps from a name to its type. - // It provides additional typings not present in the Go source itself. - // For now, the only additional typings are those generated by cgo. 
- External map[string]string -} - -// typeof returns the type of the given name, which may be of -// the form "x" or "p.X". -func (cfg *TypeConfig) typeof(name string) string { - if cfg.Var != nil { - if t := cfg.Var[name]; t != "" { - return t - } - } - if cfg.Func != nil { - if t := cfg.Func[name]; t != "" { - return "func()" + t - } - } - return "" -} - -// Type describes the Fields and Methods of a type. -// If the field or method cannot be found there, it is next -// looked for in the Embed list. -type Type struct { - Field map[string]string // map field name to type - Method map[string]string // map method name to comma-separated return types (should start with "func ") - Embed []string // list of types this type embeds (for extra methods) - Def string // definition of named type -} - -// dot returns the type of "typ.name", making its decision -// using the type information in cfg. -func (typ *Type) dot(cfg *TypeConfig, name string) string { - if typ.Field != nil { - if t := typ.Field[name]; t != "" { - return t - } - } - if typ.Method != nil { - if t := typ.Method[name]; t != "" { - return t - } - } - - for _, e := range typ.Embed { - etyp := cfg.Type[e] - if etyp != nil { - if t := etyp.dot(cfg, name); t != "" { - return t - } - } - } - - return "" -} - -// typecheck type checks the AST f assuming the information in cfg. -// It returns two maps with type information: -// typeof maps AST nodes to type information in gofmt string form. -// assign maps type strings to lists of expressions that were assigned -// to values of another type that were assigned to that type. -func typecheck(cfg *TypeConfig, f *ast.File) (typeof map[interface{}]string, assign map[string][]interface{}) { - typeof = make(map[interface{}]string) - assign = make(map[string][]interface{}) - cfg1 := &TypeConfig{} - *cfg1 = *cfg // make copy so we can add locally - copied := false - - // If we import "C", add types of cgo objects. 
- cfg.External = map[string]string{} - cfg1.External = cfg.External - if imports(f, "C") { - // Run cgo on gofmtFile(f) - // Parse, extract decls from _cgo_gotypes.go - // Map _Ctype_* types to C.* types. - err := func() error { - txt, err := gofmtFile(f) - if err != nil { - return err - } - dir, err := os.MkdirTemp(os.TempDir(), "fix_cgo_typecheck") - if err != nil { - return err - } - defer os.RemoveAll(dir) - err = os.WriteFile(filepath.Join(dir, "in.go"), txt, 0600) - if err != nil { - return err - } - cmd := exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), "tool", "cgo", "-objdir", dir, "-srcdir", dir, "in.go") - err = cmd.Run() - if err != nil { - return err - } - out, err := os.ReadFile(filepath.Join(dir, "_cgo_gotypes.go")) - if err != nil { - return err - } - cgo, err := parser.ParseFile(token.NewFileSet(), "cgo.go", out, 0) - if err != nil { - return err - } - for _, decl := range cgo.Decls { - fn, ok := decl.(*ast.FuncDecl) - if !ok { - continue - } - if strings.HasPrefix(fn.Name.Name, "_Cfunc_") { - var params, results []string - for _, p := range fn.Type.Params.List { - t := gofmt(p.Type) - t = strings.Replace(t, "_Ctype_", "C.", -1) - params = append(params, t) - } - for _, r := range fn.Type.Results.List { - t := gofmt(r.Type) - t = strings.Replace(t, "_Ctype_", "C.", -1) - results = append(results, t) - } - cfg.External["C."+fn.Name.Name[7:]] = joinFunc(params, results) - } - } - return nil - }() - if err != nil { - fmt.Printf("warning: no cgo types: %s\n", err) - } - } - - // gather function declarations - for _, decl := range f.Decls { - fn, ok := decl.(*ast.FuncDecl) - if !ok { - continue - } - typecheck1(cfg, fn.Type, typeof, assign) - t := typeof[fn.Type] - if fn.Recv != nil { - // The receiver must be a type. 
- rcvr := typeof[fn.Recv] - if !isType(rcvr) { - if len(fn.Recv.List) != 1 { - continue - } - rcvr = mkType(gofmt(fn.Recv.List[0].Type)) - typeof[fn.Recv.List[0].Type] = rcvr - } - rcvr = getType(rcvr) - if rcvr != "" && rcvr[0] == '*' { - rcvr = rcvr[1:] - } - typeof[rcvr+"."+fn.Name.Name] = t - } else { - if isType(t) { - t = getType(t) - } else { - t = gofmt(fn.Type) - } - typeof[fn.Name] = t - - // Record typeof[fn.Name.Obj] for future references to fn.Name. - typeof[fn.Name.Obj] = t - } - } - - // gather struct declarations - for _, decl := range f.Decls { - d, ok := decl.(*ast.GenDecl) - if ok { - for _, s := range d.Specs { - switch s := s.(type) { - case *ast.TypeSpec: - if cfg1.Type[s.Name.Name] != nil { - break - } - if !copied { - copied = true - // Copy map lazily: it's time. - cfg1.Type = maps.Clone(cfg.Type) - } - t := &Type{Field: map[string]string{}} - cfg1.Type[s.Name.Name] = t - switch st := s.Type.(type) { - case *ast.StructType: - for _, f := range st.Fields.List { - for _, n := range f.Names { - t.Field[n.Name] = gofmt(f.Type) - } - } - case *ast.ArrayType, *ast.StarExpr, *ast.MapType: - t.Def = gofmt(st) - } - } - } - } - } - - typecheck1(cfg1, f, typeof, assign) - return typeof, assign -} - -func makeExprList(a []*ast.Ident) []ast.Expr { - var b []ast.Expr - for _, x := range a { - b = append(b, x) - } - return b -} - -// Typecheck1 is the recursive form of typecheck. -// It is like typecheck but adds to the information in typeof -// instead of allocating a new map. -func typecheck1(cfg *TypeConfig, f interface{}, typeof map[interface{}]string, assign map[string][]interface{}) { - // set sets the type of n to typ. - // If isDecl is true, n is being declared. - set := func(n ast.Expr, typ string, isDecl bool) { - if typeof[n] != "" || typ == "" { - if typeof[n] != typ { - assign[typ] = append(assign[typ], n) - } - return - } - typeof[n] = typ - - // If we obtained typ from the declaration of x - // propagate the type to all the uses. 
- // The !isDecl case is a cheat here, but it makes - // up in some cases for not paying attention to - // struct fields. The real type checker will be - // more accurate so we won't need the cheat. - if id, ok := n.(*ast.Ident); ok && id.Obj != nil && (isDecl || typeof[id.Obj] == "") { - typeof[id.Obj] = typ - } - } - - // Type-check an assignment lhs = rhs. - // If isDecl is true, this is := so we can update - // the types of the objects that lhs refers to. - typecheckAssign := func(lhs, rhs []ast.Expr, isDecl bool) { - if len(lhs) > 1 && len(rhs) == 1 { - if _, ok := rhs[0].(*ast.CallExpr); ok { - t := split(typeof[rhs[0]]) - // Lists should have same length but may not; pair what can be paired. - for i := 0; i < len(lhs) && i < len(t); i++ { - set(lhs[i], t[i], isDecl) - } - return - } - } - if len(lhs) == 1 && len(rhs) == 2 { - // x = y, ok - rhs = rhs[:1] - } else if len(lhs) == 2 && len(rhs) == 1 { - // x, ok = y - lhs = lhs[:1] - } - - // Match as much as we can. - for i := 0; i < len(lhs) && i < len(rhs); i++ { - x, y := lhs[i], rhs[i] - if typeof[y] != "" { - set(x, typeof[y], isDecl) - } else { - set(y, typeof[x], false) - } - } - } - - expand := func(s string) string { - typ := cfg.Type[s] - if typ != nil && typ.Def != "" { - return typ.Def - } - return s - } - - // The main type check is a recursive algorithm implemented - // by walkBeforeAfter(n, before, after). - // Most of it is bottom-up, but in a few places we need - // to know the type of the function we are checking. - // The before function records that information on - // the curfn stack. - var curfn []*ast.FuncType - - before := func(n interface{}) { - // push function type on stack - switch n := n.(type) { - case *ast.FuncDecl: - curfn = append(curfn, n.Type) - case *ast.FuncLit: - curfn = append(curfn, n.Type) - } - } - - // After is the real type checker. 
- after := func(n interface{}) { - if n == nil { - return - } - if false && reflect.TypeOf(n).Kind() == reflect.Ptr { // debugging trace - defer func() { - if t := typeof[n]; t != "" { - pos := fset.Position(n.(ast.Node).Pos()) - fmt.Fprintf(os.Stderr, "%s: typeof[%s] = %s\n", pos, gofmt(n), t) - } - }() - } - - switch n := n.(type) { - case *ast.FuncDecl, *ast.FuncLit: - // pop function type off stack - curfn = curfn[:len(curfn)-1] - - case *ast.FuncType: - typeof[n] = mkType(joinFunc(split(typeof[n.Params]), split(typeof[n.Results]))) - - case *ast.FieldList: - // Field list is concatenation of sub-lists. - t := "" - for _, field := range n.List { - if t != "" { - t += ", " - } - t += typeof[field] - } - typeof[n] = t - - case *ast.Field: - // Field is one instance of the type per name. - all := "" - t := typeof[n.Type] - if !isType(t) { - // Create a type, because it is typically *T or *p.T - // and we might care about that type. - t = mkType(gofmt(n.Type)) - typeof[n.Type] = t - } - t = getType(t) - if len(n.Names) == 0 { - all = t - } else { - for _, id := range n.Names { - if all != "" { - all += ", " - } - all += t - typeof[id.Obj] = t - typeof[id] = t - } - } - typeof[n] = all - - case *ast.ValueSpec: - // var declaration. Use type if present. - if n.Type != nil { - t := typeof[n.Type] - if !isType(t) { - t = mkType(gofmt(n.Type)) - typeof[n.Type] = t - } - t = getType(t) - for _, id := range n.Names { - set(id, t, true) - } - } - // Now treat same as assignment. - typecheckAssign(makeExprList(n.Names), n.Values, true) - - case *ast.AssignStmt: - typecheckAssign(n.Lhs, n.Rhs, n.Tok == token.DEFINE) - - case *ast.Ident: - // Identifier can take its type from underlying object. - if t := typeof[n.Obj]; t != "" { - typeof[n] = t - } - - case *ast.SelectorExpr: - // Field or method. 
- name := n.Sel.Name - if t := typeof[n.X]; t != "" { - t = strings.TrimPrefix(t, "*") // implicit * - if typ := cfg.Type[t]; typ != nil { - if t := typ.dot(cfg, name); t != "" { - typeof[n] = t - return - } - } - tt := typeof[t+"."+name] - if isType(tt) { - typeof[n] = getType(tt) - return - } - } - // Package selector. - if x, ok := n.X.(*ast.Ident); ok && x.Obj == nil { - str := x.Name + "." + name - if cfg.Type[str] != nil { - typeof[n] = mkType(str) - return - } - if t := cfg.typeof(x.Name + "." + name); t != "" { - typeof[n] = t - return - } - } - - case *ast.CallExpr: - // make(T) has type T. - if isTopName(n.Fun, "make") && len(n.Args) >= 1 { - typeof[n] = gofmt(n.Args[0]) - return - } - // new(T) has type *T - if isTopName(n.Fun, "new") && len(n.Args) == 1 { - typeof[n] = "*" + gofmt(n.Args[0]) - return - } - // Otherwise, use type of function to determine arguments. - t := typeof[n.Fun] - if t == "" { - t = cfg.External[gofmt(n.Fun)] - } - in, out := splitFunc(t) - if in == nil && out == nil { - return - } - typeof[n] = join(out) - for i, arg := range n.Args { - if i >= len(in) { - break - } - if typeof[arg] == "" { - typeof[arg] = in[i] - } - } - - case *ast.TypeAssertExpr: - // x.(type) has type of x. - if n.Type == nil { - typeof[n] = typeof[n.X] - return - } - // x.(T) has type T. - if t := typeof[n.Type]; isType(t) { - typeof[n] = getType(t) - } else { - typeof[n] = gofmt(n.Type) - } - - case *ast.SliceExpr: - // x[i:j] has type of x. - typeof[n] = typeof[n.X] - - case *ast.IndexExpr: - // x[i] has key type of x's type. - t := expand(typeof[n.X]) - if strings.HasPrefix(t, "[") || strings.HasPrefix(t, "map[") { - // Lazy: assume there are no nested [] in the array - // length or map key type. - if i := strings.Index(t, "]"); i >= 0 { - typeof[n] = t[i+1:] - } - } - - case *ast.StarExpr: - // *x for x of type *T has type T when x is an expr. - // We don't use the result when *x is a type, but - // compute it anyway. 
- t := expand(typeof[n.X]) - if isType(t) { - typeof[n] = "type *" + getType(t) - } else if strings.HasPrefix(t, "*") { - typeof[n] = t[len("*"):] - } - - case *ast.UnaryExpr: - // &x for x of type T has type *T. - t := typeof[n.X] - if t != "" && n.Op == token.AND { - typeof[n] = "*" + t - } - - case *ast.CompositeLit: - // T{...} has type T. - typeof[n] = gofmt(n.Type) - - // Propagate types down to values used in the composite literal. - t := expand(typeof[n]) - if strings.HasPrefix(t, "[") { // array or slice - // Lazy: assume there are no nested [] in the array length. - if i := strings.Index(t, "]"); i >= 0 { - et := t[i+1:] - for _, e := range n.Elts { - if kv, ok := e.(*ast.KeyValueExpr); ok { - e = kv.Value - } - if typeof[e] == "" { - typeof[e] = et - } - } - } - } - if strings.HasPrefix(t, "map[") { // map - // Lazy: assume there are no nested [] in the map key type. - if i := strings.Index(t, "]"); i >= 0 { - kt, vt := t[4:i], t[i+1:] - for _, e := range n.Elts { - if kv, ok := e.(*ast.KeyValueExpr); ok { - if typeof[kv.Key] == "" { - typeof[kv.Key] = kt - } - if typeof[kv.Value] == "" { - typeof[kv.Value] = vt - } - } - } - } - } - if typ := cfg.Type[t]; typ != nil && len(typ.Field) > 0 { // struct - for _, e := range n.Elts { - if kv, ok := e.(*ast.KeyValueExpr); ok { - if ft := typ.Field[fmt.Sprintf("%s", kv.Key)]; ft != "" { - if typeof[kv.Value] == "" { - typeof[kv.Value] = ft - } - } - } - } - } - - case *ast.ParenExpr: - // (x) has type of x. 
- typeof[n] = typeof[n.X] - - case *ast.RangeStmt: - t := expand(typeof[n.X]) - if t == "" { - return - } - var key, value string - if t == "string" { - key, value = "int", "rune" - } else if strings.HasPrefix(t, "[") { - key = "int" - if i := strings.Index(t, "]"); i >= 0 { - value = t[i+1:] - } - } else if strings.HasPrefix(t, "map[") { - if i := strings.Index(t, "]"); i >= 0 { - key, value = t[4:i], t[i+1:] - } - } - changed := false - if n.Key != nil && key != "" { - changed = true - set(n.Key, key, n.Tok == token.DEFINE) - } - if n.Value != nil && value != "" { - changed = true - set(n.Value, value, n.Tok == token.DEFINE) - } - // Ugly failure of vision: already type-checked body. - // Do it again now that we have that type info. - if changed { - typecheck1(cfg, n.Body, typeof, assign) - } - - case *ast.TypeSwitchStmt: - // Type of variable changes for each case in type switch, - // but go/parser generates just one variable. - // Repeat type check for each case with more precise - // type information. - as, ok := n.Assign.(*ast.AssignStmt) - if !ok { - return - } - varx, ok := as.Lhs[0].(*ast.Ident) - if !ok { - return - } - t := typeof[varx] - for _, cas := range n.Body.List { - cas := cas.(*ast.CaseClause) - if len(cas.List) == 1 { - // Variable has specific type only when there is - // exactly one type in the case list. - if tt := typeof[cas.List[0]]; isType(tt) { - tt = getType(tt) - typeof[varx] = tt - typeof[varx.Obj] = tt - typecheck1(cfg, cas.Body, typeof, assign) - } - } - } - // Restore t. - typeof[varx] = t - typeof[varx.Obj] = t - - case *ast.ReturnStmt: - if len(curfn) == 0 { - // Probably can't happen. - return - } - f := curfn[len(curfn)-1] - res := n.Results - if f.Results != nil { - t := split(typeof[f.Results]) - for i := 0; i < len(res) && i < len(t); i++ { - set(res[i], t[i], false) - } - } - - case *ast.BinaryExpr: - // Propagate types across binary ops that require two args of the same type. 
- switch n.Op { - case token.EQL, token.NEQ: // TODO: more cases. This is enough for the cftype fix. - if typeof[n.X] != "" && typeof[n.Y] == "" { - typeof[n.Y] = typeof[n.X] - } - if typeof[n.X] == "" && typeof[n.Y] != "" { - typeof[n.X] = typeof[n.Y] - } - } - } - } - walkBeforeAfter(f, before, after) -} - -// Convert between function type strings and lists of types. -// Using strings makes this a little harder, but it makes -// a lot of the rest of the code easier. This will all go away -// when we can use go/typechecker directly. - -// splitFunc splits "func(x,y,z) (a,b,c)" into ["x", "y", "z"] and ["a", "b", "c"]. -func splitFunc(s string) (in, out []string) { - if !strings.HasPrefix(s, "func(") { - return nil, nil - } - - i := len("func(") // index of beginning of 'in' arguments - nparen := 0 - for j := i; j < len(s); j++ { - switch s[j] { - case '(': - nparen++ - case ')': - nparen-- - if nparen < 0 { - // found end of parameter list - out := strings.TrimSpace(s[j+1:]) - if len(out) >= 2 && out[0] == '(' && out[len(out)-1] == ')' { - out = out[1 : len(out)-1] - } - return split(s[i:j]), split(out) - } - } - } - return nil, nil -} - -// joinFunc is the inverse of splitFunc. -func joinFunc(in, out []string) string { - outs := "" - if len(out) == 1 { - outs = " " + out[0] - } else if len(out) > 1 { - outs = " (" + join(out) + ")" - } - return "func(" + join(in) + ")" + outs -} - -// split splits "int, float" into ["int", "float"] and splits "" into []. -func split(s string) []string { - out := []string{} - i := 0 // current type being scanned is s[i:j]. 
- nparen := 0 - for j := 0; j < len(s); j++ { - switch s[j] { - case ' ': - if i == j { - i++ - } - case '(': - nparen++ - case ')': - nparen-- - if nparen < 0 { - // probably can't happen - return nil - } - case ',': - if nparen == 0 { - if i < j { - out = append(out, s[i:j]) - } - i = j + 1 - } - } - } - if nparen != 0 { - // probably can't happen - return nil - } - if i < len(s) { - out = append(out, s[i:]) - } - return out -} - -// join is the inverse of split. -func join(x []string) string { - return strings.Join(x, ", ") -} diff --git a/cmd/algoh/main.go b/cmd/algoh/main.go index 84570ab11f..8c11658a09 100644 --- a/cmd/algoh/main.go +++ b/cmd/algoh/main.go @@ -391,7 +391,7 @@ func captureErrorLogs(algohConfig algoh.HostConfig, errorOutput stdCollector, ou if errorOutput.output != "" { fmt.Fprintf(os.Stdout, "errorOutput.output: `%s`\n", errorOutput.output) errorCondition = true - fmt.Fprintf(os.Stderr, errorOutput.output) + fmt.Fprint(os.Stderr, errorOutput.output) details := telemetryspec.ErrorOutputEventDetails{ Error: errorOutput.output, Output: output.output, diff --git a/cmd/algorelay/commands.go b/cmd/algorelay/commands.go index 0f8b0f84fb..95799b89cb 100644 --- a/cmd/algorelay/commands.go +++ b/cmd/algorelay/commands.go @@ -42,10 +42,10 @@ type exitError struct { errorMessage string } -func makeExitError(exitCode int, errMsg string, errArgs ...interface{}) exitError { +func makeExitError(exitCode int, errMsg string) exitError { ee := exitError{ exitCode: exitCode, - errorMessage: fmt.Sprintf(errMsg, errArgs...), + errorMessage: errMsg, } return ee } diff --git a/cmd/algorelay/relayCmd.go b/cmd/algorelay/relayCmd.go index b6ee24ed04..7167c7f7cb 100644 --- a/cmd/algorelay/relayCmd.go +++ b/cmd/algorelay/relayCmd.go @@ -524,9 +524,6 @@ func ensureRelayStatus(checkOnly bool, relay eb.Relay, nameDomain string, srvDom // Returns an array of names starting with the target ip/name and ending with the outermost reference func getTargetDNSChain(nameEntries 
map[string]string, target string) (names []string, err error) { target = strings.ToLower(target) - if err != nil { - return - } names = append(names, target) for { diff --git a/cmd/catchpointdump/file.go b/cmd/catchpointdump/file.go index e79a86bce1..3a1075b4ab 100644 --- a/cmd/catchpointdump/file.go +++ b/cmd/catchpointdump/file.go @@ -447,10 +447,7 @@ func loadCatchpointIntoDatabase(ctx context.Context, catchupAccessor ledger.Catc } if time.Since(lastProgressUpdate) > 50*time.Millisecond && catchpointFileSize > 0 { lastProgressUpdate = time.Now() - progressRatio := int(float64(progress) * barLength / float64(catchpointFileSize)) - if progressRatio > barLength { - progressRatio = barLength - } + progressRatio := min(int(float64(progress)*barLength/float64(catchpointFileSize)), barLength) printLoadCatchpointProgressLine(progressRatio, barLength, int64(progress)) } } @@ -466,7 +463,7 @@ func printDumpingCatchpointProgressLine(progress int, barLength int, dld int64) if dld > 0 { outString = fmt.Sprintf(outString+" %d", dld) } - fmt.Printf(escapeCursorUp + escapeDeleteLine + outString + "\n") + fmt.Print(escapeCursorUp + escapeDeleteLine + outString + "\n") } func printAccountsDatabase(databaseName string, stagingTables bool, fileHeader ledger.CatchpointFileHeader, outFile *os.File, excludeFields []string) error { diff --git a/cmd/catchupsrv/tarblocks.go b/cmd/catchupsrv/tarblocks.go index 93a4293ad6..8c921eaa07 100644 --- a/cmd/catchupsrv/tarblocks.go +++ b/cmd/catchupsrv/tarblocks.go @@ -19,7 +19,6 @@ package main import ( "archive/tar" "compress/bzip2" - "errors" "fmt" "io" "os" @@ -224,5 +223,4 @@ func (tbf *tarBlockFile) getBlock(round uint64) (data []byte, err error) { return } } - return nil, errors.New("this should be unreachable") } diff --git a/cmd/dispenser/server.go b/cmd/dispenser/server.go index 4f217cda7b..19649a6be3 100644 --- a/cmd/dispenser/server.go +++ b/cmd/dispenser/server.go @@ -136,7 +136,7 @@ func dispense(w http.ResponseWriter, r 
*http.Request) { targets := r.Form["target"] if len(targets) != 1 { log.Printf("Corrupted target argument\n") - http.Error(w, err.Error(), http.StatusInternalServerError) + http.Error(w, "Corrupted target argument", http.StatusBadRequest) return } diff --git a/cmd/goal/commands.go b/cmd/goal/commands.go index 453ae05d12..9b520a2644 100644 --- a/cmd/goal/commands.go +++ b/cmd/goal/commands.go @@ -438,7 +438,7 @@ func ensurePassword() []byte { } func reportInfoln(args ...interface{}) { - for _, line := range strings.Split(fmt.Sprint(args...), "\n") { + for line := range strings.SplitSeq(fmt.Sprint(args...), "\n") { printable, line := unicodePrintable(line) if !printable { fmt.Println(infoNonPrintableCharacters) @@ -454,7 +454,7 @@ func reportInfof(format string, args ...interface{}) { // reportWarnRawln prints a warning message to stderr. Only use this function if that warning // message already indicates that it's a warning. Otherwise, use reportWarnln func reportWarnRawln(args ...interface{}) { - for _, line := range strings.Split(fmt.Sprint(args...), "\n") { + for line := range strings.SplitSeq(fmt.Sprint(args...), "\n") { printable, line := unicodePrintable(line) if !printable { fmt.Fprintln(os.Stderr, infoNonPrintableCharacters) @@ -484,7 +484,7 @@ func reportWarnf(format string, args ...interface{}) { func reportErrorln(args ...interface{}) { outStr := fmt.Sprint(args...) 
- for _, line := range strings.Split(outStr, "\n") { + for line := range strings.SplitSeq(outStr, "\n") { printable, line := unicodePrintable(line) if !printable { fmt.Fprintln(os.Stderr, errorNonPrintableCharacters) diff --git a/cmd/goal/node.go b/cmd/goal/node.go index af98f8a9c6..dd91319f97 100644 --- a/cmd/goal/node.go +++ b/cmd/goal/node.go @@ -198,7 +198,7 @@ var catchupCmd = &cobra.Command{ fmt.Printf(nodeConfirmImplicitCatchpoint, catchpoint) reader := bufio.NewReader(os.Stdin) text, _ := reader.ReadString('\n') - text = strings.Replace(text, "\n", "", -1) + text = strings.ReplaceAll(text, "\n", "") if text != "yes" { reportErrorf(errorAbortedPerUserRequest) } @@ -714,7 +714,7 @@ func verifyPeerDialArg() bool { } // make sure that the format of each entry is valid: - for _, peer := range strings.Split(peerDial, ";") { + for peer := range strings.SplitSeq(peerDial, ";") { _, err := naddr.ParseHostOrURLOrMultiaddr(peer) if err != nil { reportErrorf("Provided peer '%s' is not a valid peer address : %v", peer, err) diff --git a/cmd/nodecfg/download.go b/cmd/nodecfg/download.go index e87077e73a..85c554e7ef 100644 --- a/cmd/nodecfg/download.go +++ b/cmd/nodecfg/download.go @@ -23,7 +23,6 @@ import ( "path/filepath" "github.com/algorand/go-algorand/util/s3" - "github.com/algorand/go-algorand/util/tar" ) func downloadAndExtractConfigPackage(channel string, targetDir string, configBucket string) (err error) { @@ -74,7 +73,7 @@ func downloadConfigPackage(channelName string, targetDir string, configBucket st } func extractConfigPackage(packageFile string, targetDir string) (err error) { - err = tar.UncompressFile(packageFile, targetDir) + err = UncompressFile(packageFile, targetDir) if err != nil { return } diff --git a/util/tar/untar.go b/cmd/nodecfg/untar.go similarity index 75% rename from util/tar/untar.go rename to cmd/nodecfg/untar.go index e04af188a3..2339bbe00d 100644 --- a/util/tar/untar.go +++ b/cmd/nodecfg/untar.go @@ -14,14 +14,16 @@ // You should have 
received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package tar +package main import ( "archive/tar" "compress/gzip" + "fmt" "io" "os" "path/filepath" + "strings" ) // UncompressFile takes the name of a tar/gz archive file and expands @@ -45,6 +47,7 @@ func Uncompress(r io.Reader, dst string) error { defer gzr.Close() tr := tar.NewReader(gzr) + baseDir := filepath.Clean(dst) for { header, err := tr.Next() @@ -65,7 +68,10 @@ func Uncompress(r io.Reader, dst string) error { } // the target location where the dir/file should be created - target := filepath.Join(dst, header.Name) + target, err := resolveEntryPath(baseDir, header.Name) + if err != nil { + return err + } // the following switch could also be done using fi.Mode(), not sure if there // a benefit of using one vs. the other. @@ -90,7 +96,7 @@ func Uncompress(r io.Reader, dst string) error { } // copy over contents - if _, err := io.Copy(f, tr); err != nil { + if _, err := io.Copy(f, tr); err != nil { //nolint:gosec // only used with trusted testing config data return err } @@ -100,3 +106,24 @@ func Uncompress(r io.Reader, dst string) error { } } } + +func resolveEntryPath(destination, headerName string) (string, error) { + cleanDest := filepath.Clean(destination) + cleanName := filepath.Clean(headerName) + + if filepath.IsAbs(cleanName) { + return "", fmt.Errorf("tar entry %q: absolute paths are not supported", headerName) + } + + target := filepath.Join(cleanDest, cleanName) + rel, err := filepath.Rel(cleanDest, target) + if err != nil { + return "", fmt.Errorf("tar entry %q: %w", headerName, err) + } + + if rel == ".." 
|| strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return "", fmt.Errorf("tar entry %q: invalid path", headerName) + } + + return target, nil +} diff --git a/cmd/opdoc/opdoc.go b/cmd/opdoc/opdoc.go index 8e4cf3df84..8b2243879f 100644 --- a/cmd/opdoc/opdoc.go +++ b/cmd/opdoc/opdoc.go @@ -256,7 +256,7 @@ func opToMarkdown(out io.Writer, op *logic.OpSpec, groupDocWritten map[string]bo } func opsToMarkdown(out io.Writer, version uint64) error { - _, err := out.Write([]byte(fmt.Sprintf("# v%d Opcodes\n\nOps have a 'cost' of 1 unless otherwise specified.\n\n", version))) + _, err := fmt.Fprintf(out, "# v%d Opcodes\n\nOps have a 'cost' of 1 unless otherwise specified.\n\n", version) if err != nil { return err } diff --git a/cmd/partitiontest_linter/go.mod b/cmd/partitiontest_linter/go.mod index 93336aa3e0..e4d7f8eafe 100644 --- a/cmd/partitiontest_linter/go.mod +++ b/cmd/partitiontest_linter/go.mod @@ -1,12 +1,15 @@ module github.com/algorand/go-algorand/cmd/partitiontest_linter -go 1.23 +go 1.25 -toolchain go1.23.9 +toolchain go1.25.3 require ( - golang.org/x/mod v0.22.0 // indirect - golang.org/x/sync v0.9.0 // indirect + golang.org/x/mod v0.24.0 // indirect + golang.org/x/sync v0.13.0 // indirect ) -require golang.org/x/tools v0.27.0 +require ( + github.com/golangci/plugin-module-register v0.1.2 + golang.org/x/tools v0.32.0 +) diff --git a/cmd/partitiontest_linter/go.sum b/cmd/partitiontest_linter/go.sum index 394d668cc6..e2328570de 100644 --- a/cmd/partitiontest_linter/go.sum +++ b/cmd/partitiontest_linter/go.sum @@ -1,8 +1,10 @@ +github.com/golangci/plugin-module-register v0.1.2 h1:e5WM6PO6NIAEcij3B053CohVp3HIYbzSuP53UAYgOpg= +github.com/golangci/plugin-module-register v0.1.2/go.mod h1:1+QGTsKBvAIvPvoY/os+G5eoqxWn70HYDm2uvUyGuVw= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -golang.org/x/mod v0.22.0 
h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= -golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= -golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= +golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= diff --git a/cmd/partitiontest_linter/linter.go b/cmd/partitiontest_linter/linter.go index 05af0ae6b9..6807e8c2ed 100644 --- a/cmd/partitiontest_linter/linter.go +++ b/cmd/partitiontest_linter/linter.go @@ -20,6 +20,7 @@ import ( "go/ast" "strings" + "github.com/golangci/plugin-module-register/register" "golang.org/x/tools/go/analysis" ) @@ -30,10 +31,10 @@ const functionNamePrefix string = "Test" const parameterType string = "T" const parameterName string = "t" -// Analyzer initilization +// Analyzer initialization var Analyzer = &analysis.Analyzer{ - Name: "lint", - Doc: "This custom linter checks inside files that end in '_test.go', and inside functions that start with 'Test' and have testing argument, for a line 'partitiontest.ParitionTest()'", + Name: "partitiontest", + Doc: "This custom linter checks inside files that end in '_test.go', and inside functions that start with 'Test' and have testing argument, for a line 'partitiontest.PartitionTest()'", Run: run, } @@ -58,7 +59,7 @@ func run(pass *analysis.Pass) (interface{}, error) { if 
!isTestParameterInFunction(fn.Type.Params.List[0].Type, parameterType) { continue } - if !isSearchLineInFunction(fn) { + if !hasPartitionInvocation(f, fn) { pass.Reportf(fn.Pos(), "%s: Add missing partition call to top of test. To disable partitioning, add it as a comment: %s.%s(%s)", fn.Name.Name, packageName, functionName, parameterName) } @@ -83,17 +84,26 @@ func isTestParameterInFunction(typ ast.Expr, wantType string) bool { return false } +func hasPartitionInvocation(file *ast.File, fn *ast.FuncDecl) bool { + if isSearchLineInFunction(fn) { + return true + } + return hasPartitionComment(file, fn) +} + func isSearchLineInFunction(fn *ast.FuncDecl) bool { for _, oneline := range fn.Body.List { if exprStmt, ok := oneline.(*ast.ExprStmt); ok { if call, ok := exprStmt.X.(*ast.CallExpr); ok { - if fun, ok := call.Fun.(*ast.SelectorExpr); ok { - if !doesPackageNameMatch(fun) { - continue - } - if !doesFunctionNameMatch(fun) { - continue - } + fun, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + continue + } + if !doesPackageNameMatch(fun) { + continue + } + if !doesFunctionNameMatch(fun) { + continue } if !doesParameterNameMatch(call, fn) { @@ -107,6 +117,20 @@ func isSearchLineInFunction(fn *ast.FuncDecl) bool { return false } +func hasPartitionComment(file *ast.File, fn *ast.FuncDecl) bool { + for _, commentGroup := range file.Comments { + if commentGroup.Pos() < fn.Pos() || commentGroup.Pos() > fn.End() { + continue + } + for _, comment := range commentGroup.List { + if strings.Contains(comment.Text, "partitiontest.PartitionTest(") { + return true + } + } + } + return false +} + func doesPackageNameMatch(fun *ast.SelectorExpr) bool { if packageobject, ok := fun.X.(*ast.Ident); ok { if packageobject.Name == packageName { @@ -131,3 +155,24 @@ func doesParameterNameMatch(call *ast.CallExpr, fn *ast.FuncDecl) bool { } return false } + +// V2 module plugin registration + +func init() { + register.Plugin("partitiontest", New) +} + +// PartitionTestPlugin implements the 
golangci-lint v2 module plugin interface +type PartitionTestPlugin struct{} + +func New(_ any) (register.LinterPlugin, error) { + return &PartitionTestPlugin{}, nil +} + +func (p *PartitionTestPlugin) BuildAnalyzers() ([]*analysis.Analyzer, error) { + return []*analysis.Analyzer{Analyzer}, nil +} + +func (p *PartitionTestPlugin) GetLoadMode() string { + return register.LoadModeSyntax +} diff --git a/cmd/partitiontest_linter/plugin/plugin.go b/cmd/partitiontest_linter/plugin/plugin.go deleted file mode 100644 index a8f248c65c..0000000000 --- a/cmd/partitiontest_linter/plugin/plugin.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2019-2025 Algorand, Inc. -// This file is part of go-algorand -// -// go-algorand is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as -// published by the Free Software Foundation, either version 3 of the -// License, or (at your option) any later version. -// -// go-algorand is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with go-algorand. If not, see . 
- -package main - -import ( - linter "github.com/algorand/go-algorand/cmd/partitiontest_linter" - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/singlechecker" -) - -type analyzerPlugin struct{} - -// This must be implemented -func (*analyzerPlugin) GetAnalyzers() []*analysis.Analyzer { - return []*analysis.Analyzer{ - linter.Analyzer, - } -} - -// AnalyzerPlugin must be defined and named 'AnalyzerPlugin' -var AnalyzerPlugin analyzerPlugin - -func main() { - singlechecker.Main(linter.Analyzer) -} diff --git a/cmd/partitiontest_linter/testdata/linter_testdata_test.go b/cmd/partitiontest_linter/testdata/linter_testdata_test.go index b50d313b3e..2e06e15dfd 100644 --- a/cmd/partitiontest_linter/testdata/linter_testdata_test.go +++ b/cmd/partitiontest_linter/testdata/linter_testdata_test.go @@ -42,13 +42,13 @@ func notTestFunctionWithCorrectParamWrongLine(t *testing.T) { println("something") } -func TestFunctionWithCorrectParamOnly(t *testing.T) {} // want "function is missing partitiontest.PartitionTest" +func TestFunctionWithCorrectParamOnly(t *testing.T) {} // want "Add missing partition call to top of test" func TestFunctionWithCorrectParamCorrectLine(t *testing.T) { partitiontest.PartitionTest(t) } -func TestFunctionWithCorrectParamBadLine(t *testing.T) { // want "function is missing partitiontest.PartitionTest" +func TestFunctionWithCorrectParamBadLine(t *testing.T) { // want "Add missing partition call to top of test" println("something") } @@ -56,6 +56,8 @@ func TestFunctionWithDifferentName(n *testing.T) { partitiontest.PartitionTest(n) } +func helperFunction(t *testing.T) {} + func TestFunctionWithCorrectParamNotFirstCorrectLine(t *testing.T) { println("something") partitiontest.PartitionTest(t) @@ -71,3 +73,12 @@ func TestFunctionWithCorrectParamMiddleCorrectLine(t *testing.T) { partitiontest.PartitionTest(t) println("something") } + +func TestFunctionWithCorrectParamLeadingDifferentCall(t *testing.T) { // want "Add missing partition call 
to top of test" + helperFunction(t) +} + +func TestFunctionWithPartitionComment(t *testing.T) { + // partitiontest.PartitionTest(t) + println("something") +} diff --git a/cmd/pingpong/runCmd.go b/cmd/pingpong/runCmd.go index a32c37cc88..5d70be44ad 100644 --- a/cmd/pingpong/runCmd.go +++ b/cmd/pingpong/runCmd.go @@ -103,7 +103,7 @@ func init() { runCmd.Flags().Uint64VarP(&minAccountFunds, "minaccount", "", 0, "The minimum amount to fund a test account with") runCmd.Flags().Uint64VarP(&txnPerSec, "tps", "t", 0, "Number of Txn per second that pingpong sends") runCmd.Flags().Int64VarP(&maxFee, "mf", "f", -1, "The MAX fee to be used for transactions, a value of '0' tells the server to use a suggested fee.") - runCmd.Flags().Uint64VarP(&minFee, "minf", "m", 1000, "The MIN fee to be used for randomFee transactions") + runCmd.Flags().Uint64VarP(&minFee, "minf", "m", 0, "The MIN fee to be used for randomFee transactions (0 will use suggested fee)") runCmd.Flags().BoolVar(&randomAmount, "ra", false, "Set to enable random amounts (up to maxamount)") runCmd.Flags().BoolVar(&noRandomAmount, "nra", false, "Set to disable random amounts") runCmd.Flags().BoolVar(&randomFee, "rf", false, "Set to enable random fees (between minf and mf)") diff --git a/cmd/tealdbg/cdtSession.go b/cmd/tealdbg/cdtSession.go index 0b15ce11a9..71d25c7348 100644 --- a/cmd/tealdbg/cdtSession.go +++ b/cmd/tealdbg/cdtSession.go @@ -378,7 +378,7 @@ func (s *cdtSession) handleCdtRequest(req *cdt.ChromeRequest, state *cdtState) ( var desc []cdt.RuntimePropertyDescriptor desc, err = state.getObjectDescriptor(objID, preview) if err != nil { - err = fmt.Errorf("getObjectDescriptor error: " + err.Error()) + err = fmt.Errorf("getObjectDescriptor error: %w", err) return } @@ -386,7 +386,7 @@ func (s *cdtSession) handleCdtRequest(req *cdt.ChromeRequest, state *cdtState) ( var data []byte data, err = json.Marshal(desc) if err != nil { - err = fmt.Errorf("getObjectDescriptor json error: " + err.Error()) + err = 
fmt.Errorf("getObjectDescriptor json error: %w", err) return } log.Printf("Desc object: %s", string(data)) diff --git a/cmd/tealdbg/cdtState.go b/cmd/tealdbg/cdtState.go index b7dc3def64..b84d5916a7 100644 --- a/cmd/tealdbg/cdtState.go +++ b/cmd/tealdbg/cdtState.go @@ -600,8 +600,8 @@ func decodeNestedObjID(objID string) (string, []int, bool) { } groupIDs := objID[len(prefix)+1:] - parts := strings.Split(groupIDs, "_") - for _, id := range parts { + parts := strings.SplitSeq(groupIDs, "_") + for id := range parts { if val, err := strconv.ParseInt(id, 10, 32); err == nil { parsedIDs = append(parsedIDs, int(val)) } else { diff --git a/cmd/tealdbg/local.go b/cmd/tealdbg/local.go index 4756e9b731..5921b9e0cd 100644 --- a/cmd/tealdbg/local.go +++ b/cmd/tealdbg/local.go @@ -90,12 +90,8 @@ func txnGroupFromParams(dp *DebugParams) (txnGroup []transactions.SignedTxn, err // if conversion failed report all intermediate decoding errors if err != nil { - if err1 != nil { - log.Printf("Decoding as JSON txn failed: %s", err1.Error()) - } - if err2 != nil { - log.Printf("Decoding as JSON txn group failed: %s", err2.Error()) - } + log.Printf("Decoding as JSON txn failed: %v", err1) + log.Printf("Decoding as JSON txn group failed: %v", err2) } return @@ -141,12 +137,8 @@ func balanceRecordsFromParams(dp *DebugParams) (records []basics.BalanceRecord, // if conversion failed report all intermediate decoding errors if err != nil { - if err1 != nil { - log.Printf("Decoding as JSON record failed: %s", err1.Error()) - } - if err2 != nil { - log.Printf("Decoding as JSON array of records failed: %s", err2.Error()) - } + log.Printf("Decoding as JSON record failed: %v", err1) + log.Printf("Decoding as JSON array of records failed: %v", err2) } return diff --git a/cmd/tealdbg/local_test.go b/cmd/tealdbg/local_test.go index 9e4ecb25ef..f789253768 100644 --- a/cmd/tealdbg/local_test.go +++ b/cmd/tealdbg/local_test.go @@ -1248,15 +1248,19 @@ int 1` } balanceBlob := protocol.EncodeMsgp(&br) + // 
Get proto using the same lookup that LocalRunner.Setup will use + _, proto, err := protoFromString(string(protocol.ConsensusCurrentVersion)) + a.NoError(err) + // two testcase: success with enough fees and fail otherwise var tests = []struct { fee uint64 expected func(LocalRunner, runAllResult) }{ - {2000, func(l LocalRunner, r runAllResult) { + {2 * proto.MinTxnFee, func(l LocalRunner, r runAllResult) { a.Equal(allPassing(len(l.runs)), r) }}, - {1500, func(_ LocalRunner, r runAllResult) { + {proto.MinTxnFee + proto.MinTxnFee/2, func(_ LocalRunner, r runAllResult) { a.Condition(allErrors(r.allErrors())) for _, result := range r.results { a.False(result.pass) diff --git a/cmd/util/cmd.go b/cmd/util/cmd.go index ec72cfceac..f1f6c58022 100644 --- a/cmd/util/cmd.go +++ b/cmd/util/cmd.go @@ -106,8 +106,8 @@ func (c *CobraStringSliceValue) IsSet() bool { return c.isSet } // Set sets a value and fails if it is not allowed func (c *CobraStringSliceValue) Set(values string) error { - others := strings.Split(values, ",") - for _, other := range others { + others := strings.SplitSeq(values, ",") + for other := range others { other = strings.TrimSpace(other) if _, ok := c.allowedMap[other]; ok { c.value = append(c.value, other) diff --git a/cmd/util/datadir/datadir.go b/cmd/util/datadir/datadir.go index 8040ccaec2..3c9364f7e6 100644 --- a/cmd/util/datadir/datadir.go +++ b/cmd/util/datadir/datadir.go @@ -24,7 +24,7 @@ import ( // DataDirs contains the list of data directories var DataDirs []string -// ResolveDataDir determines the data directory to to use. +// ResolveDataDir determines the data directory to use. // If not specified on cmdline with '-d', look for default in environment. 
func ResolveDataDir() string { var dir string diff --git a/config/config_test.go b/config/config_test.go index 68928d5908..88e90289e3 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -525,7 +525,7 @@ func TestLocal_StructTags(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - localType := reflect.TypeOf(Local{}) + localType := reflect.TypeFor[Local]() versionField, ok := localType.FieldByName("Version") require.True(t, ok) @@ -580,7 +580,7 @@ func TestLocal_VersionField(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - localType := reflect.TypeOf(Local{}) + localType := reflect.TypeFor[Local]() field, ok := localType.FieldByName("Version") require.True(t, true, ok) ver := 0 @@ -861,6 +861,10 @@ func (l tLogger) Infof(fmts string, args ...interface{}) { l.t.Logf(fmts, args...) } +func (l tLogger) Warnf(fmts string, args ...interface{}) { + l.t.Logf(fmts, args...) +} + // TestEnsureAndResolveGenesisDirs confirms that paths provided in the config are resolved to absolute paths and are created if relevant func TestEnsureAndResolveGenesisDirs(t *testing.T) { partitiontest.PartitionTest(t) diff --git a/config/consensus.go b/config/consensus.go index 36daad899b..3045b83eb9 100644 --- a/config/consensus.go +++ b/config/consensus.go @@ -17,6 +17,7 @@ package config import ( + "maps" "time" "github.com/algorand/go-algorand/config/bounds" @@ -768,13 +769,7 @@ func (cp ConsensusProtocols) DeepCopy() ConsensusProtocols { staticConsensus := make(ConsensusProtocols) for consensusVersion, consensusParams := range cp { // recreate the ApprovedUpgrades map since we don't want to modify the original one. 
- if consensusParams.ApprovedUpgrades != nil { - newApprovedUpgrades := make(map[protocol.ConsensusVersion]uint64) - for ver, when := range consensusParams.ApprovedUpgrades { - newApprovedUpgrades[ver] = when - } - consensusParams.ApprovedUpgrades = newApprovedUpgrades - } + consensusParams.ApprovedUpgrades = maps.Clone(consensusParams.ApprovedUpgrades) staticConsensus[consensusVersion] = consensusParams } return staticConsensus diff --git a/config/defaultsGenerator/defaultsGenerator.go b/config/defaultsGenerator/defaultsGenerator.go index 385726201b..25bd60b165 100644 --- a/config/defaultsGenerator/defaultsGenerator.go +++ b/config/defaultsGenerator/defaultsGenerator.go @@ -111,7 +111,7 @@ func (a byFieldName) Less(i, j int) bool { } func prettyPrint(c config.Local, format string) (out string) { - localType := reflect.TypeOf(c) + localType := reflect.TypeFor[config.Local]() fields := []reflect.StructField{} for fieldNum := 0; fieldNum < localType.NumField(); fieldNum++ { diff --git a/config/dnsbootstrap.go b/config/dnsbootstrap.go index d6780384b3..b01876cc3b 100644 --- a/config/dnsbootstrap.go +++ b/config/dnsbootstrap.go @@ -107,7 +107,7 @@ func parseDNSBootstrap(dnsBootstrapID string, network protocol.NetworkID, defaul } // Normalize the dnsBootstrapID and insert the network - dnsBootstrapID = strings.Replace(strings.TrimSpace(strings.ToLower(dnsBootstrapID)), "", string(network), -1) + dnsBootstrapID = strings.ReplaceAll(strings.TrimSpace(strings.ToLower(dnsBootstrapID)), "", string(network)) if dnsBootstrapID == "" { return nil, errors.New(bootstrapErrorEmpty) diff --git a/config/localTemplate.go b/config/localTemplate.go index 8525031f59..523707ba23 100644 --- a/config/localTemplate.go +++ b/config/localTemplate.go @@ -43,7 +43,7 @@ type Local struct { // Version tracks the current version of the defaults so we can migrate old -> new // This is specifically important whenever we decide to change the default value // for an existing parameter. 
This field tag must be updated any time we add a new version. - Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22" version[23]:"23" version[24]:"24" version[25]:"25" version[26]:"26" version[27]:"27" version[28]:"28" version[29]:"29" version[30]:"30" version[31]:"31" version[32]:"32" version[33]:"33" version[34]:"34" version[35]:"35" version[36]:"36"` + Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22" version[23]:"23" version[24]:"24" version[25]:"25" version[26]:"26" version[27]:"27" version[28]:"28" version[29]:"29" version[30]:"30" version[31]:"31" version[32]:"32" version[33]:"33" version[34]:"34" version[35]:"35" version[36]:"36" version[37]:"37"` // Archival nodes retain a full copy of the block history. Non-Archival nodes will delete old blocks and only retain what's need to properly validate blockchain messages (the precise number of recent blocks depends on the consensus parameters. Currently the last 1321 blocks are required). This means that non-Archival nodes require significantly less storage than Archival nodes. If setting this to true for the first time, the existing ledger may need to be deleted to get the historical values stored as the setting only affects current blocks forward. 
To do this, shutdown the node and delete all .sqlite files within the data/testnet-version directory, except the crash.sqlite file. Restart the node and wait for the node to sync. Archival bool `version[0]:"false"` @@ -326,7 +326,7 @@ type Local struct { // determining the source of a connection. If used, it should be set to the string "X-Forwarded-For", unless the // proxy vendor provides another header field. In the case of CloudFlare proxy, the "CF-Connecting-IP" header // field can be used. - // This setting does not support multiple X-Forwarded-For HTTP headers or multiple values in in the header and always uses the last value + // This setting does not support multiple X-Forwarded-For HTTP headers or multiple values in the header and always uses the last value // from the last X-Forwarded-For HTTP header that corresponds to a single reverse proxy (even if it received the request from another reverse proxy or adversary node). // // WARNING: By enabling this option, you are trusting peers to provide accurate forwarding addresses. @@ -645,6 +645,18 @@ type Local struct { // EnableVoteCompression controls whether vote compression is enabled for websocket networks EnableVoteCompression bool `version[36]:"true"` + + // StatefulVoteCompressionTableSize controls the size of the per-peer tables used for vote compression. + // If 0, stateful vote compression is disabled (but stateless vote compression will still be used if + // EnableVoteCompression is true). This value should be a power of 2 between 16 and 2048, inclusive. + // The per-peer overhead for stateful compression in one direction (from peer A => B) is 224 bytes times + // this value, plus 800 bytes of fixed overhead; it is twice that if votes are also being sent from B => A. + // So the default value of 2048 requires 459,552 bytes of memory per peer for stateful vote compression + // in one direction, or 919,104 bytes if both directions are used. 
+ StatefulVoteCompressionTableSize uint `version[37]:"2048"` + + // EnableBatchVerification controls whether ed25519 batch verification is enabled + EnableBatchVerification bool `version[37]:"true"` } // DNSBootstrapArray returns an array of one or more DNS Bootstrap identifiers @@ -666,8 +678,8 @@ func (cfg Local) ValidateDNSBootstrapArray(networkID protocol.NetworkID) ([]*DNS func (cfg Local) internalValidateDNSBootstrapArray(networkID protocol.NetworkID) ( bootstrapArray []*DNSBootstrap, err error) { - bootstrapStringArray := strings.Split(cfg.DNSBootstrapID, ";") - for _, bootstrapString := range bootstrapStringArray { + bootstrapStringArray := strings.SplitSeq(cfg.DNSBootstrapID, ";") + for bootstrapString := range bootstrapStringArray { if len(strings.TrimSpace(bootstrapString)) == 0 { continue } @@ -868,6 +880,7 @@ func (cfg *Local) ResolveLogPaths(rootDir string) (liveLog, archive string) { type logger interface { Infof(format string, args ...interface{}) + Warnf(format string, args ...interface{}) } // EnsureAndResolveGenesisDirs will resolve the supplied config paths to absolute paths, and will create the genesis directories of each @@ -1056,3 +1069,35 @@ func (cfg *Local) TracksCatchpoints() bool { } return false } + +// NormalizedVoteCompressionTableSize validates and normalizes the StatefulVoteCompressionTableSize config value. +// Supported values are powers of 2 in the range [16, 2048]. +// Values >= 2048 clamp to 2048. +// Values 1-15 are below the minimum and return 0 (disabled). +// Values between supported powers of 2 round down to the nearest supported value. +// Logs a message if the configured value is adjusted. +// Returns the normalized size. +func (cfg Local) NormalizedVoteCompressionTableSize(log logger) uint { + configured := cfg.StatefulVoteCompressionTableSize + if configured == 0 { + return 0 + } + if configured < 16 { + log.Warnf("StatefulVoteCompressionTableSize configured as %d is invalid (minimum 16). 
Stateful vote compression disabled.", configured) + return 0 + } + // Round down to nearest power of 2 within supported range [16, 2048] + supportedSizes := []uint{2048, 1024, 512, 256, 128, 64, 32, 16} + for _, size := range supportedSizes { + if configured >= size { + if configured != size { + log.Infof("StatefulVoteCompressionTableSize configured as %d, using nearest supported value: %d", configured, size) + } + return size + } + } + + // Should never reach here given the checks above + log.Warnf("StatefulVoteCompressionTableSize configured as %d is invalid. Stateful vote compression disabled.", configured) + return 0 +} diff --git a/config/local_defaults.go b/config/local_defaults.go index 70df924d87..ad090e0289 100644 --- a/config/local_defaults.go +++ b/config/local_defaults.go @@ -20,7 +20,7 @@ package config var defaultLocal = Local{ - Version: 36, + Version: 37, AccountUpdatesStatsInterval: 5000000000, AccountsRebuildSynchronousMode: 1, AgreementIncomingBundlesQueueLength: 15, @@ -63,6 +63,7 @@ var defaultLocal = Local{ EnableAgreementReporting: false, EnableAgreementTimeMetrics: false, EnableAssembleStats: false, + EnableBatchVerification: true, EnableBlockService: false, EnableDHTProviders: false, EnableDeveloperAPI: false, @@ -139,6 +140,7 @@ var defaultLocal = Local{ RestReadTimeoutSeconds: 15, RestWriteTimeoutSeconds: 120, RunHosted: false, + StatefulVoteCompressionTableSize: 2048, StateproofDir: "", StorageEngine: "sqlite", SuggestedFeeBlockHistory: 3, diff --git a/config/migrate.go b/config/migrate.go index 5522454094..238725af47 100644 --- a/config/migrate.go +++ b/config/migrate.go @@ -54,7 +54,7 @@ func migrate(cfg Local) (newCfg Local, migrations []MigrationResult, err error) break } defaultCurrentConfig := GetVersionedDefaultLocalConfig(newCfg.Version) - localType := reflect.TypeOf(Local{}) + localType := reflect.TypeFor[Local]() nextVersion := newCfg.Version + 1 for fieldNum := 0; fieldNum < localType.NumField(); fieldNum++ { field := 
localType.Field(fieldNum) @@ -163,7 +163,7 @@ func migrate(cfg Local) (newCfg Local, migrations []MigrationResult, err error) } func getLatestConfigVersion() uint32 { - localType := reflect.TypeOf(Local{}) + localType := reflect.TypeFor[Local]() versionField, found := localType.FieldByName("Version") if !found { return 0 @@ -184,7 +184,7 @@ func GetVersionedDefaultLocalConfig(version uint32) (local Local) { local = GetVersionedDefaultLocalConfig(version - 1) } // apply version specific changes. - localType := reflect.TypeOf(local) + localType := reflect.TypeFor[Local]() for fieldNum := 0; fieldNum < localType.NumField(); fieldNum++ { field := localType.Field(fieldNum) versionDefaultValue, hasTag := reflect.StructTag(field.Tag).Lookup(fmt.Sprintf("version[%d]", version)) diff --git a/config/version.go b/config/version.go index fb08d9c578..e80842afbc 100644 --- a/config/version.go +++ b/config/version.go @@ -33,7 +33,7 @@ const VersionMajor = 4 // VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced. // Not enforced until after initial public release (x > 0). -const VersionMinor = 3 +const VersionMinor = 4 // Version is the type holding our full version information. type Version struct { diff --git a/crypto/batchverifier.go b/crypto/batchverifier.go index 65b2febeaa..cd0168ae1f 100644 --- a/crypto/batchverifier.go +++ b/crypto/batchverifier.go @@ -73,16 +73,35 @@ func ed25519_randombytes_unsafe(p unsafe.Pointer, len C.size_t) { const minBatchVerifierAlloc = 16 const useSingleVerifierDefault = true -// MakeBatchVerifier creates a BatchVerifier instance with the provided options. +// ed25519BatchVerifierFactory is the global singleton used for batch signature verification. +// By default it uses the libsodium implementation. This can be changed during initialization +// (e.g., by the config package when algod loads) to use the ed25519consensus implementation. 
+var ed25519BatchVerifierFactory func(hint int) BatchVerifier = makeLibsodiumBatchVerifier + +// SetEd25519BatchVerifier allows the config package to switch the implementation +// at startup based on configuration. Pass true to use ed25519consensus, false for libsodium. +func SetEd25519BatchVerifier(useEd25519Consensus bool) { + if useEd25519Consensus { + ed25519BatchVerifierFactory = makeEd25519ConsensusBatchVerifier + } else { + ed25519BatchVerifierFactory = makeLibsodiumBatchVerifier + } +} + +// MakeBatchVerifier creates a BatchVerifier instance. func MakeBatchVerifier() BatchVerifier { - return MakeBatchVerifierWithHint(minBatchVerifierAlloc) + return ed25519BatchVerifierFactory(minBatchVerifierAlloc) } -// MakeBatchVerifierWithHint creates a cgoBatchVerifier instance. This function pre-allocates -// amount of free space to enqueue signatures without expanding +// MakeBatchVerifierWithHint creates a BatchVerifier instance. This function pre-allocates +// space to enqueue signatures without expanding. func MakeBatchVerifierWithHint(hint int) BatchVerifier { + return ed25519BatchVerifierFactory(hint) +} + +func makeLibsodiumBatchVerifier(hint int) BatchVerifier { // preallocate enough storage for the expected usage. We will reallocate as needed. - if hint < minBatchVerifierAlloc { + if hint <= 0 { hint = minBatchVerifierAlloc } return &cgoBatchVerifier{ @@ -152,7 +171,7 @@ func (b *cgoBatchVerifier) VerifyWithFeedback() (failed []bool, err error) { } allValid, failed := cgoBatchVerificationImpl(messages, msgLengths, b.publicKeys, b.signatures) if allValid { - return failed, nil + return nil, nil } return failed, ErrBatchHasFailedSigs } @@ -170,7 +189,7 @@ func (b *cgoBatchVerifier) singleVerify() (failed []bool, err error) { if containsFailed { return failed, ErrBatchHasFailedSigs } - return failed, nil + return nil, nil } // cgoBatchVerificationImpl invokes the ed25519 batch verification algorithm. 
@@ -185,18 +204,26 @@ func cgoBatchVerificationImpl(messages []byte, msgLengths []uint64, publicKeys [ signatures2D := make([]*C.uchar, numberOfSignatures) // call the batch verifier + // Use unsafe.SliceData to safely get pointers to underlying arrays allValid := C.ed25519_batch_wrapper( - &messages2D[0], &publicKeys2D[0], &signatures2D[0], - (*C.uchar)(&messages[0]), - (*C.ulonglong)(&msgLengths[0]), - (*C.uchar)(&publicKeys[0][0]), - (*C.uchar)(&signatures[0][0]), + (**C.uchar)(unsafe.SliceData(messages2D)), + (**C.uchar)(unsafe.SliceData(publicKeys2D)), + (**C.uchar)(unsafe.SliceData(signatures2D)), + (*C.uchar)(unsafe.SliceData(messages)), + (*C.ulonglong)(unsafe.SliceData(msgLengths)), + (*C.uchar)(unsafe.SliceData(publicKeys[0][:])), + (*C.uchar)(unsafe.SliceData(signatures[0][:])), C.size_t(numberOfSignatures), - (*C.int)(&valid[0])) + (*C.int)(unsafe.SliceData(valid))) + + if allValid == 0 { // all signatures valid + return true, nil + } + // not all signatures valid, identify the failed signatures failed = make([]bool, numberOfSignatures) for i := 0; i < numberOfSignatures; i++ { failed[i] = (valid[i] == 0) } - return allValid == 0, failed + return false, failed } diff --git a/crypto/batchverifier_bench_test.go b/crypto/batchverifier_bench_test.go new file mode 100644 index 0000000000..c676a5265e --- /dev/null +++ b/crypto/batchverifier_bench_test.go @@ -0,0 +1,123 @@ +// Copyright (C) 2019-2025 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package crypto + +import ( + cryptorand "crypto/rand" + "io" + "testing" + + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/stretchr/testify/require" +) + +func randSignedMsg(t testing.TB, r io.Reader) (SignatureVerifier, Hashable, Signature) { + mlen := 100 + msg := TestingHashable{data: make([]byte, mlen)} + n, err := r.Read(msg.data) + require.NoError(t, err) + require.Equal(t, n, mlen) + var s Seed + n, err = r.Read(s[:]) + require.NoError(t, err) + require.Equal(t, 32, n) + secrets := GenerateSignatureSecrets(s) + return secrets.SignatureVerifier, msg, secrets.Sign(msg) +} + +// BenchmarkBatchVerifierImpls benchmarks different batch verification implementations +// with realistic batch sizes (100 batches of 64 signatures each) +func BenchmarkBatchVerifierImpls(b *testing.B) { + partitiontest.PartitionTest(b) + + numBatches := 100 + batchSize := 64 + msgs := make([][]Hashable, numBatches) + pks := make([][]SignatureVerifier, numBatches) + sigs := make([][]Signature, numBatches) + r := cryptorand.Reader + for i := 0; i < numBatches; i++ { + for j := 0; j < batchSize; j++ { + pk, msg, sig := randSignedMsg(b, r) + msgs[i] = append(msgs[i], msg) + pks[i] = append(pks[i], pk) + sigs[i] = append(sigs[i], sig) + } + } + + b.Log("running with", b.N, "iterations using", len(msgs), "batches of", batchSize, "signatures") + runImpl := func(b *testing.B, bv BatchVerifier, + msgs [][]Hashable, pks [][]SignatureVerifier, sigs [][]Signature) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + batchIdx := i % numBatches + for j := range msgs[batchIdx] { + bv.EnqueueSignature(pks[batchIdx][j], msgs[batchIdx][j], sigs[batchIdx][j]) + } + require.NoError(b, bv.Verify()) + } + } + + b.Run("libsodium_single", func(b *testing.B) { + bv := makeLibsodiumBatchVerifier(batchSize) + 
bv.(*cgoBatchVerifier).useSingle = true + runImpl(b, bv, msgs, pks, sigs) + }) + b.Run("libsodium_batch", func(b *testing.B) { + bv := makeLibsodiumBatchVerifier(batchSize) + bv.(*cgoBatchVerifier).useSingle = false + runImpl(b, bv, msgs, pks, sigs) + }) + b.Run("ed25519consensus", func(b *testing.B) { + bv := makeEd25519ConsensusBatchVerifier(batchSize) + runImpl(b, bv, msgs, pks, sigs) + }) +} + +func BenchmarkCanonicalityCheck(b *testing.B) { + partitiontest.PartitionTest(b) + + const maxN = 10000 + pubkeys := make([]SignatureVerifier, maxN) + sigs := make([]Signature, maxN) + for i := 0; i < maxN; i++ { + var s Seed + RandBytes(s[:]) + sigSecrets := GenerateSignatureSecrets(s) + pubkeys[i] = sigSecrets.SignatureVerifier + msg := randString() + sigs[i] = sigSecrets.Sign(msg) + } + + b.Run("pubkey_check", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = isCanonicalPoint(pubkeys[i%maxN]) + } + }) + + b.Run("signature_R_check", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = isCanonicalPoint([32]byte(sigs[i%maxN][:32])) + } + }) + + b.Run("both_checks", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = !isCanonicalPoint(pubkeys[i%maxN]) || !isCanonicalPoint([32]byte(sigs[i%maxN][:32])) + } + }) +} diff --git a/crypto/batchverifier_test.go b/crypto/batchverifier_test.go index 6f3c5954fc..ac91c19023 100644 --- a/crypto/batchverifier_test.go +++ b/crypto/batchverifier_test.go @@ -27,10 +27,42 @@ import ( "github.com/algorand/go-algorand/test/partitiontest" ) +// runnableTB is an interface constraint for types that have both testing.TB methods and Run +type runnableTB[T any] interface { + testing.TB + Run(string, func(T)) bool +} + +// runBatchVerifierImpls runs testing.{T,B}.Run against 3 batch verifier implementations as subtests. 
+func runBatchVerifierImpls[T runnableTB[T]](tb T, runFunc func(T, func(int) BatchVerifier)) { + tb.Run("libsodium_single", func(t T) { + runFunc(t, func(hint int) BatchVerifier { + bv := makeLibsodiumBatchVerifier(hint) + bv.(*cgoBatchVerifier).useSingle = true + return bv + }) + }) + tb.Run("libsodium_batch", func(t T) { + runFunc(t, func(hint int) BatchVerifier { + bv := makeLibsodiumBatchVerifier(hint) + bv.(*cgoBatchVerifier).useSingle = false + return bv + }) + }) + tb.Run("ed25519consensus", func(t T) { + runFunc(t, func(hint int) BatchVerifier { + return makeEd25519ConsensusBatchVerifier(hint) + }) + }) +} + func TestBatchVerifierSingle(t *testing.T) { partitiontest.PartitionTest(t) + runBatchVerifierImpls(t, testBatchVerifierSingle) +} +func testBatchVerifierSingle(t *testing.T, makeBV func(int) BatchVerifier) { // test expected success - bv := MakeBatchVerifier() + bv := makeBV(0) msg := randString() var s Seed RandBytes(s[:]) @@ -40,7 +72,7 @@ func TestBatchVerifierSingle(t *testing.T) { require.NoError(t, bv.Verify()) // test expected failure - bv = MakeBatchVerifier() + bv = makeBV(0) msg = randString() RandBytes(s[:]) sigSecrets = GenerateSignatureSecrets(s) @@ -53,9 +85,12 @@ func TestBatchVerifierSingle(t *testing.T) { func TestBatchVerifierBulk(t *testing.T) { partitiontest.PartitionTest(t) + runBatchVerifierImpls(t, testBatchVerifierBulk) +} +func testBatchVerifierBulk(t *testing.T, makeBV func(int) BatchVerifier) { for i := 1; i < 64*2+3; i++ { n := i - bv := MakeBatchVerifierWithHint(n) + bv := makeBV(n) var s Seed for i := 0; i < n; i++ { @@ -68,13 +103,15 @@ func TestBatchVerifierBulk(t *testing.T) { require.Equal(t, n, bv.GetNumberOfEnqueuedSignatures()) require.NoError(t, bv.Verify()) } - } func TestBatchVerifierBulkWithExpand(t *testing.T) { partitiontest.PartitionTest(t) + runBatchVerifierImpls(t, testBatchVerifierBulkWithExpand) +} +func testBatchVerifierBulkWithExpand(t *testing.T, makeBV func(int) BatchVerifier) { n := 64 - bv := 
MakeBatchVerifier() + bv := makeBV(0) // Start with no hint to test expansion var s Seed RandBytes(s[:]) @@ -89,8 +126,11 @@ func TestBatchVerifierBulkWithExpand(t *testing.T) { func TestBatchVerifierWithInvalidSiganture(t *testing.T) { partitiontest.PartitionTest(t) + runBatchVerifierImpls(t, testBatchVerifierWithInvalidSignature) +} +func testBatchVerifierWithInvalidSignature(t *testing.T, makeBV func(int) BatchVerifier) { n := 64 - bv := MakeBatchVerifier() + bv := makeBV(0) var s Seed RandBytes(s[:]) @@ -111,8 +151,11 @@ func TestBatchVerifierWithInvalidSiganture(t *testing.T) { } func BenchmarkBatchVerifier(b *testing.B) { + runBatchVerifierImpls(b, benchmarkBatchVerifier) +} +func benchmarkBatchVerifier(b *testing.B, makeBV func(int) BatchVerifier) { c := makeCurve25519Secret() - bv := MakeBatchVerifierWithHint(1) + bv := makeBV(1) for i := 0; i < b.N; i++ { str := randString() bv.EnqueueSignature(c.SignatureVerifier, str, c.Sign(str)) @@ -125,9 +168,12 @@ func BenchmarkBatchVerifier(b *testing.B) { // BenchmarkBatchVerifierBig with b.N over 1000 will report the expected performance // gain as the batchsize increases. All sigs are valid. func BenchmarkBatchVerifierBig(b *testing.B) { + runBatchVerifierImpls(b, benchmarkBatchVerifierBig) +} +func benchmarkBatchVerifierBig(b *testing.B, makeBV func(int) BatchVerifier) { c := makeCurve25519Secret() for batchSize := 1; batchSize <= 96; batchSize++ { - bv := MakeBatchVerifierWithHint(batchSize) + bv := makeBV(batchSize) for i := 0; i < batchSize; i++ { str := randString() bv.EnqueueSignature(c.SignatureVerifier, str, c.Sign(str)) @@ -149,16 +195,23 @@ func BenchmarkBatchVerifierBig(b *testing.B) { // invalid sigs to even numbered batch sizes. This shows the impact of invalid sigs on the // performance. Basically, all the gains from batching disappear. 
func BenchmarkBatchVerifierBigWithInvalid(b *testing.B) { + runBatchVerifierImpls(b, benchmarkBatchVerifierBigWithInvalid) +} +func benchmarkBatchVerifierBigWithInvalid(b *testing.B, makeBV func(int) BatchVerifier) { c := makeCurve25519Secret() badSig := Signature{} for batchSize := 1; batchSize <= 96; batchSize++ { - bv := MakeBatchVerifierWithHint(batchSize) + bv := makeBV(batchSize) + sigs := make([]Signature, batchSize) for i := 0; i < batchSize; i++ { str := randString() if batchSize%2 == 0 && (i == 0 || rand.Float32() < 0.1) { bv.EnqueueSignature(c.SignatureVerifier, str, badSig) + sigs[i] = badSig } else { - bv.EnqueueSignature(c.SignatureVerifier, str, c.Sign(str)) + sig := c.Sign(str) + bv.EnqueueSignature(c.SignatureVerifier, str, sig) + sigs[i] = sig } } b.Run(fmt.Sprintf("running batchsize %d", batchSize), func(b *testing.B) { @@ -170,13 +223,16 @@ func BenchmarkBatchVerifierBigWithInvalid(b *testing.B) { for x := 0; x < count; x++ { failed, err := bv.VerifyWithFeedback() if err != nil { + require.Len(b, failed, batchSize) for i, f := range failed { - if bv.(*cgoBatchVerifier).signatures[i] == badSig { + if sigs[i] == badSig { require.True(b, f) } else { require.False(b, f) } } + } else { + require.Nil(b, failed) } } }) @@ -185,22 +241,27 @@ func BenchmarkBatchVerifierBigWithInvalid(b *testing.B) { func TestEmpty(t *testing.T) { partitiontest.PartitionTest(t) - bv := MakeBatchVerifier() + runBatchVerifierImpls(t, testEmpty) +} +func testEmpty(t *testing.T, makeBV func(int) BatchVerifier) { + bv := makeBV(0) require.NoError(t, bv.Verify()) failed, err := bv.VerifyWithFeedback() require.NoError(t, err) - require.Empty(t, failed) + require.Nil(t, failed) } // TestBatchVerifierIndividualResults tests that VerifyWithFeedback // returns the correct failed signature indexes func TestBatchVerifierIndividualResults(t *testing.T) { partitiontest.PartitionTest(t) - + runBatchVerifierImpls(t, testBatchVerifierIndividualResults) +} +func 
testBatchVerifierIndividualResults(t *testing.T, makeBV func(int) BatchVerifier) { for i := 1; i < 64*2+3; i++ { n := i - bv := MakeBatchVerifierWithHint(n) + bv := makeBV(n) var s Seed badSigs := make([]bool, n, n) hasBadSig := false @@ -221,12 +282,13 @@ func TestBatchVerifierIndividualResults(t *testing.T) { failed, err := bv.VerifyWithFeedback() if hasBadSig { require.ErrorIs(t, err, ErrBatchHasFailedSigs) + require.Equal(t, len(badSigs), len(failed)) + for i := range badSigs { + require.Equal(t, badSigs[i], failed[i]) + } } else { require.NoError(t, err) - } - require.Equal(t, len(badSigs), len(failed)) - for i := range badSigs { - require.Equal(t, badSigs[i], failed[i]) + require.Nil(t, failed) } } } @@ -235,10 +297,12 @@ func TestBatchVerifierIndividualResults(t *testing.T) { // returns the correct failed signature indexes when all are valid func TestBatchVerifierIndividualResultsAllValid(t *testing.T) { partitiontest.PartitionTest(t) - + runBatchVerifierImpls(t, testBatchVerifierIndividualResultsAllValid) +} +func testBatchVerifierIndividualResultsAllValid(t *testing.T, makeBV func(int) BatchVerifier) { for i := 1; i < 64*2+3; i++ { n := i - bv := MakeBatchVerifierWithHint(n) + bv := makeBV(n) var s Seed for i := 0; i < n; i++ { msg := randString() @@ -250,10 +314,7 @@ func TestBatchVerifierIndividualResultsAllValid(t *testing.T) { require.Equal(t, n, bv.GetNumberOfEnqueuedSignatures()) failed, err := bv.VerifyWithFeedback() require.NoError(t, err) - require.Equal(t, bv.GetNumberOfEnqueuedSignatures(), len(failed)) - for _, f := range failed { - require.False(t, f) - } + require.Nil(t, failed) } } @@ -265,7 +326,7 @@ func TestBatchVerifierGC(t *testing.T) { t.Run("", func(t *testing.T) { t.Parallel() - bv := MakeBatchVerifierWithHint(n) + bv := makeLibsodiumBatchVerifier(n) var s Seed for i := 0; i < n; i++ { diff --git a/crypto/gobatchverifier.go b/crypto/gobatchverifier.go new file mode 100644 index 0000000000..46fcd0cae5 --- /dev/null +++ 
b/crypto/gobatchverifier.go @@ -0,0 +1,203 @@ +// Copyright (C) 2019-2025 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package crypto + +import ( + "bytes" + + "github.com/hdevalence/ed25519consensus" +) + +// ed25519ConsensusVerifySingle performs single signature verification using ed25519consensus, +// with additional checks to reject non-canonical encodings and small-order public keys. 
// ed25519ConsensusVerifySingle verifies one ed25519 signature via the
// ed25519consensus package, after applying Algorand's stricter pre-checks:
// both the public key A and the R component of the signature must be
// canonical point encodings, and A must not be a small-order point.
func ed25519ConsensusVerifySingle(publicKey [32]byte, message []byte, signature [64]byte) bool {
	// Check for non-canonical public key or R (first 32 bytes of signature), and reject small-order public keys
	if !isCanonicalPoint(publicKey) || !isCanonicalPoint([32]byte(signature[:32])) || hasSmallOrder(publicKey) {
		return false
	}

	return ed25519consensus.Verify(publicKey[:], message, signature[:])
}

// ed25519ConsensusVerifyEntry records one enqueued (key, message, signature)
// triple so VerifyWithFeedback can re-verify entries individually when the
// batch as a whole fails.
type ed25519ConsensusVerifyEntry struct {
	msgHashRep   []byte // HashRep(message), captured at enqueue time
	publicKey    SignatureVerifier
	signature    Signature
	failedChecks bool // true if this entry failed the canonical/small-order pre-checks
}

// ed25519ConsensusBatchVerifier implements BatchVerifier on top of
// ed25519consensus.BatchVerifier, layering on the same canonical-encoding and
// small-order pre-checks performed by ed25519ConsensusVerifySingle.
type ed25519ConsensusBatchVerifier struct {
	entries      []ed25519ConsensusVerifyEntry // used in VerifyWithFeedback to identify failed signatures
	failedChecks bool                          // true if any entry failed non-canonical or small-order checks
	bv           ed25519consensus.BatchVerifier
}

// makeEd25519ConsensusBatchVerifier returns a BatchVerifier preallocated for
// hint signatures; non-positive hints fall back to minBatchVerifierAlloc.
func makeEd25519ConsensusBatchVerifier(hint int) BatchVerifier {
	if hint <= 0 {
		hint = minBatchVerifierAlloc
	}
	return &ed25519ConsensusBatchVerifier{
		entries: make([]ed25519ConsensusVerifyEntry, 0, hint),
		bv:      ed25519consensus.NewPreallocatedBatchVerifier(hint),
	}
}

// EnqueueSignature adds a (verifier, message, signature) triple to the batch.
// Entries failing the canonical/small-order pre-checks are recorded (so they
// are reported by VerifyWithFeedback) but not handed to the underlying batch
// verifier; any such entry marks the whole batch as failed.
func (b *ed25519ConsensusBatchVerifier) EnqueueSignature(sigVerifier SignatureVerifier, message Hashable, sig Signature) {
	msgHashRep := HashRep(message)
	failedChecks := !isCanonicalPoint(sigVerifier) || !isCanonicalPoint([32]byte(sig[:32])) || hasSmallOrder(sigVerifier)

	entry := ed25519ConsensusVerifyEntry{
		msgHashRep:   msgHashRep,
		publicKey:    sigVerifier,
		signature:    sig,
		failedChecks: failedChecks,
	}
	b.entries = append(b.entries, entry)

	if failedChecks {
		b.failedChecks = true
	} else {
		b.bv.Add(sigVerifier[:], msgHashRep, sig[:])
	}
}

// GetNumberOfEnqueuedSignatures returns the number of signatures enqueued so far.
func (b *ed25519ConsensusBatchVerifier) GetNumberOfEnqueuedSignatures() int {
	return len(b.entries)
}

// Verify returns nil when every enqueued signature passed both the pre-checks
// and batch verification; an empty batch verifies trivially.
func (b *ed25519ConsensusBatchVerifier) Verify() error {
	if len(b.entries) == 0 {
		return nil
	}

	// Fail if any pre-checks failed or if batch verification fails
	if b.failedChecks || !b.bv.Verify() {
		return ErrBatchHasFailedSigs
	}
	return nil
}

// VerifyWithFeedback behaves like Verify but, on failure, also returns a
// per-entry slice marking exactly which signatures failed. Failed entries are
// identified by re-verifying each one individually with
// ed25519ConsensusVerifySingle; pre-check failures are marked directly.
func (b *ed25519ConsensusBatchVerifier) VerifyWithFeedback() (failed []bool, err error) {
	if len(b.entries) == 0 {
		return nil, nil
	}

	if !b.failedChecks && b.bv.Verify() {
		return nil, nil
	}

	failed = make([]bool, len(b.entries))
	for i := range b.entries {
		if b.entries[i].failedChecks {
			failed[i] = true
		} else {
			failed[i] = !ed25519ConsensusVerifySingle(b.entries[i].publicKey, b.entries[i].msgHashRep, b.entries[i].signature)
		}
	}

	return failed, ErrBatchHasFailedSigs
}

// Check that Y is canonical, using the succeed-fast algorithm from
// the "Taming the many EdDSAs" paper.
func isCanonicalY(p [32]byte) bool {
	// Fast exit: if the low byte is below 237 (0xed, the low byte of the
	// field prime p = 2^255-19), y cannot reach the non-canonical range.
	if p[0] < 237 {
		return true
	}
	// Any interior byte below 0xff also keeps y below the prime.
	for i := 1; i < 31; i++ {
		if p[i] != 255 {
			return true
		}
	}
	// Compare the top byte with the sign bit forced on.
	return (p[31] | 128) != 255
}

// isCanonicalPoint is a variable-time check that returns true if the
// 32-byte ed25519 point encoding is canonical.
func isCanonicalPoint(p [32]byte) bool {
	if !isCanonicalY(p) {
		return false
	}

	// Test for the two cases with a non-canonical sign bit not caught by the
	// non-canonical y-coordinate check above. They are points number 9 and 10
	// from Table 1 of the "Taming the many EdDSAs" paper.
	if p == [32]byte{ // (−0, 1)
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
	} || p == [32]byte{ // (-0, 2^255-20)
		0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	} {
		return false
	}

	return true
}

// from libsodium/crypto_core/ed25519/ref10/ed25519_ref10.c ge25519_has_small_order
var smallOrderPoints = [][32]byte{
	/* 0 (order 4) */ {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
	/* 1 (order 1) */ {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
	/* 2707385501144840649318225287225658788936804267575313519463743609750303402022
	   (order 8) */{
		0x26, 0xe8, 0x95, 0x8f, 0xc2, 0xb2, 0x27, 0xb0, 0x45, 0xc3, 0xf4,
		0x89, 0xf2, 0xef, 0x98, 0xf0, 0xd5, 0xdf, 0xac, 0x05, 0xd3, 0xc6,
		0x33, 0x39, 0xb1, 0x38, 0x02, 0x88, 0x6d, 0x53, 0xfc, 0x05},
	/* 55188659117513257062467267217118295137698188065244968500265048394206261417927
	   (order 8) */{
		0xc7, 0x17, 0x6a, 0x70, 0x3d, 0x4d, 0xd8, 0x4f, 0xba, 0x3c, 0x0b,
		0x76, 0x0d, 0x10, 0x67, 0x0f, 0x2a, 0x20, 0x53, 0xfa, 0x2c, 0x39,
		0xcc, 0xc6, 0x4e, 0xc7, 0xfd, 0x77, 0x92, 0xac, 0x03, 0x7a},
	/* p-1 (order 2) */ {
		0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f},
	/* p (=0, order 4) */ {
		0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f},
	/* p+1 (=1, order 1) */ {
		0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f},
}

// hasSmallOrder checks if a point is in the small-order blacklist.
// Based on libsodium ge25519_has_small_order, but this version is variable-time.
func hasSmallOrder(p [32]byte) bool {
	for _, point := range smallOrderPoints {
		if !bytes.Equal(p[:31], point[:31]) {
			continue
		}
		// For the last byte, ignore the sign bit (bit 7)
		if (p[31] & 0x7f) == point[31] {
			return true
		}
	}
	return false
}
diff --git a/crypto/gobatchverifier_test.go b/crypto/gobatchverifier_test.go
new file mode 100644
index 0000000000..884e65eb44
--- /dev/null
+++ b/crypto/gobatchverifier_test.go
@@ -0,0 +1,1186 @@
+// Copyright (C) 2019-2025 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see .

package crypto

import (
	"bufio"
	"compress/gzip"
	"crypto/ed25519"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"math/rand"
	"os"
	"regexp"
	"slices"
	"strconv"
	"strings"
	"testing"

	"github.com/algorand/go-algorand/protocol"
	"github.com/algorand/go-algorand/test/partitiontest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// ensure internal ed25519 types match the expected []byte lengths used by ed25519consensus package
func TestEd25519ConsensusBatchVerifierTypes(t *testing.T) {
	partitiontest.PartitionTest(t)

	require.Len(t, ed25519PublicKey{}, ed25519.PublicKeySize)
	require.Len(t, ed25519Signature{}, ed25519.SignatureSize)
}

// Test vectors for 12 edge cases listed in Appendix C of "Taming the many EdDSAs" https://eprint.iacr.org/2020/1244
// These are also checked in test_edge_cases in go-algorand/crypto/libsodium-fork/test/default/batch.c
func TestBatchVerifierTamingEdDSAsEdgeCases(t *testing.T) {
	partitiontest.PartitionTest(t)

	// Convert the package-level vector table into the (pk, sig, msg) hex form
	// expected by testBatchVectors, carrying each vector's pass/fail verdict.
	hexVecs := make([]batchTestCaseHex, len(tamingEdDSAsTestVectors))
	expectedFail := make([]bool, len(tamingEdDSAsTestVectors))
	for i, tc := range tamingEdDSAsTestVectors {
		hexVecs[i] = batchTestCaseHex{pkHex: tc.pk, sigHex: tc.sig, msgHex: tc.msg}
		expectedFail[i] = tc.expectedFail
	}
	runBatchVerifierImpls(t, func(t *testing.T, makeBV func(int) BatchVerifier) {
		testBatchVectors(t, makeBV, decodeHexTestCases(t, hexVecs), expectedFail)
	})
}

// Test vectors from "It's 255:19AM" blog post about ZIP-215 development, also used to create the
// 14x14 visualizations of different criteria across implementations in Henry de Valence's blog post
// "It's 255:19AM..." https://hdevalence.ca/blog/2020-10-04-its-25519am/
func TestBatchVerifierEd25519ConsensusTestData(t *testing.T) {
	partitiontest.PartitionTest(t)

	const msgHex = "5a63617368" // used for all signatures in this test
	hexVecs := make([]batchTestCaseHex, len(ed25519consensusCases))
	for i, tc := range ed25519consensusCases {
		hexVecs[i] = batchTestCaseHex{pkHex: tc.pk, sigHex: tc.sig, msgHex: msgHex}
	}
	// All of these test vectors should fail, matching our strict criteria
	expectedFail := make([]bool, len(hexVecs))
	for i := range expectedFail {
		expectedFail[i] = true
	}
	runBatchVerifierImpls(t, func(t *testing.T, makeBV func(int) BatchVerifier) {
		testBatchVectors(t, makeBV, decodeHexTestCases(t, hexVecs), expectedFail)
	})
}

// Test vectors from unit tests for our libsodium- and ed25519-donna-based batch verification implementation
// introduced in PR #3031.
func TestBatchVerifierLibsodiumTestData(t *testing.T) {
	partitiontest.PartitionTest(t)

	// read vectors hard-coded in test source file
	const testVectorFile = "./libsodium-fork/test/default/batch.c"
	const testVectorSize = 1025
	f, err := os.Open(testVectorFile)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)

	type testCase struct {
		seed, pk, sig []byte
		m             string
	}
	var testCases []testCase
	// each line is {{sk},{pk},{sig},"m"} where sk, pk, sig are comma-delimited lists of hex-encoded bytes
	re := regexp.MustCompile(`\{\{(.*?)\},\{(.*?)\},\{(.*?)\},(.*?)\}`)
	for i := 0; scanner.Scan(); i++ {
		var tc testCase
		line := scanner.Text()
		matches := re.FindStringSubmatch(line)
		if matches == nil || len(matches) != 5 {
			// skip lines that are not vector entries (C boilerplate, braces, etc.)
			continue
		}
		tc.seed = decodeCByteArray(matches[1], ed25519.SeedSize)
		tc.pk = decodeCByteArray(matches[2], ed25519.PublicKeySize)
		tc.sig = decodeCByteArray(matches[3], ed25519.SignatureSize)
		// the message is a C string literal; strconv.Unquote resolves its escapes
		tc.m, err = strconv.Unquote(matches[4])
		require.NoError(t, err)
		testCases = append(testCases, tc)
	}

	t.Logf("loaded %d test vectors from %s", len(testCases), testVectorFile)
	require.Len(t, testCases, testVectorSize, "not enough test vectors found")

	// check test data with libsodium-based ed25519Verify
	for _, tc := range testCases {
		require.True(t, ed25519Verify(ed25519PublicKey(tc.pk), []byte(tc.m), ed25519Signature(tc.sig)))
	}

	// assert signing with test vector sk produces sig
	for _, tc := range testCases {
		pk, sk := ed25519GenerateKeySeed(ed25519Seed(tc.seed))
		require.Equal(t, tc.pk, []byte(pk[:]))
		sig := ed25519Sign(sk, []byte(tc.m))
		require.Equal(t, tc.sig, []byte(sig[:]))
	}

	// test different BatchVerifier implementations and batch sizes
	testVectors := make([]batchTestCase, len(testCases))
	for i, tc := range testCases {
		testVectors[i] = batchTestCase{pk: tc.pk, sig: tc.sig, msg: []byte(tc.m)}
	}
	expectedFail := make([]bool, len(testVectors)) // all should pass
	runBatchVerifierImpls(t, func(t *testing.T, makeBV func(int) BatchVerifier) {
		testBatchVectors(t, makeBV, testVectors, expectedFail)
	})
}

// based on TestEd25519Vectors from go/src/crypto/ed25519/ed25519vectors_test.go
// which uses test vectors from filippo.io/mostly-harmless/ed25519vectors
func TestBatchVerifierFilippoVectors(t *testing.T) {
	partitiontest.PartitionTest(t)

	var vectors []struct {
		A, R, S, M string
		Flags      []string
	}
	f, err := os.Open("./testdata/ed25519vectors.json.gz")
	require.NoError(t, err)
	defer f.Close()
	rd, err := gzip.NewReader(f)
	require.NoError(t, err)
	defer rd.Close()
	err = json.NewDecoder(rd).Decode(&vectors)
	require.NoError(t, err)

	// Map each vector's flags onto this implementation's accept/reject policy;
	// an unknown flag fails the test so new vector flavors are reviewed.
	expectedFail := make([]bool, len(vectors))
	hexVecs := make([]batchTestCaseHex, len(vectors))
	for i, v := range vectors {
		for _, f := range v.Flags {
			switch f {
			case "LowOrderA": // reject small-order A
				expectedFail[i] = true
			case "NonCanonicalA", "NonCanonicalR": // reject non-canonical A or R
				expectedFail[i] = true
			case "LowOrderR": // small-order R allowed
			case "LowOrderComponentR", "LowOrderComponentA": // torsion component allowed
			case "LowOrderResidue": // cofactorless batch verification
			default:
				require.Fail(t, "unknown flag %q in test vector %d", f, i)
			}
		}
		hexVecs[i] = batchTestCaseHex{pkHex: v.A, sigHex: v.R + v.S, msgHex: hex.EncodeToString([]byte(v.M))}
	}
	runBatchVerifierImpls(t, func(t *testing.T, makeBV func(int) BatchVerifier) {
		testBatchVectors(t, makeBV, decodeHexTestCases(t, hexVecs), expectedFail)
	})

	// test isCanonicalPoint and hasSmallOrder against A and R
	t.Run("ARchecks", func(t *testing.T) {
		for _, v := range vectors {
			A, err := hex.DecodeString(v.A)
			require.NoError(t, err)
			require.Equal(t, !slices.Contains(v.Flags, "NonCanonicalA"), isCanonicalPoint([32]byte(A)))
			require.Equal(t, slices.Contains(v.Flags, "LowOrderA"), hasSmallOrder([32]byte(A)))

			R, err := hex.DecodeString(v.R)
			require.NoError(t, err)
			require.Equal(t, !slices.Contains(v.Flags, "NonCanonicalR"), isCanonicalPoint([32]byte(R)))
			require.Equal(t, slices.Contains(v.Flags, "LowOrderR"), hasSmallOrder([32]byte(R)))
		}
	})

}

// testBatchVectors tests a batch of signatures with expected pass/fail results using various batch sizes
func testBatchVectors(t *testing.T, makeBV func(int) BatchVerifier, testVectors []batchTestCase, expectedFail []bool) {
	require.Len(t, expectedFail, len(testVectors))

	// run a single batch of test vectors and compare to expected failures
	runBatch := func(t *testing.T, vecs []batchTestCase, expFail []bool) {
		bv := makeBV(len(vecs))
		for _, tv := range vecs {
			bv.EnqueueSignature(SignatureVerifier(tv.pk), noHashID(tv.msg), Signature(tv.sig))
		}
		failed, err := bv.VerifyWithFeedback()
		if slices.Contains(expFail, true) { // some failures expected
			require.Error(t, err)
			require.NotNil(t, failed)
			require.Len(t, failed, len(vecs))
			for i := range expFail {
				assert.Equal(t, expFail[i], failed[i])
			}
		} else { // no failures expected
			require.NoError(t, err)
			require.Nil(t, failed)
		}
	}

	// run all the test vectors in a single batch
	t.Run("all", func(t *testing.T) { runBatch(t, testVectors, expectedFail) })

	// split into multiple batches of different sizes, optionally shuffled
	runBatchSizes := func(shuffle bool, vecs []batchTestCase, expFail []bool) {
		if shuffle {
			// clone before shuffling so the caller's slices stay in order;
			// vectors and their expected verdicts are permuted in lockstep
			vecs, expFail = slices.Clone(vecs), slices.Clone(expFail)
			rand.Shuffle(len(vecs), func(i, j int) {
				vecs[i], vecs[j], expFail[i], expFail[j] = vecs[j], vecs[i], expFail[j], expFail[i]
			})
		}

		for _, batchSize := range []int{1, 2, 4, 8, 16, 32, 64, 100, 128, 256, 512, 1024} {
			if batchSize > len(vecs) {
				continue
			}
			t.Run(fmt.Sprintf("batchSize=%d", batchSize), func(t *testing.T) {
				vectorBatches := splitBatches(vecs, batchSize)
				failBatches := splitBatches(expFail, batchSize)
				require.Equal(t, len(vectorBatches), len(failBatches))
				//t.Logf("Testing with batch size %d: %d total signatures in %d batches", batchSize, n, len(vectorBatches))
				for i, batch := range vectorBatches {
					batchExpectedFail := failBatches[i]
					//t.Logf("Batch %d/%d: signatures [%d-%d), size=%d", i+1, len(vectorBatches), i*batchSize, i*batchSize+len(batch), len(batch))
					runBatch(t, batch, batchExpectedFail)
				}
			})
		}
	}

	t.Run("unshuffled", func(t *testing.T) { runBatchSizes(false, testVectors, expectedFail) })
	t.Run("shuffled", func(t *testing.T) { runBatchSizes(true, testVectors, expectedFail) })
}

// splitBatches splits items into batches of the specified size; the final
// batch may be shorter. A non-positive batchSize yields nil.
func splitBatches[T any](items []T, batchSize int) [][]T {
	if batchSize <= 0 {
		return nil
	}
	numBatches := len(items) / batchSize
	if len(items)%batchSize != 0 {
		numBatches++ // partial final batch
	}
	batches := make([][]T, numBatches)

	for i, item := range items {
		batchIdx := i / batchSize
		batches[batchIdx] = append(batches[batchIdx], item)
	}

	return batches
}

// decodeCByteArray decodes a string like "0x27,0x81," into a byte array of length n
// decodeCByteArray parses a comma-separated C initializer list of hex bytes
// ("0x27,0x81,...," with a trailing comma) into exactly n bytes; it panics on
// any malformed input since it only runs on checked-in test data.
func decodeCByteArray(hexList string, n int) []byte {
	bytes := make([]byte, n)
	words := strings.Split(hexList, ",")
	// remove trailing empty string
	if words[len(words)-1] == "" {
		words = words[:len(words)-1]
	} else {
		panic("missing trailing comma")
	}
	if len(words) != n {
		panic("wrong number of words")
	}
	for i, word := range words {
		_, err := fmt.Sscanf(word, "0x%02x", &bytes[i])
		if err != nil {
			panic(err)
		}
	}
	return bytes
}

// batchTestCaseHex is a (public key, signature, message) triple in hex form.
type batchTestCaseHex struct{ pkHex, sigHex, msgHex string }

// batchTestCase is the decoded byte form of batchTestCaseHex.
type batchTestCase struct{ pk, sig, msg []byte }

// decodeHexTestCases converts hex-encoded test cases to byte arrays
func decodeHexTestCases(t *testing.T, hexCases []batchTestCaseHex) []batchTestCase {
	cases := make([]batchTestCase, len(hexCases))
	for i, hc := range hexCases {
		pk, err := hex.DecodeString(hc.pkHex)
		require.NoError(t, err)
		require.Len(t, pk, ed25519.PublicKeySize)

		sig, err := hex.DecodeString(hc.sigHex)
		require.NoError(t, err)
		require.Len(t, sig, ed25519.SignatureSize)

		msg, err := hex.DecodeString(hc.msgHex)
		require.NoError(t, err)

		cases[i] = batchTestCase{pk: pk, sig: sig, msg: msg}
	}
	return cases
}

// noHashID implements Hashable but returns an empty protocol.HashID for use
// with the test vectors, which should not be prefixed
type noHashID []byte

func (n noHashID) ToBeHashed() (protocol.HashID, []byte) { return "", n }

// Test vectors from Appendix C of "Taming the many EdDSAs" https://eprint.iacr.org/2020/1244
var tamingEdDSAsTestVectors = []struct {
	desc, msg, pk, sig string
	expectedFail       bool // Algorand-specific criteria
}{
	{"S = 0, small-order A, small-order R",
		"8c93255d71dcab10e8f379c26200f3c7bd5f09d9bc3068d3ef4edeb4853022b6",
		"c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa",
		"c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a0000000000000000000000000000000000000000000000000000000000000000",
		true},
	{"0 < S < L, small-order
A, mixed-order R", + "9bd9f44f4dcc75bd531b56b2cd280b0bb38fc1cd6d1230e14861d861de092e79", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa", + "f7badec5b8abeaf699583992219b7b223f1df3fbbea919844e3f7c554a43dd43a5bb704786be79fc476f91d3f3f89b03984d8068dcf1bb7dfc6637b45450ac04", + true}, + {"0 < S < L, mixed-order A, small-order R", + "aebf3f2601a0c8c5d39cc7d8911642f740b78168218da8471772b35f9d35b9ab", + "f7badec5b8abeaf699583992219b7b223f1df3fbbea919844e3f7c554a43dd43", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa8c4bd45aecaca5b24fb97bc10ac27ac8751a7dfe1baff8b953ec9f5833ca260e", + false}, + {"0 < S < L, mixed-order A, mixed-order R", + "9bd9f44f4dcc75bd531b56b2cd280b0bb38fc1cd6d1230e14861d861de092e79", + "cdb267ce40c5cd45306fa5d2f29731459387dbf9eb933b7bd5aed9a765b88d4d", + "9046a64750444938de19f227bb80485e92b83fdb4b6506c160484c016cc1852f87909e14428a7a1d62e9f22f3d3ad7802db02eb2e688b6c52fcd6648a98bd009", + false}, + {"0 < S < L, mixed-order A, mixed-order R, SB != R + hA", + "e47d62c63f830dc7a6851a0b1f33ae4bb2f507fb6cffec4011eaccd55b53f56c", + "cdb267ce40c5cd45306fa5d2f29731459387dbf9eb933b7bd5aed9a765b88d4d", + "160a1cb0dc9c0258cd0a7d23e94d8fa878bcb1925f2c64246b2dee1796bed5125ec6bc982a269b723e0668e540911a9a6a58921d6925e434ab10aa7940551a09", + false}, + {`0 < S < L, mixed-order A, L-order R, SB != R + hA ("#5 fails any cofactored verification that pre-reduces scalar 8h")`, + "e47d62c63f830dc7a6851a0b1f33ae4bb2f507fb6cffec4011eaccd55b53f56c", + "cdb267ce40c5cd45306fa5d2f29731459387dbf9eb933b7bd5aed9a765b88d4d", + "21122a84e0b5fca4052f5b1235c80a537878b38f3142356b2c2384ebad4668b7e40bc836dac0f71076f9abe3a53f9c03c1ceeeddb658d0030494ace586687405", + false}, + {"S > L, L-order A, L-order R", + "85e241a07d148b41e47d62c63f830dc7a6851a0b1f33ae4bb2f507fb6cffec40", + "442aad9f089ad9e14647b1ef9099a1ff4798d78589e66f28eca69c11f582a623", + 
"e96f66be976d82e60150baecff9906684aebb1ef181f67a7189ac78ea23b6c0e547f7690a0e2ddcd04d87dbc3490dc19b3b3052f7ff0538cb68afb369ba3a514", + true}, + {`S >> L, L-order A, L-order R ("#7 fails bitwise tests that S > L")`, + "85e241a07d148b41e47d62c63f830dc7a6851a0b1f33ae4bb2f507fb6cffec40", + "442aad9f089ad9e14647b1ef9099a1ff4798d78589e66f28eca69c11f582a623", + "8ce5b96c8f26d0ab6c47958c9e68b937104cd36e13c33566acd2fe8d38aa19427e71f98a4734e74f2f13f06f97c20d58cc3f54b8bd0d272f42b695dd7e89a8c2", + true}, + {`0 < S < L, mixed-order A, small-order R ("#8-9 have non-canonical R; implementations that reduce R before hashing will accept #8 and reject #9, while those that do not will reject #8 and accept #9")`, + "9bedc267423725d473888631ebf45988bad3db83851ee85c85e241a07d148b41", + "f7badec5b8abeaf699583992219b7b223f1df3fbbea919844e3f7c554a43dd43", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff03be9678ac102edcd92b0210bb34d7428d12ffc5df5f37e359941266a4e35f0f", + true}, + {`0 < S < L, mixed-order A, small-order R ("#8-9 have non-canonical R; implementations that reduce R before hashing will accept #8 and reject #9, while those that do not will reject #8 and accept #9")`, + "9bedc267423725d473888631ebf45988bad3db83851ee85c85e241a07d148b41", + "f7badec5b8abeaf699583992219b7b223f1df3fbbea919844e3f7c554a43dd43", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffca8c5b64cd208982aa38d4936621a4775aa233aa0505711d8fdcfdaa943d4908", + true}, + {`0 < S < L, small-order A, mixed-order R ("#10-11 have a non-canonical A; implementations that reduce A before hashing will accept #10 and reject #11, while those that do not will reject #10 and accept #11")`, + "e96b7021eb39c1a163b6da4e3093dcd3f21387da4cc4572be588fafae23c155b", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "a9d55260f765261eb9b84e106f665e00b867287a761990d7135963ee0a7d59dca5bb704786be79fc476f91d3f3f89b03984d8068dcf1bb7dfc6637b45450ac04", + true}, + {`0 < S < L, small-order A, 
mixed-order R ("#10-11 have a non-canonical A; implementations that reduce A before hashing will accept #10 and reject #11, while those that do not will reject #10 and accept #11")`, + "39a591f5321bbe07fd5a23dc2f39d025d74526615746727ceefd6e82ae65c06f", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "a9d55260f765261eb9b84e106f665e00b867287a761990d7135963ee0a7d59dca5bb704786be79fc476f91d3f3f89b03984d8068dcf1bb7dfc6637b45450ac04", + true}, +} + +// "It's 255:19AM" blog post test vectors, from the ed25519consensus package +var ed25519consensusCases = [196]struct{ pk, sig string }{ + { + "0100000000000000000000000000000000000000000000000000000000000000", + "01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000000", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000000", + "00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000000", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc050000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000000", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000000", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc850000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000000", + 
"00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000000", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000000", + "01000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000000", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000000", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000000", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000000", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000000", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a", + "01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a", + 
"c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a", + "00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc050000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc850000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a", + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a", + "01000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a", + 
"edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000080", + "01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000080", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000080", + "00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000080", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc050000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000080", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000080", + 
"26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc850000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000080", + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000080", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000080", + "01000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000080", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000080", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000080", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000080", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000080", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc05", + 
"01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc05", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc05", + "00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc05", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc050000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc05", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc05", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc850000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc05", + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc05", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc05", + "01000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc05", + 
"ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc05", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc05", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc05", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc05", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc050000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + 
"ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc850000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "01000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + 
"eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc85", + "01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc85", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc85", + "00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc85", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc050000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc85", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc85", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc850000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc85", + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc85", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc85", + 
"01000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc85", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc85", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc85", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc85", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc85", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000000", + "01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000000", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000000", + "00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000000", + 
"26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc050000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000000", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000000", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc850000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000000", + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000000", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000000", + "01000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000000", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000000", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000000", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000000", + 
"eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0000000000000000000000000000000000000000000000000000000000000000", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa", + "01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa", + "00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc050000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc850000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa", + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa", + 
"c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa", + "01000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000080", + "01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000080", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000080", + 
"00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000080", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc050000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000080", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000080", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc850000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000080", + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000080", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000080", + "01000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000080", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000080", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000080", + 
"edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000080", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "0100000000000000000000000000000000000000000000000000000000000080", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc050000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc850000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + 
"00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "01000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + 
"c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc050000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc850000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "01000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + 
"edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc050000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + 
"26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc850000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "01000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + 
"01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc050000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc850000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "01000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + 
"ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc050000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + 
"ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc850000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "01000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + 
"eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000", + }, +} diff --git a/crypto/libsodium-fork/src/libsodium/crypto_sign/ed25519/ref10/batch.c b/crypto/libsodium-fork/src/libsodium/crypto_sign/ed25519/ref10/batch.c index 83f93a857d..030c64592d 100644 --- a/crypto/libsodium-fork/src/libsodium/crypto_sign/ed25519/ref10/batch.c +++ b/crypto/libsodium-fork/src/libsodium/crypto_sign/ed25519/ref10/batch.c @@ -118,7 +118,7 @@ heap_get_top2(batch_heap *heap, heap_index_t *max1, heap_index_t *max2, size_t l /* */ void ge25519_multi_scalarmult_vartime_final(ge25519_p3 *r, ge25519_p3 *point, sc25519 scalar) { - const sc25519_element_t topbit = ((sc25519_element_t)1 << (SC25519_LIMB_SIZE - 1)); + const sc25519_element_t topbit = ((sc25519_element_t)1 << (SC25519_BITS_PER_LIMB - 1)); size_t limb = limb128bits; sc25519_element_t flag; ge25519_p1p1 p1p1_r; diff --git a/crypto/merklearray/merkle_test.go b/crypto/merklearray/merkle_test.go index 4c15f74df6..b84beef624 100644 --- a/crypto/merklearray/merkle_test.go +++ b/crypto/merklearray/merkle_test.go @@ -485,7 +485,7 @@ func TestSizeLimitsMerkle(t *testing.T) { for depth := uint64(0); depth < uint64(18); depth = depth + increment { size := uint64(1) << depth - // eltCoefficient is the coefficent to determine how many elements are in the proof. + // eltCoefficient is the coefficient to determine how many elements are in the proof. 
// There will be 1/eltCoefficient elements of all possible element (2^treeDepth) // numElts = 2^(depth-eltCoefficient) diff --git a/crypto/merklesignature/merkleSignatureScheme_test.go b/crypto/merklesignature/merkleSignatureScheme_test.go index 9670e90ba1..83515099df 100644 --- a/crypto/merklesignature/merkleSignatureScheme_test.go +++ b/crypto/merklesignature/merkleSignatureScheme_test.go @@ -18,7 +18,6 @@ package merklesignature import ( "crypto/rand" - "errors" "math" "testing" @@ -329,7 +328,7 @@ func TestBadRound(t *testing.T) { err = signer.GetVerifier().VerifyBytes(start+2, msg, &sig) a.Error(err) a.ErrorIs(err, ErrSignatureSchemeVerificationFailed) - a.True(errors.Is(err, ErrSignatureSchemeVerificationFailed)) + a.ErrorIs(err, ErrSignatureSchemeVerificationFailed) } func TestBadMerkleProofInSignature(t *testing.T) { diff --git a/crypto/merkletrie/cache.go b/crypto/merkletrie/cache.go index 6132007559..5d8fd78779 100644 --- a/crypto/merkletrie/cache.go +++ b/crypto/merkletrie/cache.go @@ -256,9 +256,7 @@ func (mtc *merkleTrieCache) loadPage(page uint64) (err error) { mtc.cachedNodeCount += len(mtc.pageToNIDsPtr[page]) } else { mtc.cachedNodeCount -= len(mtc.pageToNIDsPtr[page]) - for nodeID, pnode := range decodedNodes { - mtc.pageToNIDsPtr[page][nodeID] = pnode - } + maps.Copy(mtc.pageToNIDsPtr[page], decodedNodes) mtc.cachedNodeCount += len(mtc.pageToNIDsPtr[page]) } @@ -485,9 +483,7 @@ func (mtc *merkleTrieCache) reallocatePendingPages(stats *CommitStats) (pagesToC delete(createdPages, page) } - for pageID, page := range mtc.reallocatedPages { - createdPages[pageID] = page - } + maps.Copy(createdPages, mtc.reallocatedPages) for _, nodeIDs := range createdPages { for _, node := range nodeIDs { diff --git a/crypto/msgp_gen.go b/crypto/msgp_gen.go index ca58b84370..0b7142457f 100644 --- a/crypto/msgp_gen.go +++ b/crypto/msgp_gen.go @@ -2393,16 +2393,6 @@ func OneTimeSignatureSecretsMaxSize() (s int) { s += 6 + msgp.Uint64Size + 4 // Calculating size of 
slice: z.OneTimeSignatureSecretsPersistent.Batches panic("Slice z.OneTimeSignatureSecretsPersistent.Batches is unbounded") - s += 9 + msgp.Uint64Size + 8 - // Calculating size of slice: z.OneTimeSignatureSecretsPersistent.Offsets - panic("Slice z.OneTimeSignatureSecretsPersistent.Offsets is unbounded") - s += 7 - // Calculating size of array: z.OneTimeSignatureSecretsPersistent.OffsetsPK2 - s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize)) - s += 10 - // Calculating size of array: z.OneTimeSignatureSecretsPersistent.OffsetsPK2Sig - s += msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize)) - return } // MarshalMsg implements msgp.Marshaler @@ -2751,16 +2741,6 @@ func OneTimeSignatureSecretsPersistentMaxSize() (s int) { s += 6 + msgp.Uint64Size + 4 // Calculating size of slice: z.Batches panic("Slice z.Batches is unbounded") - s += 9 + msgp.Uint64Size + 8 - // Calculating size of slice: z.Offsets - panic("Slice z.Offsets is unbounded") - s += 7 - // Calculating size of array: z.OffsetsPK2 - s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize)) - s += 10 - // Calculating size of array: z.OffsetsPK2Sig - s += msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize)) - return } // MarshalMsg implements msgp.Marshaler diff --git a/crypto/multisig_test.go b/crypto/multisig_test.go index 11e1dd791c..2527fda740 100644 --- a/crypto/multisig_test.go +++ b/crypto/multisig_test.go @@ -310,7 +310,7 @@ func TestMoreThanMaxSigsInMultisig(t *testing.T) { } msig, err := MultisigAssemble(sigs) - require.NoError(t, err, "Multisig: error assmeble multisig") + require.NoError(t, err, "Multisig: error assemble multisig") err = MultisigVerify(txid, addr, msig) require.Error(t, err, "Multisig: did not return error as expected") br := MakeBatchVerifier() @@ -346,7 +346,7 @@ func TestOneSignatureIsEmpty(t *testing.T) { } msig, err := MultisigAssemble(sigs) - require.NoError(t, err, "Multisig: error assmeble multisig") + require.NoError(t, err, "Multisig: error assemble multisig") msig.Subsigs[0].Sig = 
Signature{} err = MultisigVerify(txid, addr, msig) require.Error(t, err, "Multisig: did not return error as expected") @@ -386,7 +386,7 @@ func TestOneSignatureIsInvalid(t *testing.T) { sigs[1].Subsigs[1].Sig[5] = sigs[1].Subsigs[1].Sig[5] + 1 msig, err := MultisigAssemble(sigs) - require.NoError(t, err, "Multisig: error assmeble multisig") + require.NoError(t, err, "Multisig: error assemble multisig") err = MultisigVerify(txid, addr, msig) require.Error(t, err, "Multisig: did not return error as expected") br := MakeBatchVerifier() diff --git a/crypto/onetimesig.go b/crypto/onetimesig.go index 11930d633e..420ae442e8 100644 --- a/crypto/onetimesig.go +++ b/crypto/onetimesig.go @@ -391,25 +391,11 @@ func (v OneTimeSignatureVerifier) Verify(id OneTimeSignatureIdentifier, message } func (v OneTimeSignatureVerifier) batchVerify(batchID OneTimeSignatureSubkeyBatchID, offsetID OneTimeSignatureSubkeyOffsetID, message Hashable, sig OneTimeSignature) bool { - // serialize encoded batchID, offsetID, message into a continuous memory buffer with the layout - // hashRep(batchID)... hashRep(offsetID)... hashRep(message)... 
- const estimatedSize = 256 - messageBuffer := make([]byte, 0, estimatedSize) - - messageBuffer = HashRepToBuff(batchID, messageBuffer) - batchIDLen := uint64(len(messageBuffer)) - messageBuffer = HashRepToBuff(offsetID, messageBuffer) - offsetIDLen := uint64(len(messageBuffer)) - batchIDLen - messageBuffer = HashRepToBuff(message, messageBuffer) - messageLen := uint64(len(messageBuffer)) - offsetIDLen - batchIDLen - msgLengths := []uint64{batchIDLen, offsetIDLen, messageLen} - allValid, _ := cgoBatchVerificationImpl( - messageBuffer, - msgLengths, - []PublicKey{PublicKey(v), PublicKey(batchID.SubKeyPK), PublicKey(offsetID.SubKeyPK)}, - []Signature{Signature(sig.PK2Sig), Signature(sig.PK1Sig), Signature(sig.Sig)}, - ) - return allValid + bv := MakeBatchVerifierWithHint(3) + bv.EnqueueSignature(PublicKey(v), batchID, Signature(sig.PK2Sig)) + bv.EnqueueSignature(PublicKey(batchID.SubKeyPK), offsetID, Signature(sig.PK1Sig)) + bv.EnqueueSignature(PublicKey(offsetID.SubKeyPK), message, Signature(sig.Sig)) + return bv.Verify() == nil } // DeleteBeforeFineGrained deletes ephemeral keys before (but not including) the given id. @@ -423,10 +409,7 @@ func (s *OneTimeSignatureSecrets) DeleteBeforeFineGrained(current OneTimeSignatu // subkeys. 
if current.Batch+1 == s.FirstBatch { if current.Offset > s.FirstOffset { - jump := current.Offset - s.FirstOffset - if jump > uint64(len(s.Offsets)) { - jump = uint64(len(s.Offsets)) - } + jump := min(current.Offset-s.FirstOffset, uint64(len(s.Offsets))) s.FirstOffset += jump s.Offsets = s.Offsets[jump:] diff --git a/crypto/testdata/ed25519vectors.json.gz b/crypto/testdata/ed25519vectors.json.gz new file mode 100644 index 0000000000..52605c9690 Binary files /dev/null and b/crypto/testdata/ed25519vectors.json.gz differ diff --git a/daemon/algod/api/server/common/test/handlers_test.go b/daemon/algod/api/server/common/test/handlers_test.go index ad2ed00a4c..170252260c 100644 --- a/daemon/algod/api/server/common/test/handlers_test.go +++ b/daemon/algod/api/server/common/test/handlers_test.go @@ -17,7 +17,6 @@ package test import ( - "fmt" "net/http" "net/http/httptest" "testing" @@ -34,11 +33,11 @@ import ( func mockNodeStatusInRangeHelper( t *testing.T, statusCode MockNodeCatchupStatus, - expectedErr error, expectedStatus node.StatusReport) { + expectedErr string, expectedStatus node.StatusReport) { mockNodeInstance := makeMockNode(statusCode) status, err := mockNodeInstance.Status() - if expectedErr != nil { - require.Error(t, err, expectedErr) + if expectedErr != "" { + require.EqualError(t, err, expectedErr) } else { require.Equal(t, expectedStatus, status) } @@ -48,13 +47,13 @@ func TestMockNodeStatus(t *testing.T) { partitiontest.PartitionTest(t) mockNodeStatusInRangeHelper( - t, CaughtUpAndReady, nil, cannedStatusReportCaughtUpAndReadyGolden) + t, CaughtUpAndReady, "", cannedStatusReportCaughtUpAndReadyGolden) mockNodeStatusInRangeHelper( - t, CatchingUpFast, nil, cannedStatusReportCatchingUpFastGolden) + t, CatchingUpFast, "", cannedStatusReportCatchingUpFastGolden) mockNodeStatusInRangeHelper( - t, StoppedAtUnsupported, nil, cannedStatusReportStoppedAtUnsupportedGolden) + t, StoppedAtUnsupported, "", cannedStatusReportStoppedAtUnsupportedGolden) 
mockNodeStatusInRangeHelper( - t, 399, fmt.Errorf("catchup status out of scope error"), node.StatusReport{}) + t, 399, "catchup status out of scope error", node.StatusReport{}) } func readyEndpointTestHelper( diff --git a/daemon/algod/api/server/v1/routes/routes.go b/daemon/algod/api/server/v1/routes/routes.go index fe0684637d..6bf85e2851 100644 --- a/daemon/algod/api/server/v1/routes/routes.go +++ b/daemon/algod/api/server/v1/routes/routes.go @@ -22,7 +22,7 @@ import ( ) // V1Routes contains all routes for v1 -// v1 algod paths will route to the sunset message, resulting in a 410 Gone response. +// These algod paths will route to the sunset message, resulting in a 410 Gone response. var V1Routes = lib.Routes{ lib.Route{ Name: "status", diff --git a/daemon/algod/api/server/v2/dryrun_test.go b/daemon/algod/api/server/v2/dryrun_test.go index e84c02dc89..8bd7addc92 100644 --- a/daemon/algod/api/server/v2/dryrun_test.go +++ b/daemon/algod/api/server/v2/dryrun_test.go @@ -1601,6 +1601,7 @@ int 1 a.NoError(err) appIdx := basics.AppIndex(7) + proto := config.Consensus[dryrunProtoVersion] dr := DryrunRequest{ ProtocolVersion: string(dryrunProtoVersion), Txns: []transactions.SignedTxn{txntest.Txn{ @@ -1618,8 +1619,8 @@ int 1 // Sender must exist (though no fee is ever taken) // AppAccount must exist and be able to pay the inner fee and the pay amount (but min balance not checked) Accounts: []model.Account{ - {Address: sender.String(), Status: "Offline"}, // sender - {Address: appIdx.Address().String(), Status: "Offline", AmountWithoutPendingRewards: 1_010}}, // app account + {Address: sender.String(), Status: "Offline"}, // sender + {Address: appIdx.Address().String(), Status: "Offline", AmountWithoutPendingRewards: proto.MinTxnFee + 10}}, // app account needs MinTxnFee + pay amount } var response model.DryrunResponse doDryrunRequest(&dr, &response) diff --git a/daemon/algod/api/server/v2/handlers_test.go b/daemon/algod/api/server/v2/handlers_test.go index 
debeffbb7a..6d013995c2 100644 --- a/daemon/algod/api/server/v2/handlers_test.go +++ b/daemon/algod/api/server/v2/handlers_test.go @@ -160,10 +160,10 @@ func TestPendingTransactionResponseStruct(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - generatedResponseType := reflect.TypeOf(model.PendingTransactionResponse{}) + generatedResponseType := reflect.TypeFor[model.PendingTransactionResponse]() generatedResponseGraph := makeTagGraph(generatedResponseType, make(map[reflect.Type]*tagNode)) - customResponseType := reflect.TypeOf(PreEncodedTxInfo{}) + customResponseType := reflect.TypeFor[PreEncodedTxInfo]() customResponseGraph := makeTagGraph(customResponseType, make(map[reflect.Type]*tagNode)) expectedGeneratedTxnGraph := map[string]*tagNode{ @@ -186,10 +186,10 @@ func TestSimulateResponseStruct(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - generatedResponseType := reflect.TypeOf(model.SimulateResponse{}) + generatedResponseType := reflect.TypeFor[model.SimulateResponse]() generatedResponseGraph := makeTagGraph(generatedResponseType, make(map[reflect.Type]*tagNode)) - customResponseType := reflect.TypeOf(PreEncodedSimulateResponse{}) + customResponseType := reflect.TypeFor[PreEncodedSimulateResponse]() customResponseGraph := makeTagGraph(customResponseType, make(map[reflect.Type]*tagNode)) expectedGeneratedTxnGraph := map[string]*tagNode{ @@ -216,10 +216,10 @@ func TestSimulateRequestStruct(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - generatedResponseType := reflect.TypeOf(model.SimulateRequest{}) + generatedResponseType := reflect.TypeFor[model.SimulateRequest]() generatedResponseGraph := makeTagGraph(generatedResponseType, make(map[reflect.Type]*tagNode)) - customResponseType := reflect.TypeOf(PreEncodedSimulateRequest{}) + customResponseType := reflect.TypeFor[PreEncodedSimulateRequest]() customResponseGraph := makeTagGraph(customResponseType, make(map[reflect.Type]*tagNode)) expectedGeneratedTxnGraph := 
map[string]*tagNode{ diff --git a/daemon/algod/api/server/v2/test/genesis_types_test.go b/daemon/algod/api/server/v2/test/genesis_types_test.go index 0860235f37..7f3e67b96e 100644 --- a/daemon/algod/api/server/v2/test/genesis_types_test.go +++ b/daemon/algod/api/server/v2/test/genesis_types_test.go @@ -48,10 +48,10 @@ func getJSONTag(field reflect.StructField) string { func TestGenesisTypeCompatibility(t *testing.T) { partitiontest.PartitionTest(t) // Test Genesis struct compatibility - verifyStructCompatibility(t, reflect.TypeOf(bookkeeping.Genesis{}), reflect.TypeOf(model.Genesis{})) + verifyStructCompatibility(t, reflect.TypeFor[bookkeeping.Genesis](), reflect.TypeFor[model.Genesis]()) // Test GenesisAllocation struct compatibility - verifyStructCompatibility(t, reflect.TypeOf(bookkeeping.GenesisAllocation{}), reflect.TypeOf(model.GenesisAllocation{})) + verifyStructCompatibility(t, reflect.TypeFor[bookkeeping.GenesisAllocation](), reflect.TypeFor[model.GenesisAllocation]()) } // isStructOrPtrToStruct returns true if the type is a struct or pointer to struct diff --git a/daemon/algod/api/server/v2/test/handlers_resources_test.go b/daemon/algod/api/server/v2/test/handlers_resources_test.go index 40d56f8981..d058eb9639 100644 --- a/daemon/algod/api/server/v2/test/handlers_resources_test.go +++ b/daemon/algod/api/server/v2/test/handlers_resources_test.go @@ -437,10 +437,7 @@ func accountAssetInformationResourceLimitsTest(t *testing.T, handlers v2.Handler assert.Equal(t, maxResults, len(*ret.AssetHoldings)) // Asset holdings should match the first limit assets from the account data - minForResults := 0 - if inputNextToken > 0 { - minForResults = inputNextToken - } + minForResults := max(inputNextToken, 0) for i := minForResults; i < minForResults+maxResults; i++ { expectedIndex := basics.AssetIndex(i + 1) diff --git a/daemon/algod/server.go b/daemon/algod/server.go index 8fb650078e..72c2beba0c 100644 --- a/daemon/algod/server.go +++ b/daemon/algod/server.go @@ -23,7 
+23,7 @@ import ( "io" "net" "net/http" - _ "net/http/pprof" // net/http/pprof is for registering the pprof URLs with the web server, so http://localhost:8080/debug/pprof/ works. + _ "net/http/pprof" //nolint:gosec // registers handlers on http.DefaultServeMux, but we only route to it when Config.EnableProfiler is true "net/url" "os" "os/signal" diff --git a/daemon/kmd/api/api.go b/daemon/kmd/api/api.go index 4b34f92135..1f5df79c9c 100644 --- a/daemon/kmd/api/api.go +++ b/daemon/kmd/api/api.go @@ -59,7 +59,7 @@ // Base path must be a fully specified package name (else, it seems that swagger feeds a relative path to // loader.Config.Import(), and that breaks the vendor directory if the source is symlinked from elsewhere) // -//go:generate swagger generate spec -m -o="./swagger.json" +//go:generate swagger generate spec -m --transparent-aliases -o="./swagger.json" //go:generate swagger validate ./swagger.json --stop-on-error //go:generate sh ../lib/kmdapi/bundle_swagger_json.sh package api diff --git a/daemon/kmd/api/v1/handlers.go b/daemon/kmd/api/v1/handlers.go index ac2624110b..d8b33cc87b 100644 --- a/daemon/kmd/api/v1/handlers.go +++ b/daemon/kmd/api/v1/handlers.go @@ -38,7 +38,7 @@ type reqContext struct { sm *session.Manager } -// errorResponse sets the specified status code (should != 200), and fills in the +// errorResponse sets the specified status code (should != 200), and fills in // the response envelope by setting Error to true and a Message to the passed // user-readable error message. 
func errorResponse(w http.ResponseWriter, status int, err error) { diff --git a/daemon/kmd/config/config.go b/daemon/kmd/config/config.go index 95bce23129..f2aae3816e 100644 --- a/daemon/kmd/config/config.go +++ b/daemon/kmd/config/config.go @@ -104,7 +104,7 @@ func (k KMDConfig) Validate() error { return nil } -// LoadKMDConfig tries to read the the kmd configuration from disk, merging the +// LoadKMDConfig tries to read the kmd configuration from disk, merging the // default kmd configuration with what it finds func LoadKMDConfig(dataDir string) (cfg KMDConfig, err error) { cfg = defaultConfig(dataDir) diff --git a/daemon/kmd/wallet/driver/sqlite_crypto.go b/daemon/kmd/wallet/driver/sqlite_crypto.go index 618cb9345d..f30ac6d937 100644 --- a/daemon/kmd/wallet/driver/sqlite_crypto.go +++ b/daemon/kmd/wallet/driver/sqlite_crypto.go @@ -232,7 +232,7 @@ func decryptBlobWithPassword(blob []byte, ptType plaintextType, password []byte) // extractKeyWithIndex accepts the master derivation key and an index which // specifies the key to be derived func extractKeyWithIndex(derivationKey []byte, index uint64) (pk crypto.PublicKey, sk crypto.PrivateKey, err error) { - // The info tag is just the the utf-8 string representation of the index + // The info tag is just the utf-8 string representation of the index info := []byte(fmt.Sprintf(hkdfInfoFormat, index)) // We can skip hkdf.Extract since our key is long and uniformly random diff --git a/data/account/registeryDbOps.go b/data/account/registeryDbOps.go index b8668d1893..165f53c41d 100644 --- a/data/account/registeryDbOps.go +++ b/data/account/registeryDbOps.go @@ -21,6 +21,7 @@ import ( "database/sql" "errors" "fmt" + "maps" "strings" "github.com/algorand/go-algorand/data/basics" @@ -257,9 +258,7 @@ func (f *flushOp) apply(db *participationDB) error { if err != nil { // put back what we didn't finish with db.mutex.Lock() - for id, v := range dirty { - db.dirty[id] = v - } + maps.Copy(db.dirty, dirty) db.mutex.Unlock() } diff 
--git a/data/accountManager_test.go b/data/accountManager_test.go index 87dae6e59d..8cce9ce945 100644 --- a/data/accountManager_test.go +++ b/data/accountManager_test.go @@ -185,7 +185,7 @@ func testAccountManagerKeys(t *testing.T, registry account.ParticipationRegistry <-keyDeletionDone testDuration := time.Since(testStartTime) t.Logf("testDuration %v keysTotalDuration %v\n", testDuration, keysTotalDuration) - require.Lessf(t, keysTotalDuration, testDuration/100, fmt.Sprintf("the time to aquire the keys via Keys() was %v whereas blocking on keys deletion took %v", keysTotalDuration, testDuration)) + require.Lessf(t, keysTotalDuration, testDuration/100, fmt.Sprintf("the time to acquire the keys via Keys() was %v whereas blocking on keys deletion took %v", keysTotalDuration, testDuration)) t.Logf("Calling AccountManager.Keys() while AccountManager.DeleteOldKeys() was busy, 10 times in a row, resulted in accumulated delay of %v\n", keysTotalDuration) } diff --git a/data/appRateLimiter_test.go b/data/appRateLimiter_test.go index 6a9efbd30a..472731ac15 100644 --- a/data/appRateLimiter_test.go +++ b/data/appRateLimiter_test.go @@ -230,7 +230,7 @@ func TestAppRateLimiter_IntervalSkip(t *testing.T) { now := time.Date(2023, 9, 11, 10, 10, 11, 0, time.UTC).UnixNano() // 11 sec => 1 sec into the interval // fill 80% of the current interval - // switch to the next next interval + // switch to the next interval // ensure all capacity is available for i := 0; i < int(0.8*float64(rate)); i++ { diff --git a/data/basics/fields_test.go b/data/basics/fields_test.go index 234e4246bf..48717fc20e 100644 --- a/data/basics/fields_test.go +++ b/data/basics/fields_test.go @@ -18,6 +18,7 @@ package basics_test import ( "reflect" + "slices" "testing" "github.com/algorand/go-algorand/data/basics" @@ -35,11 +36,9 @@ func makeTypeCheckFunction(t *testing.T, exceptions []reflectionhelpers.TypePath return func(path reflectionhelpers.TypePath, stack []reflect.Type) bool { currentType := 
stack[len(stack)-1] - for _, exception := range exceptions { - if path.Equals(exception) { - t.Logf("Skipping exception for path: %s", path) - return true - } + if slices.ContainsFunc(exceptions, path.Equals) { + t.Logf("Skipping exception for path: %s", path) + return true } switch currentType.Kind() { @@ -59,7 +58,7 @@ func makeTypeCheckFunction(t *testing.T, exceptions []reflectionhelpers.TypePath func TestBlockFields(t *testing.T) { partitiontest.PartitionTest(t) - typeToCheck := reflect.TypeOf(bookkeeping.Block{}) + typeToCheck := reflect.TypeFor[bookkeeping.Block]() // These exceptions are for pre-existing usages of string. Only add to this list if you really need to use string. exceptions := []reflectionhelpers.TypePath{ @@ -85,7 +84,7 @@ func TestBlockFields(t *testing.T) { func TestAccountDataFields(t *testing.T) { partitiontest.PartitionTest(t) - typeToCheck := reflect.TypeOf(basics.AccountData{}) + typeToCheck := reflect.TypeFor[basics.AccountData]() // These exceptions are for pre-existing usages of string. Only add to this list if you really need to use string. 
exceptions := []reflectionhelpers.TypePath{ diff --git a/data/basics/msgp_gen.go b/data/basics/msgp_gen.go index 71c0c87ec0..aa35233b2a 100644 --- a/data/basics/msgp_gen.go +++ b/data/basics/msgp_gen.go @@ -6153,8 +6153,6 @@ func (z *TealValue) MsgIsZero() bool { func TealValueMaxSize() (s int) { s = 1 + 3 + msgp.Uint64Size + 3 panic("Unable to determine max size: String type z.Bytes is unbounded") - s += 3 + msgp.Uint64Size - return } // MarshalMsg implements msgp.Marshaler diff --git a/data/basics/serr.go b/data/basics/serr.go index c788c6ad4a..4e767a8bde 100644 --- a/data/basics/serr.go +++ b/data/basics/serr.go @@ -18,6 +18,7 @@ package basics import ( "errors" + "maps" "strings" "golang.org/x/exp/slog" @@ -53,7 +54,7 @@ func (e *SError) Error() string { } // imperfect because we replace \%A as well if strings.Contains(e.Msg, "%A") { - return strings.Replace(e.Msg, "%A", e.AttributesAsString(), -1) + return strings.ReplaceAll(e.Msg, "%A", e.AttributesAsString()) } return e.Msg } @@ -108,9 +109,7 @@ func Wrap(err error, msg string, field string, pairs ...any) error { var inner *SError if ok := errors.As(err, &inner); ok { attributes := make(map[string]any, len(inner.Attrs)) - for key, val := range inner.Attrs { - attributes[key] = val - } + maps.Copy(attributes, inner.Attrs) serr.Attrs[field+"-attrs"] = attributes } diff --git a/data/basics/testing/nearzero.go b/data/basics/testing/nearzero.go index 4620e77c72..e073cea496 100644 --- a/data/basics/testing/nearzero.go +++ b/data/basics/testing/nearzero.go @@ -18,6 +18,7 @@ package testing import ( "reflect" + "slices" "testing" "time" ) @@ -35,7 +36,7 @@ func NearZeros(t *testing.T, sample any) []any { if typ.Kind() != reflect.Struct { t.Fatalf("NearZeros: sample must be a struct, got %s", typ.Kind()) } - paths := CollectPaths(typ, []int{}) + paths := collectPaths(typ, nil, nil) var results []any for _, path := range paths { inst := makeInstanceWithNonZeroField(typ, path) @@ -45,14 +46,22 @@ func NearZeros(t 
*testing.T, sample any) []any { } // CollectPaths walks over the struct type (recursively) and returns a slice of -// index paths. Each path points to exactly one (exported) sub-field. +// index paths. Each path points to exactly one (exported) sub-field. If the +// type supplied is recursive, the path terminates at the recursion point. func CollectPaths(typ reflect.Type, prefix []int) [][]int { + return collectPaths(typ, prefix, []reflect.Type{}) +} + +// collectPaths walks over the struct type (recursively) and returns a slice of +// index paths. Each path points to exactly one (exported) sub-field. +// It tracks types in the current path to avoid infinite loops on recursive types. +func collectPaths(typ reflect.Type, prefix []int, pathStack []reflect.Type) [][]int { var paths [][]int switch typ.Kind() { case reflect.Ptr, reflect.Slice, reflect.Array: // Look through container to the element - return CollectPaths(typ.Elem(), prefix) + return collectPaths(typ.Elem(), prefix, pathStack) case reflect.Map: // Record as a leaf because we will just make a single entry in the map @@ -60,17 +69,27 @@ func CollectPaths(typ reflect.Type, prefix []int) [][]int { case reflect.Struct: // Special case: skip known value-type structs like time.Time - if typ == reflect.TypeOf(time.Time{}) { + if typ == reflect.TypeFor[time.Time]() { return [][]int{prefix} } + // Check if this type is already in the path stack (cycle detection) + if slices.Contains(pathStack, typ) { + // We've encountered a cycle, treat this as a leaf + return [][]int{prefix} + } + + // Add this type to the path stack + // Clone to avoid sharing the underlying array across branches + newStack := append(slices.Clone(pathStack), typ) + for i := 0; i < typ.NumField(); i++ { field := typ.Field(i) if !field.IsExported() { continue } newPath := append(append([]int(nil), prefix...), i) - subPaths := CollectPaths(field.Type, newPath) + subPaths := collectPaths(field.Type, newPath, newStack) // If recursion yielded deeper 
paths, use them if len(subPaths) > 0 { diff --git a/data/basics/userBalance.go b/data/basics/userBalance.go index 431f9abe84..4ad24579b1 100644 --- a/data/basics/userBalance.go +++ b/data/basics/userBalance.go @@ -194,7 +194,7 @@ type AccountData struct { Assets map[AssetIndex]AssetHolding `codec:"asset,allocbound=bounds.EncodedMaxAssetsPerAccount"` // AuthAddr is the address against which signatures/multisigs/logicsigs should be checked. - // If empty, the address of the account whose AccountData this is is used. + // If empty, the address of the account whose AccountData this is used. // A transaction may change an account's AuthAddr to "re-key" the account. // This allows key rotation, changing the members in a multisig, etc. AuthAddr Address `codec:"spend"` diff --git a/data/bookkeeping/msgp_gen.go b/data/bookkeeping/msgp_gen.go index 09c66bf58d..32fe332eae 100644 --- a/data/bookkeeping/msgp_gen.go +++ b/data/bookkeeping/msgp_gen.go @@ -2653,17 +2653,6 @@ func (z *Genesis) MsgIsZero() bool { func GenesisMaxSize() (s int) { s = 1 + 3 panic("Unable to determine max size: String type z.SchemaID is unbounded") - s += 8 + protocol.NetworkIDMaxSize() + 6 + protocol.ConsensusVersionMaxSize() + 6 - // Calculating size of slice: z.Allocation - s += msgp.ArrayHeaderSize + ((MaxInitialGenesisAllocationSize) * (GenesisAllocationMaxSize())) - s += 4 - panic("Unable to determine max size: String type z.RewardsPool is unbounded") - s += 5 - panic("Unable to determine max size: String type z.FeeSink is unbounded") - s += 10 + msgp.Int64Size + 8 - panic("Unable to determine max size: String type z.Comment is unbounded") - s += 8 + msgp.BoolSize - return } // MarshalMsg implements msgp.Marshaler @@ -3087,10 +3076,6 @@ func (z *GenesisAllocation) MsgIsZero() bool { func GenesisAllocationMaxSize() (s int) { s = 1 + 5 panic("Unable to determine max size: String type z.Address is unbounded") - s += 8 - panic("Unable to determine max size: String type z.Comment is unbounded") - s += 
6 + GenesisAccountDataMaxSize() - return } // MarshalMsg implements msgp.Marshaler diff --git a/data/hashable/msgp_gen.go b/data/hashable/msgp_gen.go index 6906f7528f..fc20f28a28 100644 --- a/data/hashable/msgp_gen.go +++ b/data/hashable/msgp_gen.go @@ -136,5 +136,4 @@ func (z *Message) MsgIsZero() bool { func MessageMaxSize() (s int) { s = 1 + 4 panic("Unable to determine max size: String type z.Message is unbounded") - return } diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go index dfdc633262..98e8e69c54 100644 --- a/data/pools/transactionPool.go +++ b/data/pools/transactionPool.go @@ -19,6 +19,7 @@ package pools import ( "errors" "fmt" + "maps" "sync" "sync/atomic" "time" @@ -268,9 +269,7 @@ func (pool *TransactionPool) rememberCommit(flush bool) { } else { pool.pendingTxGroups = append(pool.pendingTxGroups, pool.rememberedTxGroups...) - for txid, txn := range pool.rememberedTxids { - pool.pendingTxids[txid] = txn - } + maps.Copy(pool.pendingTxids, pool.rememberedTxids) } pool.rememberedTxGroups = nil @@ -641,7 +640,7 @@ func (pool *TransactionPool) addToPendingBlockEvaluatorOnce(txgroup []transactio pool.assemblyMu.Lock() defer pool.assemblyMu.Unlock() if evalRnd := pool.pendingBlockEvaluator.Round(); pool.assemblyRound > evalRnd { - // the block we're assembling now isn't the one the the AssembleBlock is waiting for. While it would be really cool + // the block we're assembling now isn't the one the AssembleBlock is waiting for. While it would be really cool // to finish generating the block, it would also be pointless to spend time on it. 
// we're going to set the ok and assemblyCompletedOrAbandoned to "true" so we can complete this loop asap pool.assemblyResults.ok = true diff --git a/data/pools/transactionPool_test.go b/data/pools/transactionPool_test.go index 475cb27e94..73a06892d1 100644 --- a/data/pools/transactionPool_test.go +++ b/data/pools/transactionPool_test.go @@ -1546,7 +1546,7 @@ func TestStateProofLogging(t *testing.T) { _, err = transactionPool.AssembleBlock(514, time.Time{}) require.NoError(t, err) - // parse the log messages and retreive the Metrics for SP in assmbe block + // parse the log messages and retrieve the Metrics for SP in assemble block scanner := bufio.NewScanner(strings.NewReader(buf.String())) lines := make([]string, 0) for scanner.Scan() { diff --git a/data/transactions/asset_test.go b/data/transactions/asset_test.go new file mode 100644 index 0000000000..b4e1cbf53c --- /dev/null +++ b/data/transactions/asset_test.go @@ -0,0 +1,198 @@ +// Copyright (C) 2019-2025 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package transactions + +import ( + "fmt" + "strings" + "testing" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/stretchr/testify/require" +) + +func TestAxferWellFormedErrors(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + cases := []struct { + axfer AssetTransferTxnFields + expectedError string + }{ + { + axfer: AssetTransferTxnFields{ + XferAsset: basics.AssetIndex(0), + AssetAmount: 0, + AssetReceiver: basics.Address{}, + }, + }, + { + axfer: AssetTransferTxnFields{ + XferAsset: basics.AssetIndex(0), + AssetAmount: 1, + AssetReceiver: basics.Address{0x01}, + }, + expectedError: "asset ID cannot be zero", + }, + { + axfer: AssetTransferTxnFields{ + XferAsset: basics.AssetIndex(1), + AssetAmount: 0, + AssetSender: basics.Address{0x01}, + AssetCloseTo: basics.Address{0x02}, + }, + expectedError: "cannot close asset by clawback", + }, + } + + for i, ax := range cases { + name := fmt.Sprintf("axfer_i=%d", i) + if ax.expectedError != "" { + name = ax.expectedError + } + t.Run(name, func(t *testing.T) { + err := ax.axfer.wellFormed() + if ax.expectedError != "" { + require.ErrorContains(t, err, ax.expectedError) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestAcfgWellFormedErrors(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + cv18 := protocol.ConsensusV18 + cv20 := protocol.ConsensusV20 + cv28 := protocol.ConsensusV28 + + cases := []struct { + acfg AssetConfigTxnFields + cv protocol.ConsensusVersion + expectedError string + }{ + { + acfg: AssetConfigTxnFields{ + AssetParams: basics.AssetParams{ + AssetName: strings.Repeat("A", 33), + }, + }, + cv: cv18, + expectedError: "transaction asset name too big: 33 > 32", + }, + { + acfg: AssetConfigTxnFields{ + AssetParams: basics.AssetParams{ + UnitName: strings.Repeat("B", 9), + }, + }, + expectedError: 
"transaction asset unit name too big: 9 > 8", + }, + { + acfg: AssetConfigTxnFields{ + AssetParams: basics.AssetParams{ + URL: strings.Repeat("C", 33), + }, + }, + cv: cv18, + expectedError: "transaction asset url too big: 33 > 32", + }, + { + acfg: AssetConfigTxnFields{ + AssetParams: basics.AssetParams{ + Decimals: 20, + }, + }, + cv: cv20, + expectedError: "transaction asset decimals is too high (max is 19)", + }, + { + acfg: AssetConfigTxnFields{ + AssetParams: basics.AssetParams{ + URL: strings.Repeat("D", 97), + }, + }, + cv: cv28, + expectedError: "transaction asset url too big: 97 > 96", + }, + } + + for i, ac := range cases { + name := fmt.Sprintf("acfg_i=%d", i) + if ac.expectedError != "" { + name = ac.expectedError + } + t.Run(name, func(t *testing.T) { + cv := ac.cv + if cv == "" { + cv = protocol.ConsensusFuture + } + err := ac.acfg.wellFormed(config.Consensus[cv]) + if ac.expectedError != "" { + require.ErrorContains(t, err, ac.expectedError) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestAfrzWellFormedErrors(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + cases := []struct { + afrz AssetFreezeTxnFields + expectedError string + }{ + { + afrz: AssetFreezeTxnFields{ + FreezeAccount: basics.Address{0x01}, + FreezeAsset: 0, + }, + expectedError: "asset ID cannot be zero", + }, + { + afrz: AssetFreezeTxnFields{ + FreezeAccount: basics.Address{}, + FreezeAsset: 1, + }, + expectedError: "freeze account cannot be empty", + }, + } + + for i, ac := range cases { + name := fmt.Sprintf("afrz_i=%d", i) + if ac.expectedError != "" { + name = ac.expectedError + } + t.Run(name, func(t *testing.T) { + err := ac.afrz.wellFormed() + if ac.expectedError != "" { + require.ErrorContains(t, err, ac.expectedError) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go index 1bd170709c..42d46b50cf 100644 --- a/data/transactions/logic/assembler.go +++ 
b/data/transactions/logic/assembler.go @@ -2207,7 +2207,7 @@ func checkMacroName(macroName string, version uint64, labels map[string]int) err } else if count == 1 { secondRune = r } - if !unicode.IsLetter(r) && !unicode.IsDigit(r) && !otherAllowedChars[r] { + if !unicode.IsLetter(r) && !unicode.IsDigit(r) && (int(r) >= len(otherAllowedChars) || !otherAllowedChars[r]) { return fmt.Errorf("%s character not allowed in macro name", string(r)) } count++ diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go index 49c545fffc..455a287836 100644 --- a/data/transactions/logic/assembler_test.go +++ b/data/transactions/logic/assembler_test.go @@ -443,6 +443,11 @@ pushbytes 0x0123 sumhash512 ` +const sha512Nonsense = ` +pushbytes 0x0123 +sha512 +` + const mimcNonsense = ` pushbytes 0x11223344556677889900aabbccddeeff11223344556677889900aabbccddeeff mimc BLS12_381Mp111 @@ -463,7 +468,7 @@ const v11Nonsense = v10Nonsense + incentiveNonsense + mimcNonsense const v12Nonsense = v11Nonsense + fvNonsense -const v13Nonsense = v12Nonsense + sumhashNonsense +const v13Nonsense = v12Nonsense + sumhashNonsense + sha512Nonsense const v6Compiled = 
"2004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f2310231123122313231418191a1b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b400b53a03b6b7043cb8033a0c2349c42a9631007300810881088120978101c53a8101c6003a" @@ -493,7 +498,8 @@ const fvCompiled = "8002abcd494985" const v12Compiled = v11Compiled + fvCompiled const sumhashCompiled = "8002012386" -const v13Compiled = v12Compiled + sumhashCompiled +const sha512Compiled = "8002012387" +const v13Compiled = v12Compiled + sumhashCompiled + sha512Compiled var nonsense = map[uint64]string{ 1: v1Nonsense, @@ -1741,6 +1747,14 @@ global PayoutsPercent global PayoutsMinBalance global PayoutsMaxBalance txn RejectVersion +pushint 1 +block BlkBranch512 +pushint 1 +block BlkSha512_256TxnCommitment +pushint 1 +block BlkSha512TxnCommitment +pushint 1 +block BlkSha256TxnCommitment `, AssemblerMaxVersion) for _, names := range [][]string{GlobalFieldNames[:], TxnFieldNames[:], blockFieldNames[:]} { for _, f := range names { @@ -3545,7 +3559,8 @@ add: AssemblerMaxVersion, exp(3, "Cannot create label with 
same name as macro: coolLabel"), ) - // These two tests are just for coverage, they really really can't happen + testProg(t, `#define 👩 123`, AssemblerMaxVersion, exp(1, "👩 character not allowed in macro name")) + // These two tests are just for coverage, they really can't happen ops := newOpStream(AssemblerMaxVersion) err := define(&ops, []token{{str: "not#define"}}) require.EqualError(t, err, "0: invalid syntax: not#define") diff --git a/data/transactions/logic/crypto.go b/data/transactions/logic/crypto.go index 0a1c582301..b0acc75c00 100644 --- a/data/transactions/logic/crypto.go +++ b/data/transactions/logic/crypto.go @@ -125,6 +125,13 @@ func opSumhash512(cx *EvalContext) error { return nil } +func opSHA512(cx *EvalContext) error { + last := len(cx.Stack) - 1 + hash := sha512.Sum512(cx.Stack[last].Bytes) + cx.Stack[last].Bytes = hash[:] + return nil +} + func opFalconVerify(cx *EvalContext) error { last := len(cx.Stack) - 1 // index of PK prev := last - 1 // index of signature diff --git a/data/transactions/logic/crypto_test.go b/data/transactions/logic/crypto_test.go index 9caa1c19d5..6d5cba7eb4 100644 --- a/data/transactions/logic/crypto_test.go +++ b/data/transactions/logic/crypto_test.go @@ -117,6 +117,18 @@ byte 0x98D2C31612EA500279B6753E5F6E780CA63EBA8274049664DAD66A2565ED1D2A testAccepts(t, progText, 1) } +func TestSHA512(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + // echo -n "hello" | sha512sum + progText := ` +byte "hello"; sha512 +byte 0x9b71d224bd62f3785d96d46ad3ea3d73319bfbc2890caadae2dff72519673ca72323c3d99ba5c11d7c7acc6e14b8c5da0c4663475c2e5c3adef46f73bcdec043 +==` + testAccepts(t, progText, 13) +} + func TestMimc(t *testing.T) { // We created test vectors for the MiMC hash function by defining a set of preimages for different // input sizes and calling gnark-crypto's MiMC implementation to compute the expected hash values. 
@@ -129,7 +141,7 @@ func TestMimc(t *testing.T) { // output does not change under the hood with new versions. // // We test that malformed inputs panic, in particular we test malfornmed inputs of: - // 0 length, lenghts not multiple of 32 bytes, chunks representing values greater than the modulus. + // 0 length, lengths not multiple of 32 bytes, chunks representing values greater than the modulus. // We test that well formed inputs hash correctly, testing both single chunk inputs (32-byte) and // multiple chunk inputs (96 bytes). partitiontest.PartitionTest(t) @@ -793,7 +805,7 @@ int ` + fmt.Sprintf("%d", testLogicBudget-2500-8) + ` } func BenchmarkHashes(b *testing.B) { - for _, hash := range []string{"sha256", "keccak256" /* skip, same as keccak "sha3_256", */, "sha512_256", "sumhash512", "mimc BN254Mp110", "mimc BLS12_381Mp111"} { + for _, hash := range []string{"sha256", "keccak256" /* skip, same as keccak "sha3_256", */, "sha512_256", "sumhash512", "mimc BN254Mp110", "mimc BLS12_381Mp111", "sha512"} { for _, size := range []int{0, 32, 128, 512, 1024, 4096} { if size == 0 && (hash == "mimc BN254Mp110" || hash == "mimc BLS12_381Mp111") { continue diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go index 2748bd7dca..80a3f0e943 100644 --- a/data/transactions/logic/doc.go +++ b/data/transactions/logic/doc.go @@ -37,6 +37,7 @@ var opDescByName = map[string]OpDesc{ "keccak256": {"Keccak256 hash of value A, yields [32]byte", "", nil}, "sha512_256": {"SHA512_256 hash of value A, yields [32]byte", "", nil}, "sha3_256": {"SHA3_256 hash of value A, yields [32]byte", "", nil}, + "sha512": {"SHA512 of value A, yields [64]byte", "", nil}, "sumhash512": {"sumhash512 of value A, yields [64]byte", "", nil}, "falcon_verify": {"for (data A, compressed-format signature B, pubkey C) verify the signature of data against the pubkey => {0 or 1}", "", nil}, @@ -361,7 +362,7 @@ var OpGroups = map[string][]string{ "Byte Array Manipulation": {"getbit", "setbit", 
"getbyte", "setbyte", "concat", "len", "substring", "substring3", "extract", "extract3", "extract_uint16", "extract_uint32", "extract_uint64", "replace2", "replace3", "base64_decode", "json_ref"}, "Byte Array Arithmetic": {"b+", "b-", "b/", "b*", "b<", "b>", "b<=", "b>=", "b==", "b!=", "b%", "bsqrt"}, "Byte Array Logic": {"b|", "b&", "b^", "b~"}, - "Cryptography": {"sha256", "keccak256", "sha512_256", "sha3_256", "sumhash512", "falcon_verify", "ed25519verify", "ed25519verify_bare", "ecdsa_verify", "ecdsa_pk_recover", "ecdsa_pk_decompress", "vrf_verify", "ec_add", "ec_scalar_mul", "ec_pairing_check", "ec_multi_scalar_mul", "ec_subgroup_check", "ec_map_to", "mimc"}, + "Cryptography": {"sha256", "keccak256", "sha512_256", "sha3_256", "sha512", "sumhash512", "falcon_verify", "ed25519verify", "ed25519verify_bare", "ecdsa_verify", "ecdsa_pk_recover", "ecdsa_pk_decompress", "vrf_verify", "ec_add", "ec_scalar_mul", "ec_pairing_check", "ec_multi_scalar_mul", "ec_subgroup_check", "ec_map_to", "mimc"}, "Loading Values": {"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "pushint", "pushints", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "pushbytes", "pushbytess", "bzero", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "args", "txn", "gtxn", "txna", "txnas", "gtxna", "gtxnas", "gtxns", "gtxnsa", "gtxnsas", "global", "load", "loads", "store", "stores", "gload", "gloads", "gloadss", "gaid", "gaids"}, "Flow Control": {"err", "bnz", "bz", "b", "return", "pop", "popn", "dup", "dup2", "dupn", "dig", "bury", "cover", "uncover", "frame_dig", "frame_bury", "swap", "select", "assert", "callsub", "proto", "retsub", "switch", "match"}, "State Access": {"balance", "min_balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get", "app_params_get", "acct_params_get", "voter_params_get", "online_stake", "log", 
"block"}, diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go index d8b30228c7..ca1f2e07cf 100644 --- a/data/transactions/logic/eval.go +++ b/data/transactions/logic/eval.go @@ -522,13 +522,10 @@ func feeCredit(txgroup []transactions.SignedTxnWithAD, minFee uint64) uint64 { // NewInnerEvalParams creates an EvalParams to be used while evaluating an inner group txgroup func NewInnerEvalParams(txg []transactions.SignedTxnWithAD, caller *EvalContext) *EvalParams { - minAvmVersion := computeMinAvmVersion(txg) - // Can't happen currently, since earliest inner callable version is higher - // than any minimum imposed otherwise. But is correct to inherit a stronger - // restriction from above, in case of future restriction. - if minAvmVersion < caller.minAvmVersion { - minAvmVersion = caller.minAvmVersion - } + minAvmVersion := max(computeMinAvmVersion(txg), caller.minAvmVersion) + // caller.AvmVersion can't exceed the computed value currently, since earliest + // inner callable version is higher than any minimum imposed otherwise. But is + // correct to inherit a stronger restriction from above, in case of future restriction. // Unlike NewEvalParams, do not add fee credit here. opTxSubmit has already done so. 
@@ -1638,10 +1635,7 @@ func (cx *EvalContext) step() error { if len(cx.Stack) == 0 { stackString = "" } else { - num := 1 - if len(spec.Return.Types) > 1 { - num = len(spec.Return.Types) - } + num := max(len(spec.Return.Types), 1) // check for nil error here, because we might not return // values if we encounter an error in the opcode if err == nil { @@ -3238,10 +3232,7 @@ func (cx *EvalContext) txnFieldToStack(stxn *transactions.SignedTxnWithAD, fs *t return sv, fmt.Errorf("invalid ApprovalProgramPages index %d", arrayFieldIdx) } first := arrayFieldIdx * maxStringSize - last := first + maxStringSize - if last > uint64(len(txn.ApprovalProgram)) { - last = uint64(len(txn.ApprovalProgram)) - } + last := min(first+maxStringSize, uint64(len(txn.ApprovalProgram))) sv.Bytes = txn.ApprovalProgram[first:last] case NumClearStateProgramPages: sv.Uint = uint64(basics.DivCeil(len(txn.ClearStateProgram), maxStringSize)) @@ -3251,10 +3242,7 @@ func (cx *EvalContext) txnFieldToStack(stxn *transactions.SignedTxnWithAD, fs *t return sv, fmt.Errorf("invalid ClearStateProgramPages index %d", arrayFieldIdx) } first := arrayFieldIdx * maxStringSize - last := first + maxStringSize - if last > uint64(len(txn.ClearStateProgram)) { - last = uint64(len(txn.ClearStateProgram)) - } + last := min(first+maxStringSize, uint64(len(txn.ClearStateProgram))) sv.Bytes = txn.ClearStateProgram[first:last] case RekeyTo: sv.Bytes = txn.RekeyTo[:] @@ -3968,7 +3956,7 @@ func opGetBit(cx *EvalContext) error { var bit uint64 if target.avmType() == avmUint64 { if idx > 63 { - return errors.New("getbit index > 63 with with Uint") + return errors.New("getbit index > 63 with Uint") } mask := uint64(1) << idx bit = (target.Uint & mask) >> idx @@ -4698,7 +4686,7 @@ func opAppGlobalDel(cx *EvalContext) error { } // We have a difficult naming problem here. 
Some opcodes allow (and used to -// require) ASAs and Apps to to be referenced by their "index" in an app call +// require) ASAs and Apps to be referenced by their "index" in an app call // txn's foreign-apps or foreign-assets arrays. That was a small integer, no // more than 2 or so, and was often called an "index". But it was not a // basics.AssetIndex or basics.ApplicationIndex. @@ -5828,6 +5816,16 @@ func opBlock(cx *EvalContext) error { cx.Stack[last] = stackValue{Uint: hdr.Bonus.Raw} case BlkProposerPayout: cx.Stack[last] = stackValue{Uint: hdr.ProposerPayout.Raw} + + case BlkBranch512: + cx.Stack[last].Bytes = hdr.Branch512[:] + case BlkSha512_256TxnCommitment: + cx.Stack[last].Bytes = hdr.NativeSha512_256Commitment[:] + case BlkSha256TxnCommitment: + cx.Stack[last].Bytes = hdr.Sha256Commitment[:] + case BlkSha512TxnCommitment: + cx.Stack[last].Bytes = hdr.Sha512Commitment[:] + default: return fmt.Errorf("invalid block field %s", fs.field) } diff --git a/data/transactions/logic/evalAppTxn_test.go b/data/transactions/logic/evalAppTxn_test.go index 21dbc52087..23f9701731 100644 --- a/data/transactions/logic/evalAppTxn_test.go +++ b/data/transactions/logic/evalAppTxn_test.go @@ -108,7 +108,7 @@ func TestFieldTypes(t *testing.T) { TestApp(t, NoTrack("itxn_begin; int 7; itxn_field Receiver;"), ep, "not an address") TestApp(t, NoTrack("itxn_begin; byte \"\"; itxn_field CloseRemainderTo;"), ep, "not an address") TestApp(t, NoTrack("itxn_begin; byte \"\"; itxn_field AssetSender;"), ep, "not an address") - // can't really tell if it's an addres, so 32 bytes gets further + // can't really tell if it's an address, so 32 bytes gets further TestApp(t, "itxn_begin; byte \"01234567890123456789012345678901\"; itxn_field AssetReceiver; int 1", ep, "unavailable Account") // but a b32 string rep is not an account @@ -1897,7 +1897,7 @@ func TestTxIDAndGroupIDCalculation(t *testing.T) { }) require.Equal(t, crypto.Digest{0x96, 0x90, 0x1, 0x64, 0x24, 0xa5, 0xda, 0x4, 0x3d, 0xd, 
0x40, 0xc9, 0xf6, 0xfa, 0xc3, 0xa6, 0x26, 0x19, 0xd3, 0xf0, 0xb7, 0x28, 0x87, 0xf8, 0x5a, 0xd1, 0xa7, 0xbc, 0x1d, 0xad, 0x8b, 0xfc}, gcBCDgroup) } else { - // these calculations are "wrong," but they're here to maintain backwards compatability with the original implementation + // these calculations are "wrong," but they're here to maintain backwards compatibility with the original implementation gcAAtxid = actual[grandchildAAIndex].txn.InnerID(childAtxn.ID(), 0) require.Equal(t, transactions.Txid{0xb5, 0xa, 0x16, 0x90, 0x78, 0x21, 0xf6, 0x96, 0x1b, 0x9c, 0x72, 0x5e, 0xf4, 0x8b, 0xe7, 0xb8, 0x2b, 0xd, 0x74, 0xd4, 0x71, 0xa2, 0x43, 0xb0, 0xfc, 0x19, 0xbc, 0x1c, 0xda, 0x95, 0x8f, 0xd0}, gcAAtxid) @@ -2276,7 +2276,7 @@ func TestInnerTxIDCalculation(t *testing.T) { gcBDtxid = actual[grandchildBDIndex].txn.InnerID(childBtxid, 3) require.Equal(t, transactions.Txid{0xcd, 0x15, 0x47, 0x3f, 0x42, 0xf5, 0x9c, 0x4a, 0x11, 0xa4, 0xe3, 0x92, 0x30, 0xf, 0x97, 0x1d, 0x3b, 0x1, 0x7, 0xbc, 0x1f, 0x3f, 0xcc, 0x9d, 0x43, 0x5b, 0xb2, 0xa4, 0x15, 0x8b, 0x89, 0x4e}, gcBDtxid) } else { - // these calculations are "wrong," but they're here to maintain backwards compatability with the original implementation + // these calculations are "wrong," but they're here to maintain backwards compatibility with the original implementation childAtxid = childAtxn.ID() require.Equal(t, transactions.Txid{0xc9, 0xa4, 0x41, 0xff, 0x9c, 0x62, 0x40, 0x6e, 0x63, 0xd9, 0x5, 0x19, 0x3b, 0x32, 0x43, 0x3d, 0xba, 0x80, 0x9f, 0xa3, 0xe4, 0xed, 0x2f, 0xa4, 0x19, 0x2b, 0x3f, 0x21, 0x96, 0xe2, 0xec, 0x21}, childAtxid) diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go index 40251e7551..63733389d3 100644 --- a/data/transactions/logic/evalStateful_test.go +++ b/data/transactions/logic/evalStateful_test.go @@ -665,7 +665,7 @@ func testAppFull(t *testing.T, program []byte, gi int, aid basics.AppIndex, ep * // that something STOPS working as of a particular version. 
Note that this does // *not* use different consensus versions. It is tempting to make it find the // lowest possible consensus version in the loop in order to support the `v` it -// it working on. For super confidence, one might argue this should be a nested +// is working on. For super confidence, one might argue this should be a nested // loop over all of the consensus versions that work with the `v`, from the // first possible, to vFuture. func testLogicRange(t *testing.T, start, stop int, test func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger)) { @@ -1107,7 +1107,7 @@ int 4141 == ` // check that even during application creation (Txn.ApplicationID == 0) - // we will use the the kvCow if the exact application ID (100) is + // we will use the kvCow if the exact application ID (100) is // specified in the transaction now.TxnGroup[0].Txn.ApplicationID = 0 now.TxnGroup[0].Txn.ForeignApps = []basics.AppIndex{100} diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go index 3fc3028596..ac72f68567 100644 --- a/data/transactions/logic/eval_test.go +++ b/data/transactions/logic/eval_test.go @@ -88,7 +88,7 @@ func makeTestProto(opts ...protoOpt) *config.ConsensusParams { // With the addition of itxn_field, itxn_submit, which rely on // machinery outside logic package for validity checking, we - // need a more realistic set of consensus paramaters. + // need a more realistic set of consensus parameters. 
Asset: true, MaxAssetNameBytes: 12, MaxAssetUnitNameBytes: 6, diff --git a/data/transactions/logic/fields.go b/data/transactions/logic/fields.go index 4e6f07bcae..9e6a25c7e6 100644 --- a/data/transactions/logic/fields.go +++ b/data/transactions/logic/fields.go @@ -1075,6 +1075,18 @@ const ( // BlkProposerPayout is the actual amount moved from feesink to proposer BlkProposerPayout + // BlkBranch512 is the wider, sha-512 hash of the previous block + BlkBranch512 + + // BlkSha512_256TxnCommitment is "Algorand Native" txn merkle root + BlkSha512_256TxnCommitment + + // BlkSha256TxnCommitment is the sha256 txn merkle root + BlkSha256TxnCommitment + + // BlkSha512TxnCommitment is the sha512 txn merkle root + BlkSha512TxnCommitment + invalidBlockField // compile-time constant for number of fields ) @@ -1097,6 +1109,10 @@ var blockFieldSpecs = [...]blockFieldSpec{ {BlkProtocol, StackBytes, incentiveVersion}, {BlkTxnCounter, StackUint64, incentiveVersion}, {BlkProposerPayout, StackUint64, incentiveVersion}, + {BlkBranch512, StackBytes64, 13}, + {BlkSha512_256TxnCommitment, StackBytes32, 13}, + {BlkSha256TxnCommitment, StackBytes32, 13}, + {BlkSha512TxnCommitment, StackBytes64, 13}, } func blockFieldSpecByField(r BlockField) (blockFieldSpec, bool) { diff --git a/data/transactions/logic/fields_string.go b/data/transactions/logic/fields_string.go index a9059e56f3..d06d737b4b 100644 --- a/data/transactions/logic/fields_string.go +++ b/data/transactions/logic/fields_string.go @@ -408,12 +408,16 @@ func _() { _ = x[BlkProtocol-7] _ = x[BlkTxnCounter-8] _ = x[BlkProposerPayout-9] - _ = x[invalidBlockField-10] + _ = x[BlkBranch512-10] + _ = x[BlkSha512_256TxnCommitment-11] + _ = x[BlkSha256TxnCommitment-12] + _ = x[BlkSha512TxnCommitment-13] + _ = x[invalidBlockField-14] } -const _BlockField_name = "BlkSeedBlkTimestampBlkProposerBlkFeesCollectedBlkBonusBlkBranchBlkFeeSinkBlkProtocolBlkTxnCounterBlkProposerPayoutinvalidBlockField" +const _BlockField_name = 
"BlkSeedBlkTimestampBlkProposerBlkFeesCollectedBlkBonusBlkBranchBlkFeeSinkBlkProtocolBlkTxnCounterBlkProposerPayoutBlkBranch512BlkSha512_256TxnCommitmentBlkSha256TxnCommitmentBlkSha512TxnCommitmentinvalidBlockField" -var _BlockField_index = [...]uint8{0, 7, 19, 30, 46, 54, 63, 73, 84, 97, 114, 131} +var _BlockField_index = [...]uint8{0, 7, 19, 30, 46, 54, 63, 73, 84, 97, 114, 126, 152, 174, 196, 213} func (i BlockField) String() string { if i < 0 || i >= BlockField(len(_BlockField_index)-1) { diff --git a/data/transactions/logic/mocktracer/scenarios.go b/data/transactions/logic/mocktracer/scenarios.go index c1210ee1ee..c0f4d11fb2 100644 --- a/data/transactions/logic/mocktracer/scenarios.go +++ b/data/transactions/logic/mocktracer/scenarios.go @@ -19,6 +19,7 @@ package mocktracer import ( "encoding/hex" "fmt" + "maps" "math" "github.com/algorand/go-algorand/crypto" @@ -836,9 +837,7 @@ func MergeStateDeltas(deltas ...ledgercore.StateDelta) ledgercore.StateDelta { includedTx.Intra += txidBase result.Txids[txid] = includedTx } - for lease, round := range delta.Txleases { - result.Txleases[lease] = round - } + maps.Copy(result.Txleases, delta.Txleases) } return result } diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go index 20acff0e72..6dc75d555e 100644 --- a/data/transactions/logic/opcodes.go +++ b/data/transactions/logic/opcodes.go @@ -657,6 +657,7 @@ var OpSpecs = []OpSpec{ {0x84, "ed25519verify_bare", opEd25519VerifyBare, proto("bb{64}b{32}:T"), 7, costly(1900)}, {0x85, "falcon_verify", opFalconVerify, proto("bb{1232}b{1793}:T"), 12, costly(1700)}, // dynamic for internal hash? 
{0x86, "sumhash512", opSumhash512, proto("b:b{64}"), sumhashVersion, costByLength(150, 7, 4, 0)}, + {0x87, "sha512", opSHA512, proto("b:b{64}"), 13, costByLength(15, 32, 2, 0)}, // "Function oriented" {0x88, "callsub", opCallSub, proto(":"), 4, detBranch()}, diff --git a/data/transactions/logic/teal.tmLanguage.json b/data/transactions/logic/teal.tmLanguage.json index ed46327e9b..5473503ad0 100644 --- a/data/transactions/logic/teal.tmLanguage.json +++ b/data/transactions/logic/teal.tmLanguage.json @@ -76,7 +76,7 @@ }, { "name": "keyword.operator.teal", - "match": "^(\\!|\\!\\=|%|\u0026|\u0026\u0026|\\*|\\+|\\-|/|\\\u003c|\\\u003c\\=|\\=\\=|\\\u003e|\\\u003e\\=|\\^|addw|bitlen|btoi|divmodw|divw|exp|expw|itob|mulw|shl|shr|sqrt|\\||\\|\\||\\~|b\\!\\=|b%|b\\*|b\\+|b\\-|b/|b\\\u003c|b\\\u003c\\=|b\\=\\=|b\\\u003e|b\\\u003e\\=|bsqrt|b\u0026|b\\^|b\\||b\\~|base64_decode|concat|extract|extract3|extract_uint16|extract_uint32|extract_uint64|getbit|getbyte|json_ref|len|replace2|replace3|setbit|setbyte|substring|substring3|ec_add|ec_map_to|ec_multi_scalar_mul|ec_pairing_check|ec_scalar_mul|ec_subgroup_check|ecdsa_pk_decompress|ecdsa_pk_recover|ecdsa_verify|ed25519verify|ed25519verify_bare|falcon_verify|keccak256|mimc|sha256|sha3_256|sha512_256|sumhash512|vrf_verify|gitxn|gitxna|gitxnas|itxn|itxn_begin|itxn_field|itxn_next|itxn_submit|itxna|itxnas)\\b" + "match": 
"^(\\!|\\!\\=|%|\u0026|\u0026\u0026|\\*|\\+|\\-|/|\\\u003c|\\\u003c\\=|\\=\\=|\\\u003e|\\\u003e\\=|\\^|addw|bitlen|btoi|divmodw|divw|exp|expw|itob|mulw|shl|shr|sqrt|\\||\\|\\||\\~|b\\!\\=|b%|b\\*|b\\+|b\\-|b/|b\\\u003c|b\\\u003c\\=|b\\=\\=|b\\\u003e|b\\\u003e\\=|bsqrt|b\u0026|b\\^|b\\||b\\~|base64_decode|concat|extract|extract3|extract_uint16|extract_uint32|extract_uint64|getbit|getbyte|json_ref|len|replace2|replace3|setbit|setbyte|substring|substring3|ec_add|ec_map_to|ec_multi_scalar_mul|ec_pairing_check|ec_scalar_mul|ec_subgroup_check|ecdsa_pk_decompress|ecdsa_pk_recover|ecdsa_verify|ed25519verify|ed25519verify_bare|falcon_verify|keccak256|mimc|sha256|sha3_256|sha512|sha512_256|sumhash512|vrf_verify|gitxn|gitxna|gitxnas|itxn|itxn_begin|itxn_field|itxn_next|itxn_submit|itxna|itxnas)\\b" } ] }, diff --git a/data/transactions/payment_test.go b/data/transactions/payment_test.go index 6f65a30195..50b8491079 100644 --- a/data/transactions/payment_test.go +++ b/data/transactions/payment_test.go @@ -156,7 +156,7 @@ func TestWellFormedPaymentErrors(t *testing.T) { }, }, proto: protoV27, - expectedError: makeMinFeeErrorf("transaction had fee %d, which is less than the minimum %d", 100, curProto.MinTxnFee), + expectedError: makeMinFeeErrorf("transaction had fee %d, which is less than the minimum %d", 100, protoV27.MinTxnFee), }, { tx: Transaction{ diff --git a/data/transactions/teal.go b/data/transactions/teal.go index 075466ba79..6f886d8680 100644 --- a/data/transactions/teal.go +++ b/data/transactions/teal.go @@ -58,17 +58,18 @@ func (ed EvalDelta) Equal(o EvalDelta) bool { return false } - // GlobalDeltas must be equal if !ed.GlobalDelta.Equal(o.GlobalDelta) { return false } - // Logs must be equal + if !slices.Equal(ed.SharedAccts, o.SharedAccts) { + return false + } + if !slices.Equal(ed.Logs, o.Logs) { return false } - // InnerTxns must be equal if len(ed.InnerTxns) != len(o.InnerTxns) { return false } diff --git a/data/transactions/transaction_test.go 
b/data/transactions/transaction_test.go index 289e5f018b..30e1bdc5f5 100644 --- a/data/transactions/transaction_test.go +++ b/data/transactions/transaction_test.go @@ -19,11 +19,13 @@ package transactions import ( "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" + basics_testing "github.com/algorand/go-algorand/data/basics/testing" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -76,6 +78,7 @@ func TestTransactionHash(t *testing.T) { func TestTransactionIDChanges(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() txn := Transaction{ Type: "pay", @@ -115,3 +118,39 @@ func TestTransactionIDChanges(t *testing.T) { t.Errorf("txid does not depend on lastvalid") } } + +func TestApplyDataEquality(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + var empty ApplyData + for _, nz := range basics_testing.NearZeros(t, ApplyData{}) { + ad := nz.(ApplyData) + assert.False(t, ad.Equal(empty), "Equal() seems to be disregarding something %+v", ad) + } + +} + +func TestEvalDataEquality(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + var empty EvalDelta + for _, nz := range basics_testing.NearZeros(t, EvalDelta{}) { + ed := nz.(EvalDelta) + assert.False(t, ed.Equal(empty), "Equal() seems to be disregarding something %+v", ed) + } + +} + +func TestLogicSigEquality(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + var empty LogicSig + for _, nz := range basics_testing.NearZeros(t, LogicSig{}) { + ls := nz.(LogicSig) + assert.False(t, ls.Equal(&empty), "Equal() seems to be disregarding something %+v", ls) + } + +} diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go index 91ee51d6fc..ca9ca10c90 100644 --- a/data/transactions/verify/txn.go +++ b/data/transactions/verify/txn.go @@ 
-197,7 +197,7 @@ func txnGroup(stxs []transactions.SignedTxn, contextHdr *bookkeeping.BlockHeader } if cache != nil { - cache.Add(stxs, groupCtx) + cache.Add(groupCtx) } return @@ -531,7 +531,7 @@ func PaysetGroups(ctx context.Context, payset [][]transactions.SignedTxn, blkHea if verifyErr != nil { return verifyErr } - cache.AddPayset(txnGroups, groupCtxs) + cache.AddPayset(groupCtxs) return nil }, nextWorkset, worksDoneCh) if err1 != nil { diff --git a/data/transactions/verify/txnBatch.go b/data/transactions/verify/txnBatch.go index 9c32130317..6f1fff7ce5 100644 --- a/data/transactions/verify/txnBatch.go +++ b/data/transactions/verify/txnBatch.go @@ -258,11 +258,10 @@ func (tbp *txnSigBatchProcessor) postProcessVerifiedJobs(ctx interface{}, failed for i := range bl.txnGroups { tbp.sendResult(bl.txnGroups[i], bl.backlogMessage[i], nil) } - tbp.cache.AddPayset(bl.txnGroups, bl.groupCtxs) + tbp.cache.AddPayset(bl.groupCtxs) return } - verifiedTxnGroups := make([][]transactions.SignedTxn, 0, len(bl.txnGroups)) verifiedGroupCtxs := make([]*GroupContext, 0, len(bl.groupCtxs)) failedSigIdx := 0 for txgIdx := range bl.txnGroups { @@ -280,7 +279,6 @@ func (tbp *txnSigBatchProcessor) postProcessVerifiedJobs(ctx interface{}, failed } var result error if !txGroupSigFailed { - verifiedTxnGroups = append(verifiedTxnGroups, bl.txnGroups[txgIdx]) verifiedGroupCtxs = append(verifiedGroupCtxs, bl.groupCtxs[txgIdx]) } else { result = err @@ -288,5 +286,5 @@ func (tbp *txnSigBatchProcessor) postProcessVerifiedJobs(ctx interface{}, failed tbp.sendResult(bl.txnGroups[txgIdx], bl.backlogMessage[txgIdx], result) } // loading them all at once by locking the cache once - tbp.cache.AddPayset(verifiedTxnGroups, verifiedGroupCtxs) + tbp.cache.AddPayset(verifiedGroupCtxs) } diff --git a/data/transactions/verify/txnBatch_test.go b/data/transactions/verify/txnBatch_test.go index d7921ea3a6..036b1fc0b8 100644 --- a/data/transactions/verify/txnBatch_test.go +++ 
b/data/transactions/verify/txnBatch_test.go @@ -721,8 +721,7 @@ func TestStreamToBatchPostVBlocked(t *testing.T) { var badSigResultCounter int var goodSigResultCounter int - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() cache := MakeVerifiedTransactionCache(50) txBacklogSizeMod := txBacklogSize / 20 diff --git a/data/transactions/verify/txn_test.go b/data/transactions/verify/txn_test.go index 12d853af5b..ffcc0f4c77 100644 --- a/data/transactions/verify/txn_test.go +++ b/data/transactions/verify/txn_test.go @@ -820,7 +820,35 @@ byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E= } _, err = TxnGroup(txnGroups[0], &blkHdr, nil, &dummyLedger) require.Error(t, err) - require.Contains(t, err.Error(), "only have one of Sig, Msig, or LMsig") + require.Contains(t, err.Error(), "should only have one of Sig, Msig, or LMsig") + txnGroups[0][0].Lsig.Msig.Subsigs = nil + + ///// logic with sig and LMsig + txnGroups[0][0].Lsig.LMsig.Subsigs = make([]crypto.MultisigSubsig, 1) + txnGroups[0][0].Lsig.LMsig.Subsigs[0] = crypto.MultisigSubsig{ + Key: crypto.PublicKey{0x1}, + Sig: crypto.Signature{0x2}, + } + _, err = TxnGroup(txnGroups[0], &blkHdr, nil, &dummyLedger) + require.Error(t, err) + require.Contains(t, err.Error(), "should only have one of Sig, Msig, or LMsig") + txnGroups[0][0].Lsig.Sig = crypto.Signature{} + txnGroups[0][0].Lsig.LMsig.Subsigs = nil + + ///// logic with Msig and LMsig + txnGroups[0][0].Lsig.Msig.Subsigs = make([]crypto.MultisigSubsig, 1) + txnGroups[0][0].Lsig.Msig.Subsigs[0] = crypto.MultisigSubsig{ + Key: crypto.PublicKey{0x1}, + Sig: crypto.Signature{0x2}, + } + txnGroups[0][0].Lsig.LMsig.Subsigs = make([]crypto.MultisigSubsig, 1) + txnGroups[0][0].Lsig.LMsig.Subsigs[0] = crypto.MultisigSubsig{ + Key: crypto.PublicKey{0x3}, + Sig: crypto.Signature{0x4}, + } + _, err = TxnGroup(txnGroups[0], &blkHdr, nil, &dummyLedger) + require.Error(t, err) + require.Contains(t, err.Error(), "should only have one of 
Sig, Msig, or LMsig") } @@ -833,10 +861,7 @@ func generateTransactionGroups(maxGroupSize int, signedTxns []transactions.Signe txnGroups := make([][]transactions.SignedTxn, 0, len(signedTxns)) for i := 0; i < len(signedTxns); { - txnsInGroup := rand.Intn(protoMaxGroupSize-1) + 1 - if txnsInGroup > maxGroupSize { - txnsInGroup = maxGroupSize - } + txnsInGroup := min(rand.Intn(protoMaxGroupSize-1)+1, maxGroupSize) if i+txnsInGroup > len(signedTxns) { txnsInGroup = len(signedTxns) - i } @@ -1032,8 +1057,18 @@ byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E= func TestTxnGroupCacheUpdateLogicWithMultiSig(t *testing.T) { partitiontest.PartitionTest(t) + testVersions := []protocol.ConsensusVersion{protocol.ConsensusV40, protocol.ConsensusFuture} + for _, consensusVer := range testVersions { + t.Run(string(consensusVer), func(t *testing.T) { + useLMsig := config.Consensus[consensusVer].LogicSigLMsig + testTxnGroupCacheUpdateLogicWithMultiSig(t, consensusVer, useLMsig) + }) + } +} + +func testTxnGroupCacheUpdateLogicWithMultiSig(t *testing.T, consensusVer protocol.ConsensusVersion, useLMsig bool) { secrets, _, pks, multiAddress := generateMultiSigAccounts(t, 30) - blkHdr := createDummyBlockHeader() + blkHdr := createDummyBlockHeader(consensusVer) const numOfTxn = 20 signedTxn := make([]transactions.SignedTxn, numOfTxn) @@ -1058,8 +1093,12 @@ byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E= signedTxn[i].Txn.Sender = multiAddress[s] signedTxn[i].Lsig.Args = [][]byte{[]byte("=0\x97S\x85H\xe9\x91B\xfd\xdb;1\xf5Z\xaec?\xae\xf2I\x93\x08\x12\x94\xaa~\x06\x08\x849b")} signedTxn[i].Lsig.Logic = op.Program - program := logic.MultisigProgram{Addr: crypto.Digest(multiAddress[s]), Program: op.Program} - + var program crypto.Hashable + if useLMsig { + program = logic.MultisigProgram{Addr: crypto.Digest(multiAddress[s]), Program: op.Program} + } else { + program = logic.Program(op.Program) + } // create multi sig that 2 out of 3 has signed the txn var sigs 
[2]crypto.MultisigSig for j := 0; j < 2; j++ { @@ -1069,7 +1108,11 @@ byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E= } msig, err := crypto.MultisigAssemble(sigs[:]) require.NoError(t, err) - signedTxn[i].Lsig.LMsig = msig + if useLMsig { + signedTxn[i].Lsig.LMsig = msig + } else { + signedTxn[i].Lsig.Msig = msig + } } txnGroups := make([][]transactions.SignedTxn, len(signedTxn)) @@ -1079,10 +1122,18 @@ byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E= } breakSignatureFunc := func(txn *transactions.SignedTxn) { - txn.Lsig.LMsig.Subsigs[0].Sig[0]++ + if useLMsig { + txn.Lsig.LMsig.Subsigs[0].Sig[0]++ + } else { + txn.Lsig.Msig.Subsigs[0].Sig[0]++ + } } restoreSignatureFunc := func(txn *transactions.SignedTxn) { - txn.Lsig.LMsig.Subsigs[0].Sig[0]-- + if useLMsig { + txn.Lsig.LMsig.Subsigs[0].Sig[0]-- + } else { + txn.Lsig.Msig.Subsigs[0].Sig[0]-- + } } verifyGroup(t, txnGroups, &blkHdr, breakSignatureFunc, restoreSignatureFunc, crypto.ErrBatchHasFailedSigs.Error()) @@ -1215,3 +1266,268 @@ func BenchmarkTxn(b *testing.B) { } b.StopTimer() } + +// TestLogicSigMultisigValidation verifies that signatures are properly validated +// in different contexts (single-sig vs multisig, different multisig addresses). 
+func TestLogicSigMultisigValidation(t *testing.T) { + partitiontest.PartitionTest(t) + + t.Run("v40", func(t *testing.T) { testLogicSigMultisigValidation(t, protocol.ConsensusV40, false) }) + t.Run("v41", func(t *testing.T) { testLogicSigMultisigValidation(t, protocol.ConsensusV41, true) }) + t.Run("future", func(t *testing.T) { testLogicSigMultisigValidation(t, protocol.ConsensusFuture, true) }) +} + +func testLogicSigMultisigValidation(t *testing.T, consensusVer protocol.ConsensusVersion, useLMsig bool) { + ops, err := logic.AssembleString("int 1") + require.NoError(t, err) + program := ops.Program + + // Generate test keys + secrets := make([]*crypto.SignatureSecrets, 3) + for i := range secrets { + var seed crypto.Seed + crypto.RandBytes(seed[:]) + secrets[i] = crypto.GenerateSignatureSecrets(seed) + } + + // Helper to create a test transaction + makeTestTxn := func(sender basics.Address) transactions.SignedTxn { + return transactions.SignedTxn{ + Txn: transactions.Transaction{ + Type: protocol.PaymentTx, + Header: transactions.Header{ + Sender: sender, + Fee: basics.MicroAlgos{Raw: 1000}, + FirstValid: 1, + LastValid: 100, + GenesisHash: crypto.Hash([]byte{1, 2, 3, 4, 5}), + }, + PaymentTxnFields: transactions.PaymentTxnFields{ + Receiver: basics.Address{}, + Amount: basics.MicroAlgos{Raw: 1000}, + }, + }, + } + } + + // Helper to verify a logic sig + verifyLogicSig := func(t *testing.T, stxn transactions.SignedTxn) error { + blkHdr := createDummyBlockHeader(consensusVer) + dummyLedger := DummyLedgerForSignature{} + groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{stxn}, &blkHdr, &dummyLedger, nil) + require.NoError(t, err) + return logicSigVerify(0, groupCtx) + } + + t.Run("MultisigToSingleSig", func(t *testing.T) { + pks := []crypto.PublicKey{secrets[0].SignatureVerifier} + msigAddr, err := crypto.MultisigAddrGen(1, 1, pks) + require.NoError(t, err) + + // Sign in multisig context + var msig crypto.MultisigSig + if useLMsig { // >=v41: use 
MultisigProgram with address binding + msig, err = crypto.MultisigSign(logic.MultisigProgram{Addr: msigAddr, Program: program}, msigAddr, 1, 1, pks, *secrets[0]) + } else { // v40: use Program directly + msig, err = crypto.MultisigSign(logic.Program(program), msigAddr, 1, 1, pks, *secrets[0]) + } + require.NoError(t, err) + + // Try to use multisig signature as single sig + stxn := makeTestTxn(basics.Address(secrets[0].SignatureVerifier)) + stxn.Lsig = transactions.LogicSig{ + Logic: program, + Sig: msig.Subsigs[0].Sig, + } + + err = verifyLogicSig(t, stxn) + if useLMsig { + require.ErrorContains(t, err, "At least one signature didn't pass verification") + } else { + require.NoError(t, err) + } + }) + + t.Run("SingleSigToMultisig", func(t *testing.T) { + // Sign as single sig + singleSig := secrets[0].Sign(logic.Program(program)) + + // Create multisig with same key + pks := []crypto.PublicKey{secrets[0].SignatureVerifier} + msigAddr, err := crypto.MultisigAddrGen(1, 1, pks) + require.NoError(t, err) + + // Try to use single sig in multisig + stxn := makeTestTxn(basics.Address(msigAddr)) + msigWithSingleSig := crypto.MultisigSig{Version: 1, Threshold: 1, + Subsigs: []crypto.MultisigSubsig{{Key: secrets[0].SignatureVerifier, Sig: singleSig}}, + } + + if useLMsig { // >=v41: use LMsig field + stxn.Lsig = transactions.LogicSig{Logic: program, LMsig: msigWithSingleSig} + err = verifyLogicSig(t, stxn) + require.ErrorContains(t, err, "At least one signature didn't pass verification") + } else { // v40: use Msig field + stxn.Lsig = transactions.LogicSig{Logic: program, Msig: msigWithSingleSig} + err = verifyLogicSig(t, stxn) + require.NoError(t, err) + } + }) + + t.Run("CrossMultisigValidation", func(t *testing.T) { + // Create two different 1-of-2 multisigs + pks1 := []crypto.PublicKey{secrets[0].SignatureVerifier, secrets[1].SignatureVerifier, secrets[2].SignatureVerifier} + pks2 := []crypto.PublicKey{secrets[0].SignatureVerifier, secrets[1].SignatureVerifier} + + 
msigAddr1, err := crypto.MultisigAddrGen(1, 2, pks1) + require.NoError(t, err) + msigAddr2, err := crypto.MultisigAddrGen(1, 2, pks2) + require.NoError(t, err) + + // Sign for each multisig + var sig1, sig2 crypto.MultisigSig + if useLMsig { // >=v41: use MultisigProgram with address binding + sig1, err = crypto.MultisigSign(logic.MultisigProgram{Addr: msigAddr1, Program: program}, msigAddr1, 1, 2, pks1, *secrets[0]) + require.NoError(t, err) + sig2, err = crypto.MultisigSign(logic.MultisigProgram{Addr: msigAddr2, Program: program}, msigAddr2, 1, 2, pks2, *secrets[1]) + require.NoError(t, err) + } else { // v40: use Program directly + sig1, err = crypto.MultisigSign(logic.Program(program), msigAddr1, 1, 2, pks1, *secrets[0]) + require.NoError(t, err) + sig2, err = crypto.MultisigSign(logic.Program(program), msigAddr2, 1, 2, pks2, *secrets[1]) + require.NoError(t, err) + } + + // Try to mix signatures from different multisigs + stxn := makeTestTxn(basics.Address(msigAddr2)) + mixedMsig := crypto.MultisigSig{Version: 1, Threshold: 2, + Subsigs: []crypto.MultisigSubsig{ + {Key: secrets[0].SignatureVerifier, Sig: sig1.Subsigs[0].Sig}, // from msigAddr1 + {Key: secrets[1].SignatureVerifier, Sig: sig2.Subsigs[1].Sig}, // from msigAddr2 + }, + } + + if useLMsig { // >=v41: use LMsig field + stxn.Lsig = transactions.LogicSig{Logic: program, LMsig: mixedMsig} + err = verifyLogicSig(t, stxn) + require.ErrorContains(t, err, "At least one signature didn't pass verification") + } else { // v40: use Msig field + stxn.Lsig = transactions.LogicSig{Logic: program, Msig: mixedMsig} + err = verifyLogicSig(t, stxn) + require.NoError(t, err) + } + }) + + t.Run("DisableMsig", func(t *testing.T) { + // Run on consensus when Msig is disabled, only LMsig allowed + if config.Consensus[consensusVer].LogicSigMsig || !config.Consensus[consensusVer].LogicSigLMsig { + t.Skip("requires LogicSigMsig=false and LogicSigLMsig=true") + } + + pks := []crypto.PublicKey{secrets[0].SignatureVerifier, 
secrets[1].SignatureVerifier} + msigAddr, err := crypto.MultisigAddrGen(1, 2, pks) + require.NoError(t, err) + + // Sign with address binding + sig1, err := crypto.MultisigSign(logic.MultisigProgram{Addr: msigAddr, Program: program}, msigAddr, 1, 2, pks, *secrets[0]) + require.NoError(t, err) + sig2, err := crypto.MultisigSign(logic.MultisigProgram{Addr: msigAddr, Program: program}, msigAddr, 1, 2, pks, *secrets[1]) + require.NoError(t, err) + + msig, err := crypto.MultisigAssemble([]crypto.MultisigSig{sig1, sig2}) + require.NoError(t, err) + + // Create a transaction + stxn := makeTestTxn(basics.Address(msigAddr)) + + // Test with Msig field - should be rejected + stxn.Lsig = transactions.LogicSig{Logic: program, Msig: msig} + err = verifyLogicSig(t, stxn) + require.ErrorContains(t, err, "LogicSig Msig field not supported in this consensus version") + + // Test with LMsig field - should work + stxn.Lsig = transactions.LogicSig{Logic: program, LMsig: msig} + err = verifyLogicSig(t, stxn) + require.NoError(t, err) + + // Test with both fields - should fail + stxn.Lsig = transactions.LogicSig{Logic: program, Msig: msig, LMsig: msig} + err = verifyLogicSig(t, stxn) + require.ErrorContains(t, err, "LogicSig should only have one of Sig, Msig, or LMsig but has more than one") + }) +} + +func TestLogicSigMsigBothFlags(t *testing.T) { + partitiontest.PartitionTest(t) + + // Create a test consensus version with both flags enabled + consensusVer := protocol.ConsensusCurrentVersion + testConsensus := config.Consensus[consensusVer] + testConsensus.LogicSigMsig = true + testConsensus.LogicSigLMsig = true + config.Consensus["test-lmsig-flags"] = testConsensus + defer delete(config.Consensus, "test-lmsig-flags") + + // Simple test program that always approves + ops, err := logic.AssembleString("int 1") + require.NoError(t, err) + program := ops.Program + + // Create test keys + var seed crypto.Seed + crypto.RandBytes(seed[:]) + secret := crypto.GenerateSignatureSecrets(seed) + 
pks := []crypto.PublicKey{secret.SignatureVerifier} + + msigAddr, err := crypto.MultisigAddrGen(1, 1, pks) + require.NoError(t, err) + + // Sign with both methods + msig, err := crypto.MultisigSign(logic.Program(program), msigAddr, 1, 1, pks, *secret) + require.NoError(t, err) + + lmsig, err := crypto.MultisigSign(logic.MultisigProgram{Addr: msigAddr, Program: program}, msigAddr, 1, 1, pks, *secret) + require.NoError(t, err) + + // Create test transaction + stxn := transactions.SignedTxn{ + Txn: transactions.Transaction{ + Type: protocol.PaymentTx, + Header: transactions.Header{ + Sender: basics.Address(msigAddr), + Fee: basics.MicroAlgos{Raw: 1000}, + FirstValid: 1, + LastValid: 100, + GenesisHash: crypto.Hash([]byte{1, 2, 3, 4, 5}), + }, + PaymentTxnFields: transactions.PaymentTxnFields{ + Receiver: basics.Address{}, + Amount: basics.MicroAlgos{Raw: 1000}, + }, + }, + } + + // Helper to verify a logic sig + verifyLogicSig := func() error { + blkHdr := createDummyBlockHeader("test-lmsig-flags") + dummyLedger := DummyLedgerForSignature{} + groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{stxn}, &blkHdr, &dummyLedger, nil) + require.NoError(t, err) + return logicSigVerify(0, groupCtx) + } + + // Test with Msig field only - should work + stxn.Lsig = transactions.LogicSig{Logic: program, Msig: msig} + err = verifyLogicSig() + require.NoError(t, err) + + // Test with LMsig field only - should work + stxn.Lsig = transactions.LogicSig{Logic: program, LMsig: lmsig} + err = verifyLogicSig() + require.NoError(t, err) + + // Test with both fields - should fail + stxn.Lsig = transactions.LogicSig{Logic: program, Msig: msig, LMsig: lmsig} + err = verifyLogicSig() + require.ErrorContains(t, err, "LogicSig should only have one of Sig, Msig, or LMsig but has more than one") +} diff --git a/data/transactions/verify/verifiedTxnCache.go b/data/transactions/verify/verifiedTxnCache.go index 73a33b9761..fd915690d1 100644 --- a/data/transactions/verify/verifiedTxnCache.go 
+++ b/data/transactions/verify/verifiedTxnCache.go @@ -56,9 +56,9 @@ var errMissingPinnedEntry = &VerifiedTxnCacheError{errors.New("Missing pinned en type VerifiedTransactionCache interface { // Add adds a given transaction group and its associated group context to the cache. If any of the transactions already appear // in the cache, the new entry overrides the old one. - Add(txgroup []transactions.SignedTxn, groupCtx *GroupContext) + Add(groupCtx *GroupContext) // AddPayset works in a similar way to Add, but is intended for adding an array of transaction groups, along with their corresponding contexts. - AddPayset(txgroup [][]transactions.SignedTxn, groupCtxs []*GroupContext) + AddPayset(groupCtxs []*GroupContext) // GetUnverifiedTransactionGroups compares the provided payset against the currently cached transactions and figure which transaction groups aren't fully cached. GetUnverifiedTransactionGroups(payset [][]transactions.SignedTxn, CurrSpecAddrs transactions.SpecialAddresses, CurrProto protocol.ConsensusVersion) [][]transactions.SignedTxn // UpdatePinned replaces the pinned entries with the one provided in the pinnedTxns map. This is typically expected to be a subset of the @@ -98,18 +98,18 @@ func MakeVerifiedTransactionCache(cacheSize int) VerifiedTransactionCache { // Add adds a given transaction group and it's associated group context to the cache. If any of the transactions already appear // in the cache, the new entry overrides the old one. -func (v *verifiedTransactionCache) Add(txgroup []transactions.SignedTxn, groupCtx *GroupContext) { +func (v *verifiedTransactionCache) Add(groupCtx *GroupContext) { v.bucketsLock.Lock() defer v.bucketsLock.Unlock() - v.add(txgroup, groupCtx) + v.add(groupCtx) } // AddPayset works in a similar way to Add, but is intended for adding an array of transaction groups, along with their corresponding contexts. 
-func (v *verifiedTransactionCache) AddPayset(txgroup [][]transactions.SignedTxn, groupCtxs []*GroupContext) { +func (v *verifiedTransactionCache) AddPayset(groupCtxs []*GroupContext) { v.bucketsLock.Lock() defer v.bucketsLock.Unlock() - for i := range txgroup { - v.add(txgroup[i], groupCtxs[i]) + for _, groupCtx := range groupCtxs { + v.add(groupCtx) } } @@ -242,14 +242,14 @@ func (v *verifiedTransactionCache) Pin(txgroup []transactions.SignedTxn) (err er } // add is the internal implementation of Add/AddPayset which adds a transaction group to the buffer. -func (v *verifiedTransactionCache) add(txgroup []transactions.SignedTxn, groupCtx *GroupContext) { - if len(v.buckets[v.base])+len(txgroup) > v.entriesPerBucket { +func (v *verifiedTransactionCache) add(groupCtx *GroupContext) { + if len(v.buckets[v.base])+len(groupCtx.signedGroupTxns) > v.entriesPerBucket { // move to the next bucket while deleting the content of the next bucket. v.base = (v.base + 1) % len(v.buckets) v.buckets[v.base] = make(map[transactions.Txid]*GroupContext, v.entriesPerBucket) } currentBucket := v.buckets[v.base] - for _, txn := range txgroup { + for _, txn := range groupCtx.signedGroupTxns { currentBucket[txn.ID()] = groupCtx } } @@ -261,10 +261,10 @@ type mockedCache struct { alwaysVerified bool } -func (v *mockedCache) Add(txgroup []transactions.SignedTxn, groupCtx *GroupContext) { +func (v *mockedCache) Add(groupCtx *GroupContext) { } -func (v *mockedCache) AddPayset(txgroup [][]transactions.SignedTxn, groupCtxs []*GroupContext) { +func (v *mockedCache) AddPayset(groupCtxs []*GroupContext) { } func (v *mockedCache) GetUnverifiedTransactionGroups(txnGroups [][]transactions.SignedTxn, currSpecAddrs transactions.SpecialAddresses, currProto protocol.ConsensusVersion) (unverifiedGroups [][]transactions.SignedTxn) { diff --git a/data/transactions/verify/verifiedTxnCache_test.go b/data/transactions/verify/verifiedTxnCache_test.go index 956b3ea9e9..6fde7135c1 100644 --- 
a/data/transactions/verify/verifiedTxnCache_test.go +++ b/data/transactions/verify/verifiedTxnCache_test.go @@ -36,7 +36,7 @@ func TestAddingToCache(t *testing.T) { txnGroups := generateTransactionGroups(protoMaxGroupSize, signedTxn, secrets, addrs) groupCtx, err := PrepareGroupContext(txnGroups[0], blockHeader, nil, nil) require.NoError(t, err) - impl.Add(txnGroups[0], groupCtx) + impl.Add(groupCtx) // make it was added. for _, txn := range txnGroups[0] { ctx, has := impl.buckets[impl.base][txn.ID()] @@ -55,12 +55,13 @@ func TestBucketCycling(t *testing.T) { _, signedTxn, _, _ := generateTestObjects(entriesPerBucket*bucketCount*2, bucketCount, 0, 0) require.Equal(t, entriesPerBucket*bucketCount*2, len(signedTxn)) - groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{signedTxn[0]}, blockHeader, nil, nil) - require.NoError(t, err) // fill up the cache with entries. for i := 0; i < entriesPerBucket*bucketCount; i++ { - impl.Add([]transactions.SignedTxn{signedTxn[i]}, groupCtx) + txnGroup := []transactions.SignedTxn{signedTxn[i]} + groupCtx, err := PrepareGroupContext(txnGroup, blockHeader, nil, nil) + require.NoError(t, err) + impl.Add(groupCtx) // test to see that the base is sliding when bucket get filled up. require.Equal(t, i/entriesPerBucket, impl.base) } @@ -71,7 +72,10 @@ func TestBucketCycling(t *testing.T) { // -- all buckets are full at this point -- // add one additional item which would flush the bottom bucket. 
- impl.Add([]transactions.SignedTxn{signedTxn[len(signedTxn)-1]}, groupCtx) + txnGroup := []transactions.SignedTxn{signedTxn[len(signedTxn)-1]} + groupCtx, err := PrepareGroupContext(txnGroup, blockHeader, nil, nil) + require.NoError(t, err) + impl.Add(groupCtx) require.Equal(t, 0, impl.base) require.Equal(t, 1, len(impl.buckets[0])) } @@ -93,7 +97,7 @@ func TestGetUnverifiedTransactionGroups50(t *testing.T) { expectedUnverifiedGroups = append(expectedUnverifiedGroups, txnGroups[i]) } else { groupCtx, _ := PrepareGroupContext(txnGroups[i], blockHeader, nil, nil) - impl.Add(txnGroups[i], groupCtx) + impl.Add(groupCtx) } } @@ -117,7 +121,7 @@ func BenchmarkGetUnverifiedTransactionGroups50(b *testing.B) { queryTxnGroups = append(queryTxnGroups, txnGroups[i]) } else { groupCtx, _ := PrepareGroupContext(txnGroups[i], blockHeader, nil, nil) - impl.Add(txnGroups[i], groupCtx) + impl.Add(groupCtx) } } @@ -146,7 +150,7 @@ func TestUpdatePinned(t *testing.T) { // insert some entries. for i := 0; i < len(txnGroups); i++ { groupCtx, _ := PrepareGroupContext(txnGroups[i], blockHeader, nil, nil) - impl.Add(txnGroups[i], groupCtx) + impl.Add(groupCtx) } // pin the first half. @@ -175,7 +179,7 @@ func TestPinningTransactions(t *testing.T) { // insert half of the entries. for i := 0; i < len(txnGroups)/2; i++ { groupCtx, _ := PrepareGroupContext(txnGroups[i], blockHeader, nil, nil) - impl.Add(txnGroups[i], groupCtx) + impl.Add(groupCtx) } // try to pin a previously added entry. 
diff --git a/data/txHandler.go b/data/txHandler.go index c3108f05c7..beb23f68b2 100644 --- a/data/txHandler.go +++ b/data/txHandler.go @@ -707,13 +707,16 @@ func (eic *erlIPClient) OnClose(f func()) { // by adding a helper closer function to track connection closures func (eic *erlIPClient) register(ec util.ErlClient) { eic.m.Lock() - defer eic.m.Unlock() if _, has := eic.clients[ec]; has { // this peer is known => noop + eic.m.Unlock() return } eic.clients[ec] = struct{}{} + eic.m.Unlock() + // Register the OnClose callback without holding eic.m to avoid + // lock ordering deadlock with wsPeer.closersMu ec.OnClose(func() { eic.connClosed(ec) }) diff --git a/data/txHandler_test.go b/data/txHandler_test.go index 9a07218c6e..e24d5088ae 100644 --- a/data/txHandler_test.go +++ b/data/txHandler_test.go @@ -176,8 +176,8 @@ func BenchmarkTxHandlerProcessing(b *testing.B) { // vtCache is a noop VerifiedTransactionCache type vtCache struct{} -func (vtCache) Add(txgroup []transactions.SignedTxn, groupCtx *verify.GroupContext) {} -func (vtCache) AddPayset(txgroup [][]transactions.SignedTxn, groupCtxs []*verify.GroupContext) { +func (vtCache) Add(groupCtx *verify.GroupContext) {} +func (vtCache) AddPayset(groupCtxs []*verify.GroupContext) { return } func (vtCache) GetUnverifiedTransactionGroups(payset [][]transactions.SignedTxn, CurrSpecAddrs transactions.SpecialAddresses, CurrProto protocol.ConsensusVersion) [][]transactions.SignedTxn { @@ -542,8 +542,7 @@ func BenchmarkTxHandlerIncDeDup(b *testing.B) { numPoolWorkers := runtime.NumCPU() dupFactor := test.dupFactor avgDelay := test.workerDelay / time.Duration(numPoolWorkers) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := b.Context() var handler *TxHandler if test.firstLevelOnly { @@ -922,8 +921,7 @@ func TestTxHandlerProcessIncomingCacheRotation(t *testing.T) { t.Run("scheduled", func(t *testing.T) { // double enqueue a single txn message, ensure it discarded - ctx, cancelFunc := 
context.WithCancel(context.Background()) - defer cancelFunc() + ctx := t.Context() handler := makeTestTxHandlerOrphanedWithContext(ctx, txBacklogSize, txBacklogSize, txHandlerConfig{true, true}, 10*time.Millisecond) @@ -944,8 +942,7 @@ func TestTxHandlerProcessIncomingCacheRotation(t *testing.T) { t.Run("manual", func(t *testing.T) { // double enqueue a single txn message, ensure it discarded - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() + ctx := t.Context() handler := makeTestTxHandlerOrphanedWithContext(ctx, txBacklogSize, txBacklogSize, txHandlerConfig{true, true}, 10*time.Millisecond) @@ -1361,10 +1358,7 @@ func getTransactionGroups(N, numUsers, maxGroupSize int, addresses []basics.Addr txnGrps := make([][]transactions.Transaction, N) protoMaxGrpSize := proto.MaxTxGroupSize for u := 0; u < N; u++ { - grpSize := rand.Intn(protoMaxGrpSize-1) + 1 - if grpSize > maxGroupSize { - grpSize = maxGroupSize - } + grpSize := min(rand.Intn(protoMaxGrpSize-1)+1, maxGroupSize) var txGroup transactions.TxGroup txns := make([]transactions.Transaction, 0, grpSize) for g := 0; g < grpSize; g++ { @@ -1794,10 +1788,7 @@ func runHandlerBenchmarkWithBacklog(b *testing.B, txGen txGenIf, tps int, useBac } // Prepare 1000 transactions - genTCount := 1000 - if b.N < genTCount { - genTCount = b.N - } + genTCount := min(b.N, 1000) signedTransactionGroups, badTxnGroups := txGen.createSignedTxGroups(b, genTCount) var encStxns []network.IncomingMessage if useBacklogWorker { @@ -2354,13 +2345,13 @@ func TestMakeTxHandlerErrors(t *testing.T) { nil, nil, nil, &mocks.MockNetwork{}, config.Local{}, } _, err := MakeTxHandler(opts) - require.Error(t, err, ErrInvalidTxPool) + require.ErrorIs(t, err, ErrInvalidTxPool) opts = TxHandlerOpts{ &pools.TransactionPool{}, nil, nil, &mocks.MockNetwork{}, config.Local{}, } _, err = MakeTxHandler(opts) - require.Error(t, err, ErrInvalidLedger) + require.ErrorIs(t, err, ErrInvalidLedger) // it is not possible to test 
MakeStreamVerifier returning an error, because it is not possible to // get the ledger to return an error for returining the header of its latest round @@ -2497,7 +2488,7 @@ func TestTxHandlerRestartWithBacklogAndTxPool(t *testing.T) { //nolint:parallelt inputGoodTxnCount := len(signedTransactionGroups) - len(badTxnGroups) tp := handler.txPool - // Wait untill all the expected transactions are in the pool + // Wait until all the expected transactions are in the pool for x := 0; x < 100; x++ { if len(tp.PendingTxGroups()) == inputGoodTxnCount { break @@ -2924,7 +2915,7 @@ func TestTxHandlerErlClientMapper(t *testing.T) { // TestTxHandlerERLIPClient checks that ERL properly handles sender with the same and different addresses: // Configure ERL in following way: // 1. Small maxCapacity=10 fully shared by two IP senders (TxBacklogReservedCapacityPerPeer=5, IncomingConnectionsLimit=0) -// 2. Submit one from both IP senders to initalize per peer-queues and exhaust shared capacity +// 2. Submit one from both IP senders to initialize per peer-queues and exhaust shared capacity // 3. Make sure the third peer does not come through // 4. 
Make sure extra messages from the first peer and second peer are accepted func TestTxHandlerERLIPClient(t *testing.T) { diff --git a/gen/generate_test.go b/gen/generate_test.go index fcd4d38970..36c3bd4a07 100644 --- a/gen/generate_test.go +++ b/gen/generate_test.go @@ -22,6 +22,7 @@ import ( "io" "os" "path/filepath" + "slices" "strings" "sync" "testing" @@ -41,6 +42,7 @@ import ( ) func TestLoadMultiRootKeyConcurrent(t *testing.T) { + partitiontest.PartitionTest(t) t.Skip() // skip in auto-test mode a := require.New(t) tempDir := t.TempDir() @@ -80,6 +82,7 @@ func TestLoadMultiRootKeyConcurrent(t *testing.T) { } func TestLoadSingleRootKeyConcurrent(t *testing.T) { + partitiontest.PartitionTest(t) t.Skip() // skip in auto-test mode a := require.New(t) tempDir := t.TempDir() @@ -171,12 +174,7 @@ func TestGenesisJsonCreation(t *testing.T) { deterministicAddresses := []string{"FeeSink", "RewardsPool"} isNondeterministicAddress := func(name string) bool { - for _, address := range deterministicAddresses { - if name == address { - return false - } - } - return true + return !slices.Contains(deterministicAddresses, name) } for i := range as { diff --git a/go.mod b/go.mod index c3ccdf86d4..ca3cf2f0ce 100644 --- a/go.mod +++ b/go.mod @@ -1,23 +1,23 @@ module github.com/algorand/go-algorand -go 1.23.0 +go 1.25.0 -toolchain go1.23.9 +toolchain go1.25.3 require ( github.com/DataDog/zstd v1.5.2 github.com/algorand/avm-abi v0.2.0 github.com/algorand/falcon v0.1.0 github.com/algorand/go-codec/codec v1.1.10 - github.com/algorand/go-deadlock v0.2.4 + github.com/algorand/go-deadlock v0.2.5 github.com/algorand/go-sumhash v0.1.0 github.com/algorand/graphtrace v0.1.0 - github.com/algorand/msgp v1.1.60 + github.com/algorand/msgp v1.1.61 github.com/algorand/sortition v1.0.0 github.com/algorand/websocket v1.4.6 github.com/aws/aws-sdk-go v1.34.0 github.com/cockroachdb/pebble v0.0.0-20230807162746-af8c5f279001 - github.com/consensys/gnark-crypto v0.12.1 + github.com/consensys/gnark-crypto 
v0.18.1 github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c github.com/dchest/siphash v1.2.1 github.com/fatih/color v1.13.0 @@ -28,6 +28,7 @@ require ( github.com/google/go-querystring v1.0.0 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.1 + github.com/hdevalence/ed25519consensus v0.2.0 github.com/ipfs/go-log v1.0.5 github.com/ipfs/go-log/v2 v2.5.1 github.com/jmoiron/sqlx v1.2.0 @@ -49,7 +50,7 @@ require ( github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 github.com/sirupsen/logrus v1.8.1 - github.com/spf13/cobra v1.5.0 + github.com/spf13/cobra v1.8.1 github.com/stretchr/testify v1.10.0 go.opencensus.io v0.24.0 go.uber.org/zap v1.27.0 @@ -63,20 +64,20 @@ require ( ) require ( + filippo.io/edwards25519 v1.0.0 // indirect github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.7.0 // indirect + github.com/bits-and-blooms/bitset v1.20.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cockroachdb/errors v1.8.1 // indirect github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f // indirect github.com/cockroachdb/redact v1.0.8 // indirect github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230613231145-182959a1fad6 // indirect - github.com/consensys/bavard v0.1.13 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/docker/go-units v0.5.0 // indirect @@ -100,7 +101,7 @@ require ( github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/huin/goupnp v1.3.0 
// indirect - github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/boxo v0.24.3 // indirect github.com/ipfs/go-cid v0.4.1 // indirect github.com/ipfs/go-datastore v0.6.0 // indirect @@ -135,7 +136,6 @@ require ( github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.1 // indirect - github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect @@ -154,7 +154,7 @@ require ( github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/perimeterx/marshmallow v1.1.5 // indirect - github.com/petermattis/goid v0.0.0-20241025130422-66cb2e6d7274 // indirect + github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe // indirect github.com/pion/datachannel v1.5.9 // indirect github.com/pion/dtls/v2 v2.2.12 // indirect github.com/pion/ice/v2 v2.3.36 // indirect @@ -177,13 +177,13 @@ require ( github.com/prometheus/common v0.60.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/quic-go/qpack v0.5.1 // indirect - github.com/quic-go/quic-go v0.48.2 // indirect + github.com/quic-go/quic-go v0.49.1 // indirect github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/pflag v1.0.6 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/ugorji/go/codec v1.2.14 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect @@ 
-204,8 +204,6 @@ require ( golang.org/x/tools v0.26.0 // indirect gonum.org/v1/gonum v0.15.0 // indirect google.golang.org/protobuf v1.35.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.3.0 // indirect - rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/go.sum b/go.sum index 0d7b065bca..d42c15dc5b 100644 --- a/go.sum +++ b/go.sum @@ -6,6 +6,8 @@ dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek= +filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -24,14 +26,14 @@ github.com/algorand/falcon v0.1.0 h1:xl832kfZ7hHG6B4p90DQynjfKFGbIUgUOnsRiMZXfAo github.com/algorand/falcon v0.1.0/go.mod h1:OkQyHlGvS0kLNcIWbC21/uQcnbfwSOQm+wiqWwBG9pQ= github.com/algorand/go-codec/codec v1.1.10 h1:zmWYU1cp64jQVTOG8Tw8wa+k0VfwgXIPbnDfiVa+5QA= github.com/algorand/go-codec/codec v1.1.10/go.mod h1:YkEx5nmr/zuCeaDYOIhlDg92Lxju8tj2d2NrYqP7g7k= -github.com/algorand/go-deadlock v0.2.4 h1:UMs6GwE2wHC6BUZo5z32/+SrBey1LQjbkZQ3V7DoGVA= -github.com/algorand/go-deadlock v0.2.4/go.mod h1:tewhAviZpVq2cnGHmfT50l6RwWLnuygnfNntCN2fz0M= +github.com/algorand/go-deadlock v0.2.5 
h1:Kn3WJMn9+wK1pqJrr2+1/y3Z8p1dcftpr2Mbbl1CShw= +github.com/algorand/go-deadlock v0.2.5/go.mod h1:z0g1kdYBhezsHoEKqYf5dVnP9dGMwOOqqxUSTCk2Oks= github.com/algorand/go-sumhash v0.1.0 h1:b/QRhyLuF//vOcicBIxBXYW8bERNoeLxieht/dUYpVg= github.com/algorand/go-sumhash v0.1.0/go.mod h1:OOe7jdDWUhLkuP1XytkK5gnLu9entAviN5DfDZh6XAc= github.com/algorand/graphtrace v0.1.0 h1:QemP1iT0W56SExD0NfiU6rsG34/v0Je6bg5UZnptEUM= github.com/algorand/graphtrace v0.1.0/go.mod h1:HscLQrzBdH1BH+5oehs3ICd8SYcXvnSL9BjfTu8WHCc= -github.com/algorand/msgp v1.1.60 h1:+IVUC34+tSj1P2M1mkYtl4GLyfzdzXfBLSw6TDT19M8= -github.com/algorand/msgp v1.1.60/go.mod h1:RqZQBzAFDWpwh5TlabzZkWy+6kwL9cvXfLbU0gD99EA= +github.com/algorand/msgp v1.1.61 h1:IDSCGKLIi60n6j0lHDu37GTsCo9anw49Rq4PTwsDQsQ= +github.com/algorand/msgp v1.1.61/go.mod h1:j9sEjNKkS12H0Yhwov/3MfzhM60n3iyr81Ymzv49pu8= github.com/algorand/sortition v1.0.0 h1:PJiZtdSTBm4nArQrZXBnhlljHXhuyAXRJBqVWowQu3E= github.com/algorand/sortition v1.0.0/go.mod h1:23CZwAbTWPv0bBsq+Php/2J6Y/iXDyzlfcZyepeY5Fo= github.com/algorand/websocket v1.4.6 h1:I0kV4EYwatuUrKtNiwzYYgojgwh6pksDmlqntKG2Woc= @@ -50,8 +52,8 @@ github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= -github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU= +github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= github.com/bradfitz/go-smtpd 
v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= @@ -82,10 +84,8 @@ github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk github.com/cockroachdb/tokenbucket v0.0.0-20230613231145-182959a1fad6 h1:DJK8W/iB+s/qkTtmXSrHA49lp5O3OsR7E6z4byOLy34= github.com/cockroachdb/tokenbucket v0.0.0-20230613231145-182959a1fad6/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= -github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= -github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= -github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= -github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= +github.com/consensys/gnark-crypto v0.18.1 h1:RyLV6UhPRoYYzaFnPQA4qK3DyuDgkTgskDdoGqFt3fI= +github.com/consensys/gnark-crypto v0.18.1/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= @@ -99,8 +99,8 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.2 
h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -231,7 +231,6 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI github.com/google/pprof v0.0.0-20241017200806-017d972448fc h1:NGyrhhFhwvRAZg02jnYVg3GBQy0qGBKmFQJwaPmpmxs= github.com/google/pprof v0.0.0-20241017200806-017d972448fc/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -259,13 +258,16 @@ github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= +github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/ipfs/boxo v0.24.3 h1:gldDPOWdM3Rz0v5LkVLtZu7A7gFNvAlWcmxhCqlHR3c= github.com/ipfs/boxo v0.24.3/go.mod h1:h0DRzOY1IBFDHp6KNvrJLMFdSXTYID0Zf+q7X05JsNg= github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= @@ -351,8 +353,8 @@ github.com/labstack/echo/v4 v4.13.3/go.mod h1:o90YNEeQWjDozo584l7AwhJMHN0bOC4tAf github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0= github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU= -github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= -github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4= +github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c= github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= @@ -433,9 +435,6 @@ 
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dz github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= -github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= -github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= @@ -502,8 +501,8 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhM github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= -github.com/petermattis/goid v0.0.0-20241025130422-66cb2e6d7274 h1:qli3BGQK0tYDkSEvZ/FzZTi9ZrOX86Q6CIhKLGc489A= -github.com/petermattis/goid v0.0.0-20241025130422-66cb2e6d7274/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe h1:vHpqOnPlnkba8iSxU4j/CvDSS9J4+F4473esQsYLGoE= +github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pion/datachannel 
v1.5.9 h1:LpIWAOYPyDrXtU+BW7X0Yt/vGtYxtXQ8ql7dFfYUVZA= @@ -572,8 +571,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= -github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE= -github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs= +github.com/quic-go/quic-go v0.49.1 h1:e5JXpUyF0f2uFjckQzD8jTghZrOUK1xxDqqZhlwixo0= +github.com/quic-go/quic-go v0.49.1/go.mod h1:s2wDnmCdooUQBmQfpUSTCYBl1/D4FcqbULMMkASvR6s= github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg= github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= @@ -629,12 +628,13 @@ github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= -github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 
h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -975,7 +975,5 @@ lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= -rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= -rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/installer/config.json.example b/installer/config.json.example index fec7d7bf6d..f47ffed51f 100644 --- a/installer/config.json.example +++ b/installer/config.json.example @@ -1,5 +1,5 @@ { - "Version": 36, + "Version": 37, "AccountUpdatesStatsInterval": 5000000000, "AccountsRebuildSynchronousMode": 1, "AgreementIncomingBundlesQueueLength": 15, @@ -42,6 +42,7 @@ "EnableAgreementReporting": false, "EnableAgreementTimeMetrics": false, "EnableAssembleStats": false, + "EnableBatchVerification": true, "EnableBlockService": false, "EnableDHTProviders": false, "EnableDeveloperAPI": false, @@ -118,6 +119,7 @@ "RestReadTimeoutSeconds": 15, "RestWriteTimeoutSeconds": 120, "RunHosted": false, + 
"StatefulVoteCompressionTableSize": 2048, "StateproofDir": "", "StorageEngine": "sqlite", "SuggestedFeeBlockHistory": 3, diff --git a/ledger/acctdeltas_test.go b/ledger/acctdeltas_test.go index 7c1dd70eb7..021d23597a 100644 --- a/ledger/acctdeltas_test.go +++ b/ledger/acctdeltas_test.go @@ -23,6 +23,7 @@ import ( "encoding/binary" "errors" "fmt" + "maps" "math" "math/rand" "os" @@ -188,9 +189,7 @@ func creatablesFromUpdates(base map[basics.Address]basics.AccountData, updates l func applyPartialDeltas(base map[basics.Address]basics.AccountData, deltas ledgercore.AccountDeltas) map[basics.Address]basics.AccountData { result := make(map[basics.Address]basics.AccountData, len(base)+deltas.Len()) - for addr, ad := range base { - result[addr] = ad - } + maps.Copy(result, base) for i := 0; i < deltas.Len(); i++ { addr, _ := deltas.GetByIdx(i) @@ -475,10 +474,7 @@ func randomCreatableSampling(iteration int, crtbsList []basics.CreatableIndex, iteration-- // 0-based here delSegmentEnd := iteration * numElementsPerSegement - delSegmentStart := delSegmentEnd - numElementsPerSegement - if delSegmentStart < 0 { - delSegmentStart = 0 - } + delSegmentStart := max(delSegmentEnd-numElementsPerSegement, 0) newSample := make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable) stop := delSegmentEnd + numElementsPerSegement @@ -707,10 +703,7 @@ func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder boo b.StopTimer() balancesLoopStart := time.Now() // generate a chunk; - chunkSize := targetAccountsCount - accountsLoaded - if chunkSize > BalancesPerCatchpointFileChunk { - chunkSize = BalancesPerCatchpointFileChunk - } + chunkSize := min(targetAccountsCount-accountsLoaded, BalancesPerCatchpointFileChunk) last64KSize += chunkSize if accountsLoaded >= targetAccountsCount-64*1024 && last64KStart.IsZero() { last64KStart = time.Now() @@ -1484,18 +1477,10 @@ func (m mockAccountWriter) clone() (m2 mockAccountWriter) { m2.resources = 
make(map[mockResourcesKey]ledgercore.AccountResource, len(m.resources)) m2.addresses = make(map[basics.Address]trackerdb.AccountRef, len(m.resources)) m2.rowids = make(map[trackerdb.AccountRef]basics.Address, len(m.rowids)) - for k, v := range m.accounts { - m2.accounts[k] = v - } - for k, v := range m.resources { - m2.resources[k] = v - } - for k, v := range m.addresses { - m2.addresses[k] = v - } - for k, v := range m.rowids { - m2.rowids[k] = v - } + maps.Copy(m2.accounts, m.accounts) + maps.Copy(m2.resources, m.resources) + maps.Copy(m2.addresses, m.addresses) + maps.Copy(m2.rowids, m.rowids) m2.lastAcctRef = m.lastAcctRef m2.availAcctRefs = m.availAcctRefs return m2 @@ -1821,7 +1806,7 @@ func compactResourcesDeltasPermutations(a *require.Assertions, crd compactResour // Investigation shown there was another account YF5GJTPPMOUPU2GRGGVP2PGJTQZWGSWZISFHNIKDJSZ2CDPPWN4KKKYVQE // opted in into the same app 22045503. During the commit range the following happened: // at 16541783 YF5 made a payment txn (one acct delta) -// at 16541785 RGJ has been funded and and opted in into app 22045503 (one acct delta, one res delta) +// at 16541785 RGJ has been funded and opted in into app 22045503 (one acct delta, one res delta) // at 16541788 YF5 address had clear state txn for 22045503, and close out txn for the entire account (one acct delta, one res delta) // Because YF5 had modifications before RGJ, all its acct deltas were compacted into a single entry before RGJ (delete, create) // In the same time, the order in resources delta remained the same (opt-in, delete). 
@@ -2791,6 +2776,8 @@ func TestAccountOnlineRoundParams(t *testing.T) { // onlineAccountsDelete(2): A online // onlineAccountsDelete(3): A offline, B online // etc +// +//nolint:dupword // ignore func TestOnlineAccountsDeletion(t *testing.T) { partitiontest.PartitionTest(t) diff --git a/ledger/acctonline.go b/ledger/acctonline.go index a66b993ced..ec4cb0e590 100644 --- a/ledger/acctonline.go +++ b/ledger/acctonline.go @@ -345,6 +345,7 @@ func (ao *onlineAccounts) consecutiveVersion(offset uint64) uint64 { if ao.onlineRoundParamsData[startIndex+1].CurrentProtocol != ao.onlineRoundParamsData[startIndex+int(offset)].CurrentProtocol { // find the tip point. tipPoint := sort.Search(int(offset), func(i int) bool { + //nolint:dupword // ignore // we're going to search here for version inequality, with the assumption that consensus versions won't repeat. // that allow us to support [ver1, ver1, ..., ver2, ver2, ..., ver3, ver3] but not [ver1, ver1, ..., ver2, ver2, ..., ver1, ver3]. return ao.onlineRoundParamsData[startIndex+1].CurrentProtocol != ao.onlineRoundParamsData[startIndex+1+i].CurrentProtocol diff --git a/ledger/acctonline_expired_test.go b/ledger/acctonline_expired_test.go index 3e46c41ecb..90ebc931a1 100644 --- a/ledger/acctonline_expired_test.go +++ b/ledger/acctonline_expired_test.go @@ -25,6 +25,7 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/crypto/merklesignature" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/txntest" @@ -320,6 +321,7 @@ func (m *doubleLedgerAcctModel) goOnline(addr basics.Address, firstvalid, lastva // meaningless non-zero voting data VotePK: crypto.OneTimeSignatureVerifier(addr), SelectionPK: crypto.VRFVerifier(addr), + StateProofPK: merklesignature.Commitment{1}, VoteKeyDilution: 1024, }) m.accts[addr] = m.ops.Sub(m.accts[addr], basics.MicroAlgos{Raw: minFee}) diff 
--git a/ledger/acctonline_test.go b/ledger/acctonline_test.go index d45f7da550..6654e8718c 100644 --- a/ledger/acctonline_test.go +++ b/ledger/acctonline_test.go @@ -19,6 +19,7 @@ package ledger import ( "context" "fmt" + "maps" "sort" "strconv" "testing" @@ -304,7 +305,7 @@ func TestAcctOnline(t *testing.T) { require.NoError(t, err) require.Empty(t, oad) } - // check next next account + // check next-next account // for the account 2, it set to Offline at round 3 // at round 1 + 1 = 2 it online and should te correctly retrieved from DB and lookup nextNextAcctIdx := nextAcctIdx + 1 @@ -871,9 +872,7 @@ func TestAcctOnlineCacheDBSync(t *testing.T) { copyGenesisAccts := func() []map[basics.Address]basics.AccountData { accounts := []map[basics.Address]basics.AccountData{{}} accounts[0] = make(map[basics.Address]basics.AccountData, numAccts) - for addr, ad := range genesisAccts[0] { - accounts[0][addr] = ad - } + maps.Copy(accounts[0], genesisAccts[0]) return accounts } diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go index 892cf43766..71cf03d981 100644 --- a/ledger/acctupdates_test.go +++ b/ledger/acctupdates_test.go @@ -21,6 +21,7 @@ import ( "context" "errors" "fmt" + "maps" "os" "runtime" "strings" @@ -167,15 +168,12 @@ func (ml *mockLedgerForTracker) fork(t testing.TB) *mockLedgerForTracker { log: dblogger, blocks: make([]blockEntry, len(ml.blocks)), deltas: make([]ledgercore.StateDelta, len(ml.deltas)), - accts: make(map[basics.Address]basics.AccountData), + accts: maps.Clone(ml.accts), filename: fn, consensusParams: ml.consensusParams, consensusVersion: ml.consensusVersion, trackers: trackerRegistry{log: dblogger}, } - for k, v := range ml.accts { - newLedgerTracker.accts[k] = v - } copy(newLedgerTracker.blocks, ml.blocks) copy(newLedgerTracker.deltas, ml.deltas) @@ -1119,7 +1117,7 @@ func TestKVCache(t *testing.T) { require.False(t, has) } - // verify commited kvs appear in the kv cache + // verify committed kvs appear in the kv cache for ; 
currentDBRound <= au.cachedDBRound; currentDBRound++ { startKV := (currentDBRound - 1) * basics.Round(kvsPerBlock) for j := 0; j < kvsPerBlock; j++ { @@ -1163,7 +1161,7 @@ func TestKVCache(t *testing.T) { } } - // verify commited updated kv values appear in the kv cache + // verify committed updated kv values appear in the kv cache for ; currentDBRound <= au.cachedDBRound; currentDBRound++ { lookback := basics.Round(kvCnt/kvsPerBlock + int(conf.MaxAcctLookback) + 1) if currentDBRound < lookback { @@ -1215,7 +1213,7 @@ func TestKVCache(t *testing.T) { } } - // verify commited updated kv values appear in the kv cache + // verify committed updated kv values appear in the kv cache for ; currentDBRound <= au.cachedDBRound; currentDBRound++ { lookback := basics.Round(2*(kvCnt/kvsPerBlock+int(conf.MaxAcctLookback)) + 1) if currentDBRound < lookback { @@ -1948,7 +1946,7 @@ func TestAcctUpdatesResources(t *testing.T) { updates.UpsertAssetResource(addr1, aidx, ledgercore.AssetParamsDelta{}, ledgercore.AssetHoldingDelta{Holding: &basics.AssetHolding{Amount: 200}}) } - // test 2: send back to creator creator + // test 2: send back to creator // expect matching balances at the end creatorParams := ledgercore.AssetParamsDelta{Params: &basics.AssetParams{Total: 1000}} if i == 4 { diff --git a/ledger/applications_test.go b/ledger/applications_test.go index 8be15bae91..a0235caec8 100644 --- a/ledger/applications_test.go +++ b/ledger/applications_test.go @@ -144,32 +144,64 @@ return` a.Contains(genesisInitState.Accounts, userLocal) a.Contains(genesisInitState.Accounts, userLocal2) + // Calculate expected balances based on the protocol's MinTxnFee + // This makes the test resilient to fee changes + + // Look up initial balances - these addresses are deterministic based on the GenerateInitState function + // The creator address corresponds to a specific genaddrs index + creatorInitialBalance := genesisInitState.Accounts[creator].MicroAlgos.Raw + userOptinInitialBalance := 
genesisInitState.Accounts[userOptin].MicroAlgos.Raw + userLocalInitialBalance := genesisInitState.Accounts[userLocal].MicroAlgos.Raw + + // Creator pays 1 transaction with Fee = 2 * MinTxnFee + creatorFinalBalance := creatorInitialBalance - (proto.MinTxnFee * 2) + // UserOptin pays 1 transaction with Fee = 2 * MinTxnFee + userOptinFinalBalance := userOptinInitialBalance - (proto.MinTxnFee * 2) + // UserLocal pays 2 transactions with Fee = 2 * MinTxnFee each + userLocalFinalBalance := userLocalInitialBalance - (proto.MinTxnFee * 2 * 2) + + // Build the expected encoded structures dynamically + // Note: These are MessagePack encoded AccountData structures + // We need to preserve the exact format but update the balance fields + var expectedCreatorBase, expectedCreatorResource, expectedUserOptInBase, expectedUserOptInResource, expectedUserLocalBase, expectedUserLocalResource []byte // the difference between these encoded structure is the UpdateRound variable. This variable is not being set before // the consensus upgrade, and affects only nodes that have been updated. 
if proto.EnableLedgerDataUpdateRound { - expectedCreatorBase, err = hex.DecodeString("87a14301a144ce000186a0a16101a162ce009d2290a16704a16b01a17a01") + // Build the hex strings dynamically with the calculated balances + // Format: 87a14301a144ce000186a0a16101a162ce[BALANCE]a16704a16b01a17a01 + creatorHex := fmt.Sprintf("87a14301a144ce000186a0a16101a162ce%08xa16704a16b01a17a01", creatorFinalBalance) + expectedCreatorBase, err = hex.DecodeString(creatorHex) a.NoError(err) expectedCreatorResource, err = hex.DecodeString("86a171c45602200200012604056c6f63616c06676c6f62616c026c6b02676b3118221240003331192212400010311923124000022243311b221240001c361a00281240000a361a0029124000092243222a28664200032b29672343a172c40102a17501a17704a17903a17a01") a.NoError(err) - expectedUserOptInBase, err = hex.DecodeString("87a14301a144ce000186a0a16101a162ce00a02fd0a16701a16c01a17a02") + + userOptinHex := fmt.Sprintf("87a14301a144ce000186a0a16101a162ce%08xa16701a16c01a17a02", userOptinFinalBalance) + expectedUserOptInBase, err = hex.DecodeString(userOptinHex) a.NoError(err) expectedUserOptInResource, err = hex.DecodeString("82a16f01a17a02") a.NoError(err) - expectedUserLocalBase, err = hex.DecodeString("87a14301a144ce000186a0a16101a162ce00a33540a16701a16c01a17a04") + + userLocalHex := fmt.Sprintf("87a14301a144ce000186a0a16101a162ce%08xa16701a16c01a17a04", userLocalFinalBalance) + expectedUserLocalBase, err = hex.DecodeString(userLocalHex) a.NoError(err) expectedUserLocalResource, err = hex.DecodeString("83a16f01a17081a26c6b82a27462a56c6f63616ca2747401a17a04") a.NoError(err) } else { - expectedCreatorBase, err = hex.DecodeString("84a16101a162ce009d2290a16704a16b01") + creatorHex := fmt.Sprintf("84a16101a162ce%08xa16704a16b01", creatorFinalBalance) + expectedCreatorBase, err = hex.DecodeString(creatorHex) a.NoError(err) expectedCreatorResource, err = 
hex.DecodeString("85a171c45602200200012604056c6f63616c06676c6f62616c026c6b02676b3118221240003331192212400010311923124000022243311b221240001c361a00281240000a361a0029124000092243222a28664200032b29672343a172c40102a17501a17704a17903") a.NoError(err) - expectedUserOptInBase, err = hex.DecodeString("84a16101a162ce00a02fd0a16701a16c01") + + userOptinHex := fmt.Sprintf("84a16101a162ce%08xa16701a16c01", userOptinFinalBalance) + expectedUserOptInBase, err = hex.DecodeString(userOptinHex) a.NoError(err) expectedUserOptInResource, err = hex.DecodeString("81a16f01") a.NoError(err) - expectedUserLocalBase, err = hex.DecodeString("84a16101a162ce00a33540a16701a16c01") + + userLocalHex := fmt.Sprintf("84a16101a162ce%08xa16701a16c01", userLocalFinalBalance) + expectedUserLocalBase, err = hex.DecodeString(userLocalHex) a.NoError(err) expectedUserLocalResource, err = hex.DecodeString("82a16f01a17081a26c6b82a27462a56c6f63616ca2747401") a.NoError(err) diff --git a/ledger/apply/heartbeat_test.go b/ledger/apply/heartbeat_test.go index bc0c775de4..8e1fe05d52 100644 --- a/ledger/apply/heartbeat_test.go +++ b/ledger/apply/heartbeat_test.go @@ -22,6 +22,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" @@ -35,6 +36,7 @@ func TestHeartbeat(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() + testConsensusVersion := protocol.ConsensusFuture // Creator sender := basics.Address{0x01} voter := basics.Address{0x02} @@ -46,7 +48,7 @@ func TestHeartbeat(t *testing.T) { id := basics.OneTimeIDForRound(lv, keyDilution) otss := crypto.GenerateOneTimeSignatureSecrets(1, 2) // This will cover rounds 1-2*777 - mockBal := makeMockBalancesWithAccounts(protocol.ConsensusFuture, map[basics.Address]basics.AccountData{ + mockBal := makeMockBalancesWithAccounts(testConsensusVersion, 
map[basics.Address]basics.AccountData{ sender: { MicroAlgos: basics.MicroAlgos{Raw: 10_000_000}, }, @@ -87,7 +89,8 @@ func TestHeartbeat(t *testing.T) { require.ErrorContains(t, err, "cheap heartbeat") // address fee - tx.Fee = basics.MicroAlgos{Raw: 1000} + testProto := config.Consensus[testConsensusVersion] + tx.Fee = basics.MicroAlgos{Raw: testProto.MinTxnFee} // Seed is missing err = Heartbeat(*tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, rnd) @@ -123,6 +126,7 @@ func TestCheapRules(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() + testConsensusVersion := protocol.ConsensusFuture type tcase struct { rnd basics.Round addrStart byte @@ -160,7 +164,7 @@ func TestCheapRules(t *testing.T) { sender := basics.Address{0x01} voter := basics.Address{tc.addrStart} - mockBal := makeMockBalancesWithAccounts(protocol.ConsensusFuture, map[basics.Address]basics.AccountData{ + mockBal := makeMockBalancesWithAccounts(testConsensusVersion, map[basics.Address]basics.AccountData{ sender: { MicroAlgos: basics.MicroAlgos{Raw: 10_000_000}, }, @@ -177,7 +181,7 @@ func TestCheapRules(t *testing.T) { mockHdr := makeMockHeaders() mockHdr.setFallback(bookkeeping.BlockHeader{ UpgradeState: bookkeeping.UpgradeState{ - CurrentProtocol: protocol.ConsensusFuture, + CurrentProtocol: testConsensusVersion, }, Seed: seed, }) diff --git a/ledger/apptxn_test.go b/ledger/apptxn_test.go index 8f8047feaf..b66eae15d9 100644 --- a/ledger/apptxn_test.go +++ b/ledger/apptxn_test.go @@ -47,6 +47,7 @@ func TestPayAction(t *testing.T) { ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() + proto := config.Consensus[cv] ai := dl.fundedApp(addrs[0], 200000, // account min balance, plus fees main(` @@ -62,19 +63,24 @@ func TestPayAction(t *testing.T) { // We're going to test some payout effects here too, so that we have an inner transaction example. 
proposer := basics.Address{0x01, 0x02, 0x03} + stateProofPK := merklesignature.Commitment{0x03} + if ver < 31 { // no state proof support + stateProofPK = merklesignature.Commitment{} + } dl.txns(&txntest.Txn{ Type: "pay", Sender: addrs[7], Receiver: proposer, Amount: 1_000_000 * 1_000_000, // 1 million algos is surely an eligible amount }, &txntest.Txn{ - Type: "keyreg", - Sender: proposer, - Fee: 3_000_000, - VotePK: crypto.OneTimeSignatureVerifier{0x01}, - SelectionPK: crypto.VRFVerifier{0x02}, - StateProofPK: merklesignature.Commitment{0x03}, - VoteFirst: 1, VoteLast: 1000, + Type: "keyreg", + Sender: proposer, + Fee: 3_000_000, + VotePK: crypto.OneTimeSignatureVerifier{0x01}, + SelectionPK: crypto.VRFVerifier{0x02}, + VoteKeyDilution: 1000, + StateProofPK: stateProofPK, + VoteFirst: 1, VoteLast: 1000, }) payout1 := txntest.Txn{ @@ -93,7 +99,7 @@ func TestPayAction(t *testing.T) { const payoutsVer = 40 if ver >= payoutsVer { require.True(t, dl.generator.GenesisProto().Payouts.Enabled) - require.EqualValues(t, 2000, vb.Block().FeesCollected.Raw) + require.EqualValues(t, 2*proto.MinTxnFee, vb.Block().FeesCollected.Raw) } else { require.False(t, dl.generator.GenesisProto().Payouts.Enabled) require.Zero(t, vb.Block().FeesCollected) @@ -104,11 +110,11 @@ func TestPayAction(t *testing.T) { dl.t.Log("postsink", postsink, "postprop", postprop) if ver >= payoutsVer { - bonus := 10_000_000 // config/consensus.go - assert.EqualValues(t, bonus-1000, presink-postsink) // based on 50% in config/consensus.go - require.EqualValues(t, bonus+1000, postprop-preprop) + bonus := 10_000_000 // config/consensus.go + assert.EqualValues(t, bonus-int(proto.MinTxnFee), presink-postsink) // based on 50% in config/consensus.go + require.EqualValues(t, bonus+int(proto.MinTxnFee), postprop-preprop) } else { - require.EqualValues(t, 2000, postsink-presink) // no payouts yet + require.EqualValues(t, 2*proto.MinTxnFee, postsink-presink) // no payouts yet } ad0 := micros(dl.t, dl.generator, 
addrs[0]) @@ -116,12 +122,12 @@ func TestPayAction(t *testing.T) { app := micros(dl.t, dl.generator, ai.Address()) genAccounts := genBalances.Balances - // create(1000) and fund(1000 + 200000) - require.Equal(t, uint64(202000), genAccounts[addrs[0]].MicroAlgos.Raw-ad0) - // paid 5000, but 1000 fee - require.Equal(t, uint64(4000), ad1-genAccounts[addrs[1]].MicroAlgos.Raw) - // app still has 194000 (paid out 5000, and paid fee to do it) - require.Equal(t, uint64(194000), app) + // create(MinTxnFee) and fund(MinTxnFee + 200000) + require.Equal(t, uint64(2*proto.MinTxnFee+200000), genAccounts[addrs[0]].MicroAlgos.Raw-ad0) + // paid 5000, but MinTxnFee fee + require.Equal(t, uint64(5000-proto.MinTxnFee), ad1-genAccounts[addrs[1]].MicroAlgos.Raw) + // app still has 200000-5000-MinTxnFee (paid out 5000, and paid fee to do it) + require.Equal(t, uint64(200000-5000-proto.MinTxnFee), app) // Build up Residue in RewardsState so it's ready to pay for i := 1; i < 10; i++ { @@ -157,11 +163,11 @@ func TestPayAction(t *testing.T) { ad2 := micros(dl.t, dl.validator, addrs[2]) app = micros(dl.t, dl.validator, ai.Address()) - // paid 5000, in first payout (only), but paid 1000 fee in each payout txn - require.Equal(t, rewards+3000, ad1-genAccounts[addrs[1]].MicroAlgos.Raw) - // app still has 188000 (paid out 10000, and paid 2k fees to do it) + // paid 5000, in first payout (only), but paid MinTxnFee fee in each payout txn + require.Equal(t, rewards+5000-2*proto.MinTxnFee, ad1-genAccounts[addrs[1]].MicroAlgos.Raw) + // app still has 200000-10000-2*MinTxnFee (paid out 10000, and paid 2 fees to do it) // no rewards because owns less than an algo - require.Equal(t, uint64(200000)-10000-2000, app) + require.Equal(t, uint64(200000)-10000-2*proto.MinTxnFee, app) // paid 5000 by payout2, never paid any fees, got same rewards require.Equal(t, rewards+uint64(5000), ad2-genAccounts[addrs[2]].MicroAlgos.Raw) @@ -190,7 +196,7 @@ func TestPayAction(t *testing.T) { appreward := 
inners[0].SenderRewards.Raw require.Greater(t, appreward, uint64(1000)) - require.Equal(t, beforepay+appreward-5000-1000, afterpay) + require.Equal(t, beforepay+appreward-5000-proto.MinTxnFee, afterpay) }) } @@ -362,6 +368,7 @@ func TestClawbackAction(t *testing.T) { dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() + proto := config.Consensus[cv] app := txntest.Txn{ Type: "appl", Sender: addrs[0], @@ -400,7 +407,7 @@ func TestClawbackAction(t *testing.T) { Type: "pay", Sender: bystander, Receiver: bystander, - Fee: 2000, // Overpay fee so that app account can be unfunded + Fee: 2 * proto.MinTxnFee, // Overpay fee so that app account can be unfunded } clawmove := txntest.Txn{ Type: "appl", @@ -430,6 +437,7 @@ func TestRekeyAction(t *testing.T) { dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() + proto := config.Consensus[cv] ezpayer := txntest.Txn{ Type: "appl", Sender: addrs[5], @@ -472,7 +480,7 @@ skipclose: // addrs[2] got paid require.Equal(t, uint64(5000), micros(t, dl.generator, addrs[2])-micros(t, dl.generator, addrs[6])) // addrs[0] paid 5k + rekey fee + inner txn fee - require.Equal(t, uint64(7000), micros(t, dl.generator, addrs[6])-micros(t, dl.generator, addrs[0])) + require.Equal(t, 5000+2*proto.MinTxnFee, micros(t, dl.generator, addrs[6])-micros(t, dl.generator, addrs[0])) baduse := txntest.Txn{ Type: "appl", @@ -581,6 +589,7 @@ func TestDuplicatePayAction(t *testing.T) { dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() + proto := config.Consensus[cv] source := main(` itxn_begin int pay; itxn_field TypeEnum @@ -610,12 +619,12 @@ func TestDuplicatePayAction(t *testing.T) { ad1 := micros(t, dl.generator, addrs[1]) app := micros(t, dl.generator, appID.Address()) - // create(1000) and fund(1000 + 200000), extra create+fund (1000 + 201000) - require.Equal(t, 404000, int(genBalances.Balances[addrs[0]].MicroAlgos.Raw-ad0)) - // paid 10000, but 1000 fee on tx - require.Equal(t, 9000, 
int(ad1-genBalances.Balances[addrs[1]].MicroAlgos.Raw)) - // app still has 188000 (paid out 10000, and paid 2 x fee to do it) - require.Equal(t, 188000, int(app)) + // create(MinTxnFee) and fund(MinTxnFee + 200000), extra create+fund (MinTxnFee + 200000+MinTxnFee) + require.Equal(t, int(4*proto.MinTxnFee+400_000), int(genBalances.Balances[addrs[0]].MicroAlgos.Raw-ad0)) + // paid 10000, but MinTxnFee fee on tx + require.Equal(t, int(10_000-proto.MinTxnFee), int(ad1-genBalances.Balances[addrs[1]].MicroAlgos.Raw)) + // app still has (200000 - 10000 - 2*MinTxnFee) = 190000 - 2*MinTxnFee (paid out 10000, and paid 2 x fee to do it) + require.Equal(t, int(200_000-10_000-2*proto.MinTxnFee), int(app)) // Now create another app, and see if it gets the ID we expect (2 // higher, because of the intervening fund txn) @@ -673,6 +682,7 @@ func TestAcfgAction(t *testing.T) { ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() + proto := config.Consensus[cv] appID := dl.fundedApp(addrs[0], 200_000, // exactly account min balance + one asset main(` @@ -777,13 +787,15 @@ submit: itxn_submit } // Can't create an asset if you have exactly 200,000 and need to pay fee - dl.txn(&createAsa, "balance 199000 below min 200000") - // add some more + // After fundedApp, the app has 200_000 - proto.MinTxnFee (paid during app call/funding) + dl.txn(&createAsa, fmt.Sprintf("balance %d below min 200000", 200_000-proto.MinTxnFee)) + // add some more (need to add enough to reach 200_000 MBR requirement) + // App has 200_000 - proto.MinTxnFee, needs 200_000, so add proto.MinTxnFee dl.txn(&txntest.Txn{ Type: "pay", Sender: addrs[0], Receiver: appID.Address(), - Amount: 10_000, + Amount: proto.MinTxnFee, }) asaID := dl.txn(&createAsa).EvalDelta.InnerTxns[0].ConfigAsset require.NotZero(t, asaID) @@ -799,6 +811,14 @@ submit: itxn_submit require.Equal(t, appID.Address(), 
asaParams.Manager) + // Fund the app for the subsequent operations (4 operations * proto.MinTxnFee for inner txns) + dl.txn(&txntest.Txn{ + Type: "pay", + Sender: addrs[0], + Receiver: appID.Address(), + Amount: 4 * proto.MinTxnFee, + }) + for _, a := range []string{"reserve", "freeze", "clawback", "manager"} { check := txntest.Txn{ Type: "appl", @@ -960,10 +980,11 @@ func TestInnerAppCreateAndOptin(t *testing.T) { `)) // Don't use `main` here, we want to do the work during creation. Rekey // to the helper and invoke it, trusting it to opt us into the ASA. + proto := config.Consensus[cv] createapp := txntest.Txn{ Type: "appl", Sender: addrs[0], - Fee: 3 * 1000, // to pay for self, call to helper, and helper's axfer + Fee: 3 * proto.MinTxnFee, // to pay for self, call to helper, and helper's axfer ApprovalProgram: ` itxn_begin int appl; itxn_field TypeEnum @@ -1014,10 +1035,11 @@ func TestParentGlobals(t *testing.T) { itxn_submit int 1 ` + proto := config.Consensus[cv] createapp := txntest.Txn{ Type: "appl", Sender: addrs[0], - Fee: 2 * 1000, // to pay for self and call to helper + Fee: 2 * proto.MinTxnFee, // to pay for self and call to helper ApprovalProgram: createProgram, ForeignApps: []basics.AppIndex{checkParent}, } @@ -1034,7 +1056,7 @@ func TestParentGlobals(t *testing.T) { outer := txntest.Txn{ Type: "appl", Sender: addrs[0], - Fee: 3 * 1000, // to pay for self, call to inner create, and its call to helper + Fee: 3 * proto.MinTxnFee, // to pay for self, call to inner create, and its call to helper ApprovalProgram: ` itxn_begin int appl; itxn_field TypeEnum @@ -1103,6 +1125,7 @@ func TestKeyreg(t *testing.T) { dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() + proto := config.Consensus[cv] app := txntest.Txn{ Type: "appl", Sender: addrs[0], @@ -1160,8 +1183,8 @@ nonpart: ApplicationArgs: [][]byte{[]byte("pay")}, } dl.fullBlock(&pay) - // 2000 was earned in rewards (- 1000 fee, -1 pay) - require.Equal(t, 1_000_000_999, int(micros(t, 
dl.generator, appID.Address()))) + // 2000 was earned in rewards (- MinTxnFee fee, -1 pay) + require.Equal(t, int(1_000_000_000+2000-proto.MinTxnFee-1), int(micros(t, dl.generator, appID.Address()))) // Go nonpart nonpart := txntest.Txn{ @@ -1171,7 +1194,8 @@ nonpart: ApplicationArgs: [][]byte{[]byte("nonpart")}, } dl.fullBlock(&nonpart) - require.Equal(t, 999_999_999, int(micros(t, dl.generator, appID.Address()))) + // After nonpart: previous balance - MinTxnFee + require.Equal(t, int(1_000_000_000+2000-2*proto.MinTxnFee-1), int(micros(t, dl.generator, appID.Address()))) // Build up Residue in RewardsState so it's ready to pay AGAIN // But expect no rewards @@ -1180,7 +1204,9 @@ nonpart: } dl.txn(pay.Noted("again")) dl.txn(nonpart.Noted("again"), "cannot change online/offline") - require.Equal(t, 999_998_998, int(micros(t, dl.generator, appID.Address()))) + // After one more successful txn (the nonpart fails): previous balance - MinTxnFee - 1 (pay) + // Note: The second nonpart.Noted("again") fails with "cannot change online/offline", so no fee is charged + require.Equal(t, int(1_000_000_000+2000-3*proto.MinTxnFee-2), int(micros(t, dl.generator, appID.Address()))) }) } @@ -1633,6 +1659,7 @@ func TestMaxInnerTxForSingleAppCall(t *testing.T) { ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() + proto := config.Consensus[cv] program := ` txn ApplicationArgs 0 @@ -1670,7 +1697,7 @@ assert Type: "pay", Sender: addrs[0], Receiver: id0.Address(), - Amount: 1_000_000, + Amount: 256 * proto.MinTxnFee * 3, // 256 inner txns × MinTxnFee × 3 (extra buffer for min balance + variable fees) } app1 := txntest.Txn{ @@ -1687,6 +1714,15 @@ assert payset := dl.txns(&app1, &fund0) id1 := payset[0].ApplicationID + // Fund app1 as well since it will be called by inner transactions + fund1 := txntest.Txn{ + Type: "pay", + Sender: addrs[0], + Receiver: 
id1.Address(), + Amount: 100_000 + proto.MinTxnFee*256, // Minimum balance plus fees for all possible inner calls + } + dl.txn(&fund1) + callTxGroup := make([]*txntest.Txn, 16) callTxGroup[0] = &txntest.Txn{ Type: "appl", @@ -1818,6 +1854,9 @@ func TestSelfCheckHoldingNewApp(t *testing.T) { ForeignAssets: []basics.AssetIndex{assetID}, } selfcheck.ApplicationID = dl.txn(&selfcheck).ApplicationID + // remove programs to just call the app + selfcheck.ApprovalProgram = nil + selfcheck.ClearStateProgram = nil dl.txn(&selfcheck) @@ -1867,6 +1906,9 @@ func TestCheckHoldingNewApp(t *testing.T) { ForeignAssets: []basics.AssetIndex{assetID}, } check.ApplicationID = dl.txn(&check).ApplyData.ApplicationID + // remove the programs to just call the app + check.ApprovalProgram = nil + check.ClearStateProgram = nil create := txntest.Txn{ Type: "appl", @@ -2725,6 +2767,8 @@ func TestClearStateInnerPay(t *testing.T) { l := newSimpleLedgerWithConsensusVersion(t, genBalances, test.consensus, cfg) defer l.Close() + proto := config.Consensus[test.consensus] + app0 := txntest.Txn{ Type: "appl", Sender: addrs[0], @@ -2774,8 +2818,8 @@ itxn_submit // Check that addrs[1] got paid during optin, and pay txn is in block ad1 := micros(t, l, addrs[1]) - // paid 3000, but 1000 fee, 2000 bump - require.Equal(t, uint64(2000), ad1-genBalances.Balances[addrs[1]].MicroAlgos.Raw) + // paid 3000, but MinTxnFee fee, 3000-MinTxnFee bump + require.Equal(t, 3000-proto.MinTxnFee, ad1-genBalances.Balances[addrs[1]].MicroAlgos.Raw) // InnerTxn in block ([1] position, because followed fund0) require.Len(t, vb.Block().Payset[1].EvalDelta.InnerTxns, 1) require.Equal(t, vb.Block().Payset[1].EvalDelta.InnerTxns[0].Txn.Amount.Raw, uint64(3000)) @@ -2796,16 +2840,16 @@ itxn_submit // The pay only happens if the clear state approves (and it was legal back in V30) if test.approval == "int 1" && test.consensus == protocol.ConsensusV30 { - // had 2000 bump, now paid 2k, charge 1k, left with 3k total bump - 
require.Equal(t, uint64(3000), ad1-genBalances.Balances[addrs[1]].MicroAlgos.Raw) + // had (3000-MinTxnFee) bump, now paid 2k, charge MinTxnFee, left with (3000-MinTxnFee)+2000-MinTxnFee = 5000-2*MinTxnFee total bump + require.Equal(t, 5000-2*proto.MinTxnFee, ad1-genBalances.Balances[addrs[1]].MicroAlgos.Raw) // InnerTxn in block require.Equal(t, vb.Block().Payset[0].Txn.ApplicationID, id0) require.Equal(t, vb.Block().Payset[0].Txn.OnCompletion, transactions.ClearStateOC) require.Len(t, vb.Block().Payset[0].EvalDelta.InnerTxns, 1) require.Equal(t, vb.Block().Payset[0].EvalDelta.InnerTxns[0].Txn.Amount.Raw, uint64(2000)) } else { - // Only the fee is paid because pay is "erased", so goes from 2k down to 1k - require.Equal(t, uint64(1000), ad1-genBalances.Balances[addrs[1]].MicroAlgos.Raw) + // Only the fee is paid because pay is "erased", so goes from (3000-MinTxnFee) down by MinTxnFee = 3000-2*MinTxnFee + require.Equal(t, 3000-2*proto.MinTxnFee, ad1-genBalances.Balances[addrs[1]].MicroAlgos.Raw) // no InnerTxn in block require.Equal(t, vb.Block().Payset[0].Txn.ApplicationID, id0) require.Equal(t, vb.Block().Payset[0].Txn.OnCompletion, transactions.ClearStateOC) @@ -3703,8 +3747,10 @@ func TestUnfundedSenders(t *testing.T) { // v34 enabled UnfundedSenders var problem string - if ver < 34 { - // In the old days, balances.Move would try to increase the rewardsState on the unfunded account + // In the old days, balances.Move would try to increase the rewardsState on the unfunded account + if ver < 28 { + problem = "transaction had fee 0, which is less than the minimum 1000" + } else if ver < 34 { problem = "balance 0 below min" } for i, e := range ephemeral { @@ -3739,6 +3785,7 @@ func TestAppCallAppDuringInit(t *testing.T) { } // now make a new app that calls it during init + proto := config.Consensus[cv] callInInit := txntest.Txn{ Type: "appl", Sender: addrs[0], @@ -3752,7 +3799,7 @@ func TestAppCallAppDuringInit(t *testing.T) { int 1 `, ForeignApps: 
[]basics.AppIndex{approveID}, - Fee: 2000, // Enough to have the inner fee paid for + Fee: 2 * proto.MinTxnFee, // Enough to have the inner fee paid for } // v34 is the likely version for UnfundedSenders. Change if that doesn't happen. var problem string diff --git a/ledger/boxtxn_test.go b/ledger/boxtxn_test.go index 1d52e3c888..a4f5b1c5d0 100644 --- a/ledger/boxtxn_test.go +++ b/ledger/boxtxn_test.go @@ -346,7 +346,7 @@ func TestBoxCreateAvailability(t *testing.T) { itxn_field TypeEnum itxn_submit - // Now invoke it, so it can intialize (and create the "hello" box) + // Now invoke it, so it can initialize (and create the "hello" box) itxn_begin itxn_field ApplicationID int appl @@ -588,10 +588,33 @@ func TestBoxIOBudgets(t *testing.T) { dl.txn(call.Args("check", "x", "\x00"), "box read budget") // Give a budget over 32768, confirm failure anyway - empties := [32]transactions.BoxRef{} - // These tests skip WellFormed, so the huge Boxes is ok - call.Boxes = append(call.Boxes, empties[:]...) 
- dl.txn(call.Args("create", "x", "\x80\x01"), "box size too large") // 32769 + // Use a transaction group with 5 txns, each with 8 box refs (except the last one) + // to accumulate enough box references (33 total) for the quota test + txns := make([]*txntest.Txn, 5) + for i := 0; i < 4; i++ { + // Create 8 valid box references to the existing box "x" + boxes := make([]transactions.BoxRef, 8) + for j := 0; j < 8; j++ { + boxes[j] = transactions.BoxRef{Index: 0, Name: []byte("x")} + } + txns[i] = &txntest.Txn{ + Type: "appl", + Sender: addrs[0], + ApplicationID: appID, + Boxes: boxes, + ApplicationArgs: [][]byte{[]byte("check"), []byte("x"), []byte("\x00")}, // box contains zeros + Note: []byte{byte(i)}, // vary the note to make txns unique + } + } + // Last transaction tries to create a box > 32K, which should fail even with enough quota + txns[4] = &txntest.Txn{ + Type: "appl", + Sender: addrs[0], + ApplicationID: appID, + Boxes: []transactions.BoxRef{{Index: 0, Name: []byte("y")}}, // 1 ref, total 33 refs in group + ApplicationArgs: [][]byte{[]byte("create"), []byte("y"), []byte("\x80\x01")}, // 32769 bytes + } + dl.txgroup("box size too large", txns...) }) } @@ -604,6 +627,7 @@ func TestBoxInners(t *testing.T) { ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() + proto := config.Consensus[cv] // Advance the creatable counter, so we don't have very low app ids that // could be mistaken for indices into ForeignApps. 
@@ -612,8 +636,8 @@ func TestBoxInners(t *testing.T) { dl.txn(&txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[0]}) dl.txn(&txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[0]}) - boxID := dl.fundedApp(addrs[0], 4_000_000, boxAppSource) // there are some big boxes made - passID := dl.fundedApp(addrs[0], 120_000, passThruSource) // lowish, show it's not paying for boxes + boxID := dl.fundedApp(addrs[0], 4_000_000, boxAppSource) // there are some big boxes made + passID := dl.fundedApp(addrs[0], 100_000+20*proto.MinTxnFee, passThruSource) // lowish, show it's not paying for boxes call := txntest.Txn{ Type: "appl", Sender: addrs[0], @@ -718,8 +742,26 @@ func TestNewAppBoxCreate(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() + t.Run("current", func(t *testing.T) { testNewAppBoxCreate(t, 0) }) + t.Run("tealv9", func(t *testing.T) { testNewAppBoxCreate(t, 9) }) + t.Run("tealv12", func(t *testing.T) { testNewAppBoxCreate(t, 12) }) +} + +func testNewAppBoxCreate(t *testing.T, requestedTealVersion int) { genBalances, addrs, _ := ledgertesting.NewTestGenesis() ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + proto := config.Consensus[cv] + + tealVersion := requestedTealVersion + if tealVersion == 0 { + tealVersion = int(proto.LogicSigVersion) + } + + // Skip for combinations of tealVersion and cv that aren't possible + if uint64(tealVersion) > proto.LogicSigVersion { + t.Skipf("TEAL v%d not available in %s (LogicSigVersion=%d)", tealVersion, cv, proto.LogicSigVersion) + } + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() @@ -731,7 +773,7 @@ func TestNewAppBoxCreate(t *testing.T) { // transaction counter, so it can know what the later create will be, // and compute it's app address. - // 2) a) Use the the predicted appID to name the box ref. + // 2) a) Use the predicted appID to name the box ref. 
// or b) Use 0 as the app in the box ref, meaning "this app" // or c) EnableUnnamedBoxCreate will allow such a creation if there are empty box refs. @@ -753,10 +795,13 @@ func TestNewAppBoxCreate(t *testing.T) { boxPut := "txn ApplicationArgs 0; int 24; bzero; box_put; int 1;" for _, createSrc := range []string{boxCreate, boxPut} { + // createSrcVer is the versioned source for the current test's TEAL version + createSrcVer := fmt.Sprintf("#pragma version %d\n%s", tealVersion, createSrc) + // doubleSrc tries to create TWO boxes. The second is always named by ApplicationArgs 1 - doubleSrc := createSrc + `txn ApplicationArgs 1; int 24; box_create; pop;` // return result of FIRST box_create - // need to call one inner txn, and have have mbr for itself and inner created app - passID := dl.fundedApp(addrs[0], 201_000, passThruCreator) // Will be used to show inners have same power + doubleSrc := createSrcVer + `txn ApplicationArgs 1; int 24; box_create; pop;` // return result of FIRST box_create + // need to call one inner txn, and have mbr for itself and inner created app + passID := dl.fundedApp(addrs[0], 200_000+proto.MinTxnFee, passThruCreator) // Will be used to show inners have same power // Since we used fundedApp, the next app created would be passID+2. // We'll prefund a whole bunch of the next apps that we can then create @@ -769,19 +814,19 @@ func TestNewAppBoxCreate(t *testing.T) { // Try to create it. It will fail because there's no box ref. (does not increment txncounter) dl.txn(&txntest.Txn{Type: "appl", Sender: addrs[0], - ApprovalProgram: createSrc, ApplicationArgs: [][]byte{{0x01}}}, + ApprovalProgram: createSrcVer, ApplicationArgs: [][]byte{{0x01}}}, "invalid Box reference 0x01") // 2a. 
Create it with a box ref of the predicted appID dl.txn(&txntest.Txn{Type: "appl", Sender: addrs[0], - ApprovalProgram: createSrc, ApplicationArgs: [][]byte{{0x01}}, + ApprovalProgram: createSrcVer, ApplicationArgs: [][]byte{{0x01}}, ForeignApps: []basics.AppIndex{passID + testTxns + 2}, Boxes: []transactions.BoxRef{{Index: 1, Name: []byte{0x01}}}}) // 2a. Create it with a box ref of the predicted appID (Access list) - if ver >= accessVersion { + if proto.MaxAppAccess > 0 { dl.txn(&txntest.Txn{Type: "appl", Sender: addrs[0], - ApprovalProgram: createSrc, ApplicationArgs: [][]byte{{0x01}}, + ApprovalProgram: createSrcVer, ApplicationArgs: [][]byte{{0x01}}, Access: []transactions.ResourceRef{ {App: passID + testTxns + 3}, {Box: transactions.BoxRef{Index: 1, Name: []byte{0x01}}}}}) @@ -789,13 +834,13 @@ func TestNewAppBoxCreate(t *testing.T) { // 2b. Create it with a box ref of 0, which means "this app" dl.txn(&txntest.Txn{Type: "appl", Sender: addrs[0], - ApprovalProgram: createSrc, ApplicationArgs: [][]byte{{0x01}}, + ApprovalProgram: createSrcVer, ApplicationArgs: [][]byte{{0x01}}, Boxes: []transactions.BoxRef{{Index: 0, Name: []byte{0x01}}}}) // 2b. Create it with a box ref of 0, which means "this app" (Access List) - if ver >= accessVersion { + if proto.MaxAppAccess > 0 { dl.txn(&txntest.Txn{Type: "appl", Sender: addrs[0], - ApprovalProgram: createSrc, ApplicationArgs: [][]byte{{0x01}}, + ApprovalProgram: createSrcVer, ApplicationArgs: [][]byte{{0x01}}, Access: []transactions.ResourceRef{ {Box: transactions.BoxRef{Index: 0, Name: []byte{0x01}}}}}) } @@ -822,12 +867,12 @@ func TestNewAppBoxCreate(t *testing.T) { if ver >= newAppCreateVersion { // 2c. Create it with an empty box ref dl.txn(&txntest.Txn{Type: "appl", Sender: addrs[0], - ApprovalProgram: createSrc, ApplicationArgs: [][]byte{{0x01}}, + ApprovalProgram: createSrcVer, ApplicationArgs: [][]byte{{0x01}}, Boxes: []transactions.BoxRef{{}}}) // 2c. 
Create it with an empty box ref dl.txn(&txntest.Txn{Type: "appl", Sender: addrs[0], - ApprovalProgram: createSrc, ApplicationArgs: [][]byte{{0x01}}, + ApprovalProgram: createSrcVer, ApplicationArgs: [][]byte{{0x01}}, Access: []transactions.ResourceRef{{Box: transactions.BoxRef{}}}}) // but you can't do a second create @@ -861,7 +906,7 @@ func TestNewAppBoxCreate(t *testing.T) { } else { // 2c. Doesn't work yet until `newAppCreateVersion` dl.txn(&txntest.Txn{Type: "appl", Sender: addrs[0], - ApprovalProgram: createSrc, ApplicationArgs: [][]byte{{0x01}}, + ApprovalProgram: createSrcVer, ApplicationArgs: [][]byte{{0x01}}, Boxes: []transactions.BoxRef{{}}}, "invalid Box reference 0x01") } diff --git a/ledger/catchpointfilewriter_test.go b/ledger/catchpointfilewriter_test.go index 1ff274c8fd..c7f786a60d 100644 --- a/ledger/catchpointfilewriter_test.go +++ b/ledger/catchpointfilewriter_test.go @@ -943,7 +943,7 @@ func testExactAccountChunk(t *testing.T, proto protocol.ConsensusVersion, extraB var onlineExcludeBefore basics.Round // we added so many blocks that lowestRound is stuck at first state proof, round 240? 
if normalHorizon := catchpointLookbackHorizonForNextRound(genDBRound, params); normalHorizon <= genLowestRound { - t.Logf("subtest is exercising case where lowestRound from votersTracker is satsified by the existing history") + t.Logf("subtest is exercising case where lowestRound from votersTracker is satisfied by the existing history") require.EqualValues(t, genLowestRound, params.StateProofInterval-params.StateProofVotersLookback) onlineExcludeBefore = 0 require.False(t, longHistory) @@ -1238,7 +1238,7 @@ assert var onlineExcludeBefore basics.Round normalOnlineHorizon := catchpointLookbackHorizonForNextRound(genDBRound, config.Consensus[proto]) if normalOnlineHorizon <= genLowestRound { - t.Logf("lowestRound from votersTracker is satsified by the existing history") + t.Logf("lowestRound from votersTracker is satisfied by the existing history") onlineExcludeBefore = 0 require.False(t, longHistory) } else if normalOnlineHorizon > genLowestRound { diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go index 7b905c6f6e..bd98d6dffc 100644 --- a/ledger/catchpointtracker.go +++ b/ledger/catchpointtracker.go @@ -183,10 +183,7 @@ func (ct *catchpointTracker) initialize(cfg config.Local, paths DirsAndPrefix) { ct.enableGeneratingCatchpointFiles = true } - ct.catchpointFileHistoryLength = cfg.CatchpointFileHistoryLength - if cfg.CatchpointFileHistoryLength < -1 { - ct.catchpointFileHistoryLength = -1 - } + ct.catchpointFileHistoryLength = max(cfg.CatchpointFileHistoryLength, -1) } // GetLastCatchpointLabel retrieves the last catchpoint label that was stored to the database. 
diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go index a26e0c6f5d..9f8df68b7c 100644 --- a/ledger/catchpointtracker_test.go +++ b/ledger/catchpointtracker_test.go @@ -1092,16 +1092,19 @@ func TestCatchpointTrackerWaitNotBlocking(t *testing.T) { } // switch context one more time to give the blockqueue syncer to run - time.Sleep(1 * time.Millisecond) + time.Sleep(10 * time.Millisecond) // ensure Ledger.Wait() is non-blocked for all rounds except the last one (due to possible races) for rnd := startRound; rnd < endRound; rnd++ { done := ledger.Wait(rnd) - select { - case <-done: - default: - require.FailNow(t, fmt.Sprintf("Wait(%d) is blocked", rnd)) - } + require.Eventually(t, func() bool { + select { + case <-done: + return true + default: + return false + } + }, 15*time.Millisecond, 1*time.Millisecond, "Wait(%d) is blocked", rnd) } } @@ -1600,7 +1603,7 @@ func TestCatchpointSecondStageDeletesUnfinishedCatchpointRecord(t *testing.T) { secondStageRound := basics.Round(36) - // Add blocks that preceed the first catchpoint round. + // Add blocks that precede the first catchpoint round. 
for i := basics.Round(1); i < secondStageRound; i++ { blk := bookkeeping.Block{ BlockHeader: bookkeeping.BlockHeader{ @@ -2030,10 +2033,7 @@ func TestCatchpointLargeAccountCountCatchpointGeneration(t *testing.T) { } start := basics.Round(initialBlocksCount) - min := conf.CatchpointInterval - if min < protoParams.CatchpointLookback { - min = protoParams.CatchpointLookback - } + min := max(conf.CatchpointInterval, protoParams.CatchpointLookback) end := basics.Round(min + conf.MaxAcctLookback + 3) // few more rounds to commit and generate the second stage for i := start; i < end; i++ { rewardLevelDelta := crypto.RandUint64() % 5 @@ -2113,7 +2113,7 @@ func TestMakeCatchpointFilePath(t *testing.T) { // deadlock detection) and concurrent reads (from transaction evaluation, stake lookups, etc) can // cause the SQLite implementation in util/db/dbutil.go to retry the function looping over all // tracker commitRound implementations. Since catchpointtracker' commitRound updates a merkle trie's -// DB storage and its in-memory cache, the retry can cause the the balancesTrie's cache to become +// DB storage and its in-memory cache, the retry can cause the balancesTrie's cache to become // corrupted and out of sync with the DB (which uses transaction rollback between retries). The // merkle trie corruption manifests as error log messages like: // - "attempted to add duplicate hash 'X' to merkle trie for account Y" diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go index 114dd7f91a..e42cfc1a9b 100644 --- a/ledger/catchupaccessor.go +++ b/ledger/catchupaccessor.go @@ -253,11 +253,11 @@ const ( CatchpointCatchupStateBlocksDownload // CatchpointCatchupStateSwitch indicates that we're switching to use the downloaded ledger/blocks content CatchpointCatchupStateSwitch - - // catchpointCatchupStateLast is the last entry in the CatchpointCatchupState enumeration. 
- catchpointCatchupStateLast = CatchpointCatchupStateSwitch ) +// catchpointCatchupStateLast is the last entry in the CatchpointCatchupState enumeration. +const catchpointCatchupStateLast = CatchpointCatchupStateSwitch + // CatchupAccessorClientLedger represents ledger interface needed for catchpoint accessor clients type CatchupAccessorClientLedger interface { BlockCert(rnd basics.Round) (blk bookkeeping.Block, cert agreement.Certificate, err error) @@ -454,7 +454,7 @@ func (c *catchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex // the following fields are now going to be ignored. We could add these to the database and validate these // later on: - // TotalAccounts, TotalAccounts, Catchpoint, BlockHeaderDigest, BalancesRound + // TotalAccounts, Catchpoint, BlockHeaderDigest, BalancesRound start := time.Now() ledgerProcessstagingcontentCount.Inc(nil) err = c.ledger.trackerDB().Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { diff --git a/ledger/catchupaccessor_test.go b/ledger/catchupaccessor_test.go index 26a93d8d2e..9e9971f83b 100644 --- a/ledger/catchupaccessor_test.go +++ b/ledger/catchupaccessor_test.go @@ -51,10 +51,7 @@ func createTestingEncodedChunks(accountsCount uint64) (encodedAccountChunks [][] last64KIndex = -1 for accounts < accountsCount { // generate a chunk; - chunkSize := accountsCount - accounts - if chunkSize > BalancesPerCatchpointFileChunk { - chunkSize = BalancesPerCatchpointFileChunk - } + chunkSize := min(accountsCount-accounts, BalancesPerCatchpointFileChunk) if accounts >= accountsCount-64*1024 && last64KIndex == -1 { last64KIndex = len(encodedAccountChunks) } diff --git a/ledger/double_test.go b/ledger/double_test.go index 13f5b0f552..151f1dda2a 100644 --- a/ledger/double_test.go +++ b/ledger/double_test.go @@ -72,6 +72,10 @@ func (dl *DoubleLedger) beginBlock() *eval.BlockEvaluator { return dl.eval } +// txn will add a transaction to the current block. 
If no block is +// currently being built, it will start one, and end it after the +// transaction is added. If a problem is specified, it will be +// expected to fail, and the block will not be ended. func (dl *DoubleLedger) txn(tx *txntest.Txn, problem ...string) (stib *transactions.SignedTxnInBlock) { dl.t.Helper() if dl.eval == nil { diff --git a/ledger/encoded/msgp_gen.go b/ledger/encoded/msgp_gen.go index 58de0bc8a1..129a37aa7a 100644 --- a/ledger/encoded/msgp_gen.go +++ b/ledger/encoded/msgp_gen.go @@ -203,7 +203,6 @@ func (z *BalanceRecordV5) MsgIsZero() bool { func BalanceRecordV5MaxSize() (s int) { s = 1 + 3 + basics.AddressMaxSize() + 3 panic("Unable to determine max size: MaxSize() not implemented for Raw type") - return } // MarshalMsg implements msgp.Marshaler @@ -472,15 +471,6 @@ func (z *BalanceRecordV6) MsgIsZero() bool { func BalanceRecordV6MaxSize() (s int) { s = 1 + 2 + basics.AddressMaxSize() + 2 panic("Unable to determine max size: MaxSize() not implemented for Raw type") - s += 2 - s += msgp.MapHeaderSize - // Adding size of map keys for z.Resources - s += resourcesPerCatchpointFileChunkBackwardCompatible * (msgp.Uint64Size) - // Adding size of map values for z.Resources - s += resourcesPerCatchpointFileChunkBackwardCompatible - panic("Unable to determine max size: MaxSize() not implemented for Raw type") - s += 2 + msgp.BoolSize - return } // MarshalMsg implements msgp.Marshaler @@ -876,7 +866,6 @@ func (z *OnlineAccountRecordV6) MsgIsZero() bool { func OnlineAccountRecordV6MaxSize() (s int) { s = 1 + 5 + basics.AddressMaxSize() + 4 + basics.RoundMaxSize() + 4 + msgp.Uint64Size + 4 + basics.RoundMaxSize() + 5 panic("Unable to determine max size: MaxSize() not implemented for Raw type") - return } // MarshalMsg implements msgp.Marshaler @@ -1020,5 +1009,4 @@ func (z *OnlineRoundParamsRecordV6) MsgIsZero() bool { func OnlineRoundParamsRecordV6MaxSize() (s int) { s = 1 + 4 + basics.RoundMaxSize() + 5 panic("Unable to determine max size: 
MaxSize() not implemented for Raw type") - return } diff --git a/ledger/eval/appcow.go b/ledger/eval/appcow.go index 83603acb22..c64cc1a939 100644 --- a/ledger/eval/appcow.go +++ b/ledger/eval/appcow.go @@ -93,7 +93,10 @@ type storageDelta struct { action storageAction kvCow stateDelta - counts, maxCounts *basics.StateSchema + // counts represents the number of each value type currently used + counts basics.StateSchema + // maxCounts is the maximum allowed counts (it comes from the app's schema) + maxCounts basics.StateSchema // account index for an address that was first referenced as in app_local_get/app_local_put/app_local_del // this is for backward compatibility with original implementation of applications @@ -125,8 +128,8 @@ func (cb *roundCowState) ensureStorageDelta(addr basics.Address, aidx basics.App lsd = &storageDelta{ action: defaultAction, kvCow: make(stateDelta), - counts: &counts, - maxCounts: &maxCounts, + counts: counts, + maxCounts: maxCounts, } if cb.compatibilityMode && !global { @@ -163,7 +166,7 @@ func (cb *roundCowState) getStorageCounts(addr basics.Address, aidx basics.AppIn aapp := storagePtr{aidx, global} lsd, ok := cb.sdeltas[addr][aapp] if ok { - return *lsd.counts, nil + return lsd.counts, nil } // Otherwise, check our parent @@ -185,7 +188,7 @@ func (cb *roundCowState) getStorageLimits(addr basics.Address, aidx basics.AppIn aapp := storagePtr{aidx, global} lsd, ok := cb.sdeltas[addr][aapp] if ok { - return *lsd.maxCounts, nil + return lsd.maxCounts, nil } // Otherwise, check our parent @@ -241,7 +244,7 @@ func (cb *roundCowState) AllocateApp(addr basics.Address, aidx basics.AppIndex, } lsd.action = allocAction - lsd.maxCounts = &space + lsd.maxCounts = space if global { cb.mods.AddCreatable(basics.CreatableIndex(aidx), ledgercore.ModifiedCreatable{ @@ -271,8 +274,8 @@ func (cb *roundCowState) DeallocateApp(addr basics.Address, aidx basics.AppIndex } lsd.action = deallocAction - lsd.counts = &basics.StateSchema{} - lsd.maxCounts = 
&basics.StateSchema{} + lsd.counts = basics.StateSchema{} + lsd.maxCounts = basics.StateSchema{} lsd.kvCow = make(stateDelta) if global { diff --git a/ledger/eval/appcow_test.go b/ledger/eval/appcow_test.go index ccced6830d..e94b08a0e4 100644 --- a/ledger/eval/appcow_test.go +++ b/ledger/eval/appcow_test.go @@ -276,8 +276,7 @@ func TestCowStorage(t *testing.T) { } err := cow.AllocateApp(addr, sptr.aidx, sptr.global, rschema) if actuallyAllocated { - require.Error(t, err) - require.Contains(t, err.Error(), "cannot allocate") + require.ErrorContains(t, err, "cannot allocate") } else { require.NoError(t, err) err = st.alloc(aapp, rschema) @@ -294,8 +293,7 @@ func TestCowStorage(t *testing.T) { err := st.dealloc(aapp) require.NoError(t, err) } else { - require.Error(t, err) - require.Contains(t, err.Error(), "cannot deallocate") + require.ErrorContains(t, err, "cannot deallocate") } } @@ -310,8 +308,7 @@ func TestCowStorage(t *testing.T) { err = st.set(aapp, rkey, rval) require.NoError(t, err) } else { - require.Error(t, err) - require.Contains(t, err.Error(), "cannot set") + require.ErrorContains(t, err, "cannot set") } } @@ -325,8 +322,7 @@ func TestCowStorage(t *testing.T) { err = st.del(aapp, rkey) require.NoError(t, err) } else { - require.Error(t, err) - require.Contains(t, err.Error(), "cannot del") + require.ErrorContains(t, err, "cannot del") } } @@ -412,8 +408,7 @@ func TestCowBuildDelta(t *testing.T) { // check global delta cow.sdeltas[creator][storagePtr{aidx, true}] = &storageDelta{} ed, err = cow.buildEvalDelta(1, &txn) - a.Error(err) - a.Contains(err.Error(), "found storage delta for different app") + a.ErrorContains(err, "found storage delta for different app") a.Empty(ed) cow.sdeltas[creator][storagePtr{aidx, true}] = &storageDelta{} @@ -423,16 +418,14 @@ func TestCowBuildDelta(t *testing.T) { cow.sdeltas[creator][storagePtr{aidx + 1, true}] = &storageDelta{} ed, err = cow.buildEvalDelta(aidx, &txn) - a.Error(err) - a.Contains(err.Error(), "found 
storage delta for different app") + a.ErrorContains(err, "found storage delta for different app") a.Empty(ed) delete(cow.sdeltas[creator], storagePtr{aidx + 1, true}) cow.sdeltas[sender] = make(map[storagePtr]*storageDelta) cow.sdeltas[sender][storagePtr{aidx, true}] = &storageDelta{} ed, err = cow.buildEvalDelta(aidx, &txn) - a.Error(err) - a.Contains(err.Error(), "found more than one global delta") + a.ErrorContains(err, "found more than one global delta") a.Empty(ed) // check local delta @@ -440,8 +433,7 @@ func TestCowBuildDelta(t *testing.T) { cow.sdeltas[sender][storagePtr{aidx, false}] = &storageDelta{} ed, err = cow.buildEvalDelta(aidx, &txn) - a.Error(err) - a.Contains(err.Error(), "invalid Account reference ") + a.ErrorContains(err, "invalid Account reference ") a.Empty(ed) // check v26 behavior for empty deltas @@ -708,10 +700,8 @@ func TestApplyChild(t *testing.T) { emptyStorageDelta := func(action storageAction) storageDelta { return storageDelta{ - action: action, - kvCow: make(stateDelta), - counts: &basics.StateSchema{}, - maxCounts: &basics.StateSchema{}, + action: action, + kvCow: make(stateDelta), } } getSchema := func(u, b int) basics.StateSchema { @@ -722,9 +712,9 @@ func TestApplyChild(t *testing.T) { child := emptyStorageDelta(0) chkEmpty := func(delta *storageDelta) { - a.Empty(delta.action) - a.Empty(*delta.counts) - a.Empty(*delta.maxCounts) + a.Zero(delta.action) + a.Zero(delta.counts) + a.Zero(delta.maxCounts) a.Zero(len(delta.kvCow)) } @@ -738,15 +728,13 @@ func TestApplyChild(t *testing.T) { // check child overwrites values child.action = allocAction - s1 := getSchema(1, 2) - s2 := getSchema(3, 4) - child.counts = &s1 - child.maxCounts = &s2 + child.counts = getSchema(1, 2) + child.maxCounts = getSchema(3, 4) parent.applyChild(&child) a.Equal(allocAction, parent.action) a.Equal(1, len(parent.kvCow)) - a.Equal(getSchema(1, 2), *parent.counts) - a.Equal(getSchema(3, 4), *parent.maxCounts) + a.Equal(getSchema(1, 2), parent.counts) + 
a.Equal(getSchema(3, 4), parent.maxCounts) // check child is correctly merged into parent empty := func() valueDelta { @@ -817,18 +805,17 @@ func TestApplyChild(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { parent := emptyStorageDelta(0) - ps := getSchema(len(test.pkv), 0) - parent.counts = &ps + parent.counts = getSchema(len(test.pkv), 0) parent.kvCow = test.pkv child := emptyStorageDelta(remainAllocAction) cs := getSchema(len(test.ckv)+len(test.pkv), 0) - child.counts = &cs + child.counts = cs child.kvCow = test.ckv parent.applyChild(&child) a.Equal(test.result, parent.kvCow) - a.Equal(cs, *parent.counts) + a.Equal(cs, parent.counts) }) } } @@ -1126,9 +1113,8 @@ func TestCowGetKey(t *testing.T) { addr: {storagePtr{aidx, true}: &storageDelta{action: deallocAction}}, } _, ok, err := c.getKey(addr, aidx, true, "gkey", 0) - a.Error(err) a.False(ok) - a.Contains(err.Error(), "cannot fetch key") + a.ErrorContains(err, "cannot fetch key") c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{ addr: {storagePtr{aidx, true}: &storageDelta{action: allocAction}}, @@ -1201,15 +1187,13 @@ func TestCowSetKey(t *testing.T) { val := "val" tv := basics.TealValue{Type: basics.TealBytesType, Bytes: val} err := c.setKey(addr, aidx, true, key, tv, 0) - a.Error(err) - a.Contains(err.Error(), "key too long") + a.ErrorContains(err, "key too long") key = "key" val = strings.Repeat("val", 100) tv = basics.TealValue{Type: basics.TealBytesType, Bytes: val} err = c.setKey(addr, aidx, true, key, tv, 0) - a.Error(err) - a.Contains(err.Error(), "value too long") + a.ErrorContains(err, "value too long") val = "val" tv = basics.TealValue{Type: basics.TealBytesType, Bytes: val} @@ -1217,30 +1201,21 @@ func TestCowSetKey(t *testing.T) { addr: {storagePtr{aidx, true}: &storageDelta{action: deallocAction}}, } err = c.setKey(addr, aidx, true, key, tv, 0) - a.Error(err) - a.Contains(err.Error(), "cannot set key") + a.ErrorContains(err, "cannot set key") - 
counts := basics.StateSchema{} - maxCounts := basics.StateSchema{} c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{ addr: { storagePtr{aidx, true}: &storageDelta{ - action: allocAction, - kvCow: make(stateDelta), - counts: &counts, - maxCounts: &maxCounts, + action: allocAction, + kvCow: make(stateDelta), }, }, } err = c.setKey(addr, aidx, true, key, tv, 0) - a.Error(err) - a.Contains(err.Error(), "exceeds schema bytes") + a.ErrorContains(err, "exceeds schema bytes") - counts = basics.StateSchema{NumUint: 1} - maxCounts = basics.StateSchema{NumByteSlice: 1} - err = c.setKey(addr, aidx, true, key, tv, 0) - a.Error(err) - a.Contains(err.Error(), "exceeds schema integer") + err = c.setKey(addr, aidx, true, key, basics.TealValue{Type: basics.TealUintType}, 0) + a.ErrorContains(err, "exceeds schema integer") tv2 := basics.TealValue{Type: basics.TealUintType, Uint: 1} c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{ @@ -1248,16 +1223,14 @@ func TestCowSetKey(t *testing.T) { storagePtr{aidx, true}: &storageDelta{ action: allocAction, kvCow: stateDelta{key: valueDelta{new: tv2, newExists: true}}, - counts: &counts, - maxCounts: &maxCounts, + counts: basics.StateSchema{NumUint: 1}, + maxCounts: basics.StateSchema{NumByteSlice: 1}, }, }, } err = c.setKey(addr, aidx, true, key, tv, 0) a.NoError(err) - counts = basics.StateSchema{NumUint: 1} - maxCounts = basics.StateSchema{NumByteSlice: 1, NumUint: 1} err = c.setKey(addr, aidx, true, key, tv, 0) a.NoError(err) @@ -1268,8 +1241,8 @@ func TestCowSetKey(t *testing.T) { storagePtr{aidx, false}: &storageDelta{ action: allocAction, kvCow: stateDelta{key: valueDelta{new: tv2, newExists: true}}, - counts: &counts, - maxCounts: &maxCounts, + counts: basics.StateSchema{NumUint: 1}, + maxCounts: basics.StateSchema{NumByteSlice: 1, NumUint: 1}, }, }, } @@ -1298,22 +1271,19 @@ func TestCowSetKeyVFuture(t *testing.T) { val := "val" tv := basics.TealValue{Type: basics.TealBytesType, Bytes: val} err := c.setKey(addr, 
aidx, true, key, tv, 0) - a.Error(err) - a.Contains(err.Error(), "key too long") + a.ErrorContains(err, "key too long") key = "key" val = strings.Repeat("val", 100) tv = basics.TealValue{Type: basics.TealBytesType, Bytes: val} err = c.setKey(addr, aidx, true, key, tv, 0) - a.Error(err) - a.Contains(err.Error(), "value too long") + a.ErrorContains(err, "value too long") key = strings.Repeat("k", protoF.MaxAppKeyLen) val = strings.Repeat("v", protoF.MaxAppSumKeyValueLens-len(key)+1) tv = basics.TealValue{Type: basics.TealBytesType, Bytes: val} err = c.setKey(addr, aidx, true, key, tv, 0) - a.Error(err) - a.Contains(err.Error(), "key/value total too long") + a.ErrorContains(err, "key/value total too long") } func TestCowAccountIdx(t *testing.T) { @@ -1344,16 +1314,12 @@ func TestCowAccountIdx(t *testing.T) { a.NoError(err) a.Equal(uint64(123), sd.accountIdx) - counts := basics.StateSchema{} - maxCounts := basics.StateSchema{} for _, global := range []bool{false, true} { c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{ addr: { storagePtr{aidx, global}: &storageDelta{ action: allocAction, kvCow: stateDelta{key: valueDelta{new: tv, newExists: true}}, - counts: &counts, - maxCounts: &maxCounts, accountIdx: 123, }, }, @@ -1380,18 +1346,13 @@ func TestCowDelKey(t *testing.T) { addr: {storagePtr{aidx, true}: &storageDelta{action: deallocAction}}, } err := c.delKey(addr, aidx, true, key, 0) - a.Error(err) - a.Contains(err.Error(), "cannot del key") + a.ErrorContains(err, "cannot del key") - counts := basics.StateSchema{} - maxCounts := basics.StateSchema{} c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{ addr: { storagePtr{aidx, true}: &storageDelta{ - action: allocAction, - kvCow: make(stateDelta), - counts: &counts, - maxCounts: &maxCounts, + action: allocAction, + kvCow: make(stateDelta), }, }, } @@ -1401,10 +1362,8 @@ func TestCowDelKey(t *testing.T) { c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{ addr: { storagePtr{aidx, false}: 
&storageDelta{ - action: allocAction, - kvCow: make(stateDelta), - counts: &counts, - maxCounts: &maxCounts, + action: allocAction, + kvCow: make(stateDelta), }, }, } diff --git a/ledger/eval/eval.go b/ledger/eval/eval.go index a0f64a9438..6016eae39d 100644 --- a/ledger/eval/eval.go +++ b/ledger/eval/eval.go @@ -919,6 +919,11 @@ func (eval *BlockEvaluator) Round() basics.Round { return eval.block.Round() } +// ConsensusParams returns the consensus parameters for the block being evaluated. +func (eval *BlockEvaluator) ConsensusParams() config.ConsensusParams { + return eval.proto +} + // ResetTxnBytes resets the number of bytes tracked by the BlockEvaluator to // zero. This is a specialized operation used by the transaction pool to // simulate the effect of putting pending transactions in multiple blocks. @@ -1184,6 +1189,12 @@ func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, evalParams * return err } + err = txn.Txn.WellFormed(eval.specials, eval.proto) + if err != nil { + txnErr := ledgercore.TxnNotWellFormedError(fmt.Sprintf("transaction %v: malformed: %v", txn.ID(), err)) + return &txnErr + } + // Transaction already in the ledger? 
err = cow.checkDup(txn.Txn.FirstValid, txn.Txn.LastValid, txid, ledgercore.Txlease{Sender: txn.Txn.Sender, Lease: txn.Txn.Lease}) if err != nil { diff --git a/ledger/eval/eval_test.go b/ledger/eval/eval_test.go index c66cc9e182..fe44c0a6aa 100644 --- a/ledger/eval/eval_test.go +++ b/ledger/eval/eval_test.go @@ -21,6 +21,7 @@ import ( "context" "errors" "fmt" + "maps" "math/rand" "testing" @@ -48,12 +49,6 @@ import ( var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36} -var minFee basics.MicroAlgos - -func init() { - params := config.Consensus[protocol.ConsensusCurrentVersion] - minFee = basics.MicroAlgos{Raw: params.MinTxnFee} -} func TestBlockEvaluatorFeeSink(t *testing.T) { partitiontest.PartitionTest(t) @@ -117,7 +112,7 @@ ok: genHash := l.GenesisHash() header := transactions.Header{ Sender: addrs[0], - Fee: minFee, + Fee: basics.MicroAlgos{Raw: eval.proto.MinTxnFee}, FirstValid: newBlock.Round(), LastValid: newBlock.Round(), GenesisHash: genHash, @@ -300,6 +295,7 @@ func TestTransactionGroupWithTracer(t *testing.T) { eval.validate = true eval.generate = true + minFee := basics.MicroAlgos{Raw: eval.proto.MinTxnFee} genHash := l.GenesisHash() var basicAppCallReturn string @@ -659,7 +655,7 @@ func testnetFixupExecution(t *testing.T, headerRound basics.Round, poolBonus uin Type: protocol.PaymentTx, Header: transactions.Header{ Sender: addrs[0], - Fee: minFee, + Fee: basics.MicroAlgos{Raw: eval.proto.MinTxnFee}, FirstValid: newBlock.Round(), LastValid: newBlock.Round(), GenesisHash: testnetGenesisHash, @@ -886,9 +882,7 @@ func (ledger *evalTestLedger) AddValidatedBlock(vb 
ledgercore.ValidatedBlock, ce newBalances := make(map[basics.Address]basics.AccountData) // copy the previous balances. - for k, v := range ledger.roundBalances[vb.Block().Round()-1] { - newBalances[k] = v - } + maps.Copy(newBalances, ledger.roundBalances[vb.Block().Round()-1]) // update deltas := vb.Delta() @@ -1410,7 +1404,8 @@ func TestAbsenteeChecks(t *testing.T) { Type: protocol.PaymentTx, Header: transactions.Header{ Sender: addrs[i], - Fee: minFee, + Fee: basics.MicroAlgos{Raw: blkEval.proto.MinTxnFee}, + FirstValid: blkEval.Round().SubSaturate(1000), LastValid: blkEval.Round(), GenesisHash: l.GenesisHash(), }, diff --git a/ledger/eval/prefetcher/prefetcher_test.go b/ledger/eval/prefetcher/prefetcher_test.go index 4e7de01954..b47cd7648a 100644 --- a/ledger/eval/prefetcher/prefetcher_test.go +++ b/ledger/eval/prefetcher/prefetcher_test.go @@ -18,7 +18,6 @@ package prefetcher_test import ( "context" - "errors" "testing" "github.com/stretchr/testify/require" @@ -637,7 +636,7 @@ func TestAssetLookupError(t *testing.T) { if loadedTxnGroup.Err != nil { errorReceived = true require.Equal(t, int64(2), loadedTxnGroup.Err.GroupIdx) - require.True(t, errors.Is(loadedTxnGroup.Err, assetLookupError{})) + require.ErrorIs(t, loadedTxnGroup.Err, assetLookupError{}) require.Equal(t, makeAddress(2), *loadedTxnGroup.Err.Address) require.Equal(t, errorTriggerAssetIndex, int(loadedTxnGroup.Err.CreatableIndex)) require.Equal(t, basics.AssetCreatable, loadedTxnGroup.Err.CreatableType) @@ -693,7 +692,7 @@ func TestGetCreatorForRoundError(t *testing.T) { receivedNumGroups++ if loadedTxnGroup.Err != nil { errorReceived = true - require.True(t, errors.Is(loadedTxnGroup.Err, getCreatorError{})) + require.ErrorIs(t, loadedTxnGroup.Err, getCreatorError{}) require.Nil(t, loadedTxnGroup.Err.Address) require.Equal(t, errorTriggerCreatableIndex, int(loadedTxnGroup.Err.CreatableIndex)) require.Equal(t, basics.AssetCreatable, loadedTxnGroup.Err.CreatableType) @@ -750,7 +749,7 @@ func 
TestLookupWithoutRewards(t *testing.T) { receivedNumGroups++ if loadedTxnGroup.Err != nil { errorReceived = true - require.True(t, errors.Is(loadedTxnGroup.Err, lookupError{})) + require.ErrorIs(t, loadedTxnGroup.Err, lookupError{}) require.Equal(t, makeAddress(10), *loadedTxnGroup.Err.Address) require.Equal(t, 0, int(loadedTxnGroup.Err.CreatableIndex)) require.Equal(t, basics.AssetCreatable, loadedTxnGroup.Err.CreatableType) diff --git a/ledger/eval/txntracer_test.go b/ledger/eval/txntracer_test.go index 80befccde5..ca00414dba 100644 --- a/ledger/eval/txntracer_test.go +++ b/ledger/eval/txntracer_test.go @@ -99,6 +99,7 @@ func TestTransactionGroupWithDeltaTracer(t *testing.T) { eval.validate = true eval.generate = true genHash := l.GenesisHash() + minFee := basics.MicroAlgos{Raw: eval.proto.MinTxnFee} basicAppCallApproval := `#pragma version 8 byte "hellobox" @@ -231,13 +232,39 @@ int 1` err = eval.TransactionGroup(secondTxGroup) require.NoError(t, err) + // Calculate expected balances dynamically based on proto.MinTxnFee + // Genesis gives each address: 5_000_000_000_000_000 / 3 ≈ 1_666_666_666_666_666 + genesisBalance := uint64(5_000_000_000_000_000 / 3) + + // addrs[0]: paid 3 fees (basicAppCall, innerAppCall, innerBoxAppCall) + expectedAddr0 := genesisBalance - 3*proto.MinTxnFee + + // testSinkAddr: received fees from ALL transactions in the round + // (4 outer + 2 inner from first group + 1 from secondPayTxn = 7 total) + // Note: Fee sink accumulates for the entire round, unlike other accounts which are per-group + expectedSink := genesisBalance + 7*proto.MinTxnFee + + // addrs[2]: received 1_000_000 from payTxn (in this transaction group) + // Note: secondPayTxn is in a separate group and affects addrs[2], but the delta we're + // testing here (actualDelta from txgroup[0]) only includes the first group's effects + expectedAddr2 := genesisBalance + 1_000_000 + + // addrs[3]: received CloseRemainderTo from addrs[1] + // addrs[1]: started with genesisBalance, 
paid 1 fee (payTxn), sent 1_000_000, remainder to addrs[3] + expectedAddr3 := genesisBalance + (genesisBalance - proto.MinTxnFee - 1_000_000) + + // innerAppAddress: started with 1_000_000, spawned 3 inner txns (1 app call + 2 payments) + // Each inner txn pays its own fee from the innerAppAddress balance + // Total fees: 3 * proto.MinTxnFee + expectedInnerApp := uint64(1_000_000) - 3*proto.MinTxnFee + expectedAccts := ledgercore.AccountDeltas{ Accts: []ledgercore.BalanceRecord{ { Addr: addrs[0], AccountData: ledgercore.AccountData{ AccountBaseData: ledgercore.AccountBaseData{ - MicroAlgos: basics.MicroAlgos{Raw: 1666666666663666}, + MicroAlgos: basics.MicroAlgos{Raw: expectedAddr0}, TotalAppParams: 3, }, }, @@ -247,7 +274,7 @@ int 1` AccountData: ledgercore.AccountData{ AccountBaseData: ledgercore.AccountBaseData{ Status: basics.Status(2), - MicroAlgos: basics.MicroAlgos{Raw: 1666666666673666}, + MicroAlgos: basics.MicroAlgos{Raw: expectedSink}, }, }, }, @@ -268,7 +295,7 @@ int 1` Addr: addrs[2], AccountData: ledgercore.AccountData{ AccountBaseData: ledgercore.AccountBaseData{ - MicroAlgos: basics.MicroAlgos{Raw: 1666666667666666}, + MicroAlgos: basics.MicroAlgos{Raw: expectedAddr2}, }, }, }, @@ -276,7 +303,7 @@ int 1` Addr: addrs[3], AccountData: ledgercore.AccountData{ AccountBaseData: ledgercore.AccountBaseData{ - MicroAlgos: basics.MicroAlgos{Raw: 3333333332332332}, + MicroAlgos: basics.MicroAlgos{Raw: expectedAddr3}, }, }, }, @@ -284,7 +311,7 @@ int 1` Addr: innerAppAddress, AccountData: ledgercore.AccountData{ AccountBaseData: ledgercore.AccountBaseData{ - MicroAlgos: basics.MicroAlgos{Raw: 997000}, + MicroAlgos: basics.MicroAlgos{Raw: expectedInnerApp}, TotalAppParams: 1, }, }, diff --git a/ledger/eval_simple_test.go b/ledger/eval_simple_test.go index dfe051b578..cb650bfe56 100644 --- a/ledger/eval_simple_test.go +++ b/ledger/eval_simple_test.go @@ -223,10 +223,19 @@ func TestPayoutFees(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - // Lots 
of balance checks that would be messed up by rewards - genBalances, addrs, _ := ledgertesting.NewTestGenesis(ledgertesting.TurnOffRewards) payoutsBegin := 40 ledgertesting.TestConsensusRange(t, payoutsBegin-1, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + proto := config.Consensus[cv] + // Lots of balance checks that would be messed up by rewards + opts := []ledgertesting.TestGenesisOption{ledgertesting.TurnOffRewards} + // When payouts are enabled, set a starting feesink balance to ensure it drains by the end of the test + if ver >= payoutsBegin { + initialBalance := uint64(19_000_000 - 2*proto.MinTxnFee) + opts = append(opts, ledgertesting.InitialFeeSinkBalance(initialBalance)) + } + + // Create genesis with the appropriate options + genBalances, addrs, _ := ledgertesting.NewTestGenesis(opts...) dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() @@ -241,13 +250,14 @@ func TestPayoutFees(t *testing.T) { require.False(t, prp.IncentiveEligible) dl.txn(&txntest.Txn{ - Type: "keyreg", - Sender: proposer, - Fee: eFee, - VotePK: crypto.OneTimeSignatureVerifier{0x01}, - SelectionPK: crypto.VRFVerifier{0x02}, - StateProofPK: merklesignature.Commitment{0x03}, - VoteFirst: 1, VoteLast: 1000, + Type: "keyreg", + Sender: proposer, + Fee: eFee, + VotePK: crypto.OneTimeSignatureVerifier{0x01}, + SelectionPK: crypto.VRFVerifier{0x02}, + StateProofPK: merklesignature.Commitment{0x03}, + VoteKeyDilution: 1000, + VoteFirst: 1, VoteLast: 1000, }) prp = lookup(dl.t, dl.generator, proposer) @@ -266,7 +276,7 @@ func TestPayoutFees(t *testing.T) { Receiver: addrs[2], Amount: 100000, } - dl.txns(&pay, pay.Args("again")) + dl.txns(&pay, pay.Noted("again")) vb := dl.endBlock(proposer) postsink := micros(dl.t, dl.generator, genBalances.FeeSink) @@ -281,17 +291,17 @@ func TestPayoutFees(t *testing.T) { require.True(t, dl.generator.GenesisProto().Payouts.Enabled) // version sanity check require.NotZero(t, 
dl.generator.GenesisProto().Payouts.Percent) // version sanity check // new fields are in the header - require.EqualValues(t, 2000, vb.Block().FeesCollected.Raw) + require.EqualValues(t, 2*proto.MinTxnFee, vb.Block().FeesCollected.Raw) require.EqualValues(t, bonus1, vb.Block().Bonus.Raw) - require.EqualValues(t, bonus1+1_000, vb.Block().ProposerPayout().Raw) + require.EqualValues(t, bonus1+proto.MinTxnFee, vb.Block().ProposerPayout().Raw) // This last one is really only testing the "fake" agreement that // happens in dl.endBlock(). require.EqualValues(t, proposer, vb.Block().Proposer()) // At the end of the block, part of the fees + bonus have been moved to // the proposer. - require.EqualValues(t, bonus1+1_000, postprop-preprop) // based on 75% in config/consensus.go - require.EqualValues(t, bonus1-1_000, presink-postsink) + require.EqualValues(t, bonus1+proto.MinTxnFee, postprop-preprop) // based on 75% in config/consensus.go + require.EqualValues(t, bonus1-proto.MinTxnFee, presink-postsink) require.Equal(t, prp.LastProposed, dl.generator.Latest()) } else { require.False(t, dl.generator.GenesisProto().Payouts.Enabled) @@ -318,13 +328,9 @@ func TestPayoutFees(t *testing.T) { // Get the feesink down low, then drain it by proposing. feesink := vb.Block().FeeSink - data := lookup(t, dl.generator, feesink) - dl.txn(&txntest.Txn{ - Type: "pay", - Sender: feesink, - Receiver: addrs[1], - Amount: data.MicroAlgos.Raw - 12_000_000, - }) + dl.beginBlock() + dl.endBlock() + dl.beginBlock() dl.endBlock(proposer) require.EqualValues(t, micros(t, dl.generator, feesink), 2_000_000) @@ -378,11 +384,12 @@ func TestIncentiveEligible(t *testing.T) { // Keyreg to get online with various fees. 
Sufficient fee gets `smallest` eligible keyreg := txntest.Txn{ - Type: "keyreg", - VotePK: crypto.OneTimeSignatureVerifier{0x01}, - SelectionPK: crypto.VRFVerifier{0x02}, - StateProofPK: merklesignature.Commitment{0x03}, - VoteFirst: 1, VoteLast: 1000, + Type: "keyreg", + VotePK: crypto.OneTimeSignatureVerifier{0x01}, + SelectionPK: crypto.VRFVerifier{0x02}, + StateProofPK: merklesignature.Commitment{0x03}, + VoteKeyDilution: 1000, + VoteFirst: 1, VoteLast: 1000, } tooSmallKR := keyreg tooSmallKR.Sender = tooSmall @@ -485,11 +492,15 @@ func TestAbsentTracking(t *testing.T) { // have addrs[1] go online explicitly, which makes it eligible for suspension. // use a large fee, so we can see IncentiveEligible change vb := dl.fullBlock(&txntest.Txn{ // #2 - Type: "keyreg", - Fee: 10_000_000, - Sender: addrs[1], - VotePK: [32]byte{1}, - SelectionPK: [32]byte{1}, + Type: "keyreg", + Fee: 10_000_000, + Sender: addrs[1], + VotePK: [32]byte{1}, + SelectionPK: [32]byte{1}, + VoteKeyDilution: 1, + StateProofPK: merklesignature.Commitment{1}, + VoteFirst: 1, + VoteLast: 1000, }) addr1Keyreg := vb.Block().Round() require.EqualValues(t, 2, addr1Keyreg) // sanity check @@ -500,11 +511,14 @@ func TestAbsentTracking(t *testing.T) { require.True(t, lookup(t, dl.generator, addrs[0]).Status == basics.Online) require.True(t, lookup(t, dl.generator, addrs[1]).Status == basics.Online) require.False(t, lookup(t, dl.generator, addrs[2]).Status == basics.Online) - checkState(addrs[0], true, false, 833_333_333_333_333) // #3 - require.Equal(t, int(lookup(t, dl.generator, addrs[0]).MicroAlgos.Raw), 833_333_333_332_333) + proto := config.Consensus[cv] + initialBalance := uint64(833_333_333_333_333) + checkState(addrs[0], true, false, initialBalance) // #3 + // addr[0] paid one MinTxnFee for checkState transaction + require.Equal(t, int(lookup(t, dl.generator, addrs[0]).MicroAlgos.Raw), int(initialBalance-proto.MinTxnFee)) // although addr[1] just paid to be eligible, it won't be for 320 rounds - 
checkState(addrs[1], true, false, 833_333_333_333_333) // #4 - checkState(addrs[2], false, false, 0) // #5 + checkState(addrs[1], true, false, initialBalance) // #4 + checkState(addrs[2], false, false, 0) // #5 // genesis accounts don't begin IncentiveEligible, even if online require.False(t, lookup(t, dl.generator, addrs[0]).IncentiveEligible) @@ -538,7 +552,7 @@ func TestAbsentTracking(t *testing.T) { newtotals, err := dl.generator.Totals(dl.generator.Latest()) require.NoError(t, err) // payment and fee left the online account - require.Equal(t, totals.Online.Money.Raw-100_000-1000, newtotals.Online.Money.Raw) + require.Equal(t, totals.Online.Money.Raw-100_000-proto.MinTxnFee, newtotals.Online.Money.Raw) totals = newtotals printAbsent(dl.fullBlock()) @@ -558,10 +572,12 @@ func TestAbsentTracking(t *testing.T) { // ONLINE keyreg without extra fee vb = dl.fullBlock(&txntest.Txn{ - Type: "keyreg", - Sender: addrs[2], - VotePK: [32]byte{1}, - SelectionPK: [32]byte{1}, + Type: "keyreg", + Sender: addrs[2], + VotePK: [32]byte{1}, + SelectionPK: [32]byte{1}, + VoteKeyDilution: 1, + StateProofPK: merklesignature.Commitment{1}, }) // #10 printAbsent(vb) // online totals have grown, addr[2] was added @@ -574,20 +590,22 @@ func TestAbsentTracking(t *testing.T) { require.True(t, regger.Status == basics.Online) // But nothing has changed for voter_params_get, since we're not past 320 - checkState(addrs[0], true, false, 833_333_333_333_333) // #11 - checkState(addrs[1], true, false, 833_333_333_333_333) // #12 - checkState(addrs[2], false, false, 0) // #13 + checkState(addrs[0], true, false, initialBalance) // #11 + checkState(addrs[1], true, false, initialBalance) // #12 + checkState(addrs[2], false, false, 0) // #13 require.NotZero(t, regger.LastHeartbeat) // online keyreg caused update require.False(t, regger.IncentiveEligible) // ONLINE keyreg with extra fee vb = dl.fullBlock(&txntest.Txn{ - Type: "keyreg", - Fee: 2_000_000, - Sender: addrs[2], - VotePK: [32]byte{1}, - 
SelectionPK: [32]byte{1}, + Type: "keyreg", + Fee: 2_000_000, + Sender: addrs[2], + VotePK: [32]byte{1}, + SelectionPK: [32]byte{1}, + VoteKeyDilution: 1, + StateProofPK: merklesignature.Commitment{1}, }) // #14 printAbsent(vb) addr2Eligible := vb.Block().Round() @@ -630,20 +648,21 @@ func TestAbsentTracking(t *testing.T) { if rnd < 100 { // `vote_params_get` sees no changes in the early going, because it looks back 320 - checkState(addrs[1], true, false, 833_333_333_333_333) // this also advances a round! + checkState(addrs[1], true, false, initialBalance) // this also advances a round! // to avoid complications from advancing an extra round, we only do this check for 100 rounds } // addr[1] spent 10A on a fee in rnd 1, so online stake and eligibility adjusted in 323 if rnd == addr1Keyreg-2+lookback { - checkState(addrs[1], true, false, 833_333_333_333_333) // check occurs during reg+lookback-1 - checkState(addrs[1], true, true, 833_333_323_333_333) // check occurs during reg+lookback + checkState(addrs[1], true, false, initialBalance) // check occurs during reg+lookback-1 + checkState(addrs[1], true, true, initialBalance-10_000_000) // check occurs during reg+lookback } // watch the change across the round that addr2 becomes eligible (by spending 2A in keyreg) + // addr[2] received 100_000 from addr[1] at line 531, and paid fees for multiple checkState and keyreg txns if rnd == addr2Eligible-2+lookback { - checkState(addrs[2], true, false, 833_333_333_429_333) - checkState(addrs[2], true, true, 833_333_331_429_333) // after keyreg w/ 2A is effective + checkState(addrs[2], true, false, initialBalance+100_000-4*proto.MinTxnFee) + checkState(addrs[2], true, true, initialBalance+100_000-4*proto.MinTxnFee-2_000_000) // after keyreg w/ 2A is effective } if rnd > 20+lookback+skip { @@ -674,14 +693,35 @@ func TestAbsentTracking(t *testing.T) { // observe addr1 stake going to zero 320 rounds after knockoff if rnd == addr1off+lookback-2 { - checkState(addrs[1], true, true, 
833_333_323_188_333) + // addr[1]: initial - 10M fee (keyreg) - 100k payment - MinTxnFee (pay) - checkState fees + // checkState calls for addrs[1]: + // - 1 at round 4 (line 510) + // - 1 at round 12 (line 582) + // - 40-41 from loop at line 637, rounds 20-98, called every other round (line advances after checkState) + // - 2 at rounds 320-321 (lines 643-644) + // Total: 44-45 checkState calls. Before this checkState call, 44 fees have been paid. + numCheckStates := uint64(44) + checkState(addrs[1], true, true, initialBalance-10_000_000-100_000-proto.MinTxnFee-numCheckStates*proto.MinTxnFee) checkState(addrs[1], false, false, 0) addr1check = true } // observe addr2 stake going to zero 320 rounds after knockoff if rnd == addr2off+lookback-2 { - checkState(addrs[2], true, true, 833_333_331_427_333) // still "online" + // addr[2]: initial + 100k received - 2M fee (keyreg with high fee) - MinTxnFee (keyreg #10) - checkState fees + // checkState calls made by addr[2] before this point: + // - Line 511: 1 checkState + // - Line 583: 1 checkState + // - Lines 650-651: 2 checkState + // (the loop at line 637, rounds 20-99, does not check addr[2]) + // Total checkState fees before this call: 4 + // addr[2] also pays MinTxnFee for two keyregs: + // - the offline keyreg at line 551 + // - the online keyreg #10 at line 564 + // (the 2_000_000 incentive-eligible keyreg fee is accounted separately above) + numCheckStates := uint64(4) + numKeyregs := uint64(2) // offline keyreg at 551, online keyreg at 564 + checkState(addrs[2], true, true, initialBalance+100_000-2_000_000-numKeyregs*proto.MinTxnFee-numCheckStates*proto.MinTxnFee) // still "online" checkState(addrs[2], false, false, 0) addr2check = true } @@ -694,7 +734,8 @@ func TestAbsentTracking(t *testing.T) { require.True(t, addr1check) require.True(t, addr2check) - checkState(addrs[0], true, false, 833_333_333_331_333) // addr 0 didn't get
suspended (genesis) + // addr[0]: initial - 2 checkState fees (lines 506, 581) + checkState(addrs[0], true, false, initialBalance-2*proto.MinTxnFee) // addr 0 didn't get suspended (genesis) } ledgertesting.TestConsensusRange(t, checkingBegins, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { @@ -737,11 +778,13 @@ func TestAbsenteeChallenges(t *testing.T) { Receiver: guy, Amount: 10_000_000, }, &txntest.Txn{ - Type: "keyreg", - Fee: 5_000_000, // enough to be incentive eligible - Sender: guy, - VotePK: [32]byte{byte(i + 1)}, - SelectionPK: [32]byte{byte(i + 1)}, + Type: "keyreg", + Fee: 5_000_000, // enough to be incentive eligible + Sender: guy, + VotePK: [32]byte{byte(i + 1)}, + SelectionPK: [32]byte{byte(i + 1)}, + VoteKeyDilution: uint64(i + 1), + StateProofPK: merklesignature.Commitment{byte(i + 1)}, }) acct := lookup(t, dl.generator, guy) require.Equal(t, basics.Online, acct.Status) @@ -768,10 +811,12 @@ func TestAbsenteeChallenges(t *testing.T) { // regguy keyregs before he's caught, which is a heartbeat, he stays on as well vb := dl.fullBlock(&txntest.Txn{ - Type: "keyreg", // Does not pay extra fee, since he's still eligible - Sender: regguy, - VotePK: [32]byte{1}, - SelectionPK: [32]byte{1}, + Type: "keyreg", // Does not pay extra fee, since he's still eligible + Sender: regguy, + VotePK: [32]byte{1}, + SelectionPK: [32]byte{1}, + VoteKeyDilution: 1, + StateProofPK: merklesignature.Commitment{1}, }) require.Equal(t, basics.Round(1200), vb.Block().Round()) require.Empty(t, vb.Block().AbsentParticipationAccounts) @@ -834,10 +879,7 @@ func TestDoubleLedgerGetKnockoffCandidates(t *testing.T) { }) payoutsBegin := 40 - // txn to send in round 1, to change the balances to be different from genesis - payTxn := &txntest.Txn{Type: "pay", Sender: addrs[1], Receiver: addrs[2], Amount: 1_000_000} - - checkAccts := func(l *Ledger, rnd basics.Round, cv protocol.ConsensusVersion) { + checkAccts := func(l *Ledger, rnd basics.Round, cv 
protocol.ConsensusVersion, payTxnSender, payTxnReceiver basics.Address, payTxnAmount uint64) { accts, err := l.GetKnockOfflineCandidates(rnd, config.Consensus[cv]) require.NoError(t, err) require.NotEmpty(t, accts) @@ -854,20 +896,19 @@ func TestDoubleLedgerGetKnockoffCandidates(t *testing.T) { } } - // calculate expected balances after applying payTxn - payTxnReceiver := afterPayTxnOnlineAccts[payTxn.Receiver] - payTxnReceiver.MicroAlgosWithRewards.Raw += payTxn.Amount - payTxnSender := afterPayTxnOnlineAccts[payTxn.Sender] - payTxnSender.MicroAlgosWithRewards.Raw -= (payTxn.Amount + config.Consensus[cv].MinTxnFee) - afterPayTxnOnlineAccts[payTxn.Receiver] = payTxnReceiver - afterPayTxnOnlineAccts[payTxn.Sender] = payTxnSender - require.Equal(t, onlineCount, onlineCnt) require.Len(t, accts, onlineCnt) if rnd == 0 { // balances should be same as genesis require.Equal(t, genesisOnlineAccts, accts) } else { + // calculate expected balances after applying payTxn + receiver := afterPayTxnOnlineAccts[payTxnReceiver] + receiver.MicroAlgosWithRewards.Raw += payTxnAmount + sender := afterPayTxnOnlineAccts[payTxnSender] + sender.MicroAlgosWithRewards.Raw -= (payTxnAmount + config.Consensus[cv].MinTxnFee) + afterPayTxnOnlineAccts[payTxnReceiver] = receiver + afterPayTxnOnlineAccts[payTxnSender] = sender // balances > rnd 1 should reflect payTxn change require.Equal(t, afterPayTxnOnlineAccts, accts, "rnd %d", rnd) } @@ -878,11 +919,12 @@ func TestDoubleLedgerGetKnockoffCandidates(t *testing.T) { dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() - checkAccts(dl.generator, basics.Round(0), cv) - checkAccts(dl.validator, basics.Round(0), cv) + checkAccts(dl.generator, basics.Round(0), cv, basics.Address{}, basics.Address{}, 0) + checkAccts(dl.validator, basics.Round(0), cv, basics.Address{}, basics.Address{}, 0) // change two accounts' balances to be different from genesis - payTxn.GenesisHash = crypto.Digest{} // clear if set from previous run + // Create the pay 
transaction fresh for each consensus version so it uses the correct fee + payTxn := &txntest.Txn{Type: "pay", Sender: addrs[1], Receiver: addrs[2], Amount: 1_000_000} dl.fullBlock(payTxn) // run up to round 240 @@ -890,8 +932,8 @@ func TestDoubleLedgerGetKnockoffCandidates(t *testing.T) { upToRound := basics.Round(proto.StateProofInterval - proto.StateProofVotersLookback) require.Equal(t, basics.Round(240), upToRound) for rnd := dl.fullBlock().Block().Round(); rnd < upToRound; rnd = dl.fullBlock().Block().Round() { - checkAccts(dl.generator, rnd, cv) - checkAccts(dl.validator, rnd, cv) + checkAccts(dl.generator, rnd, cv, addrs[1], addrs[2], 1_000_000) + checkAccts(dl.validator, rnd, cv, addrs[1], addrs[2], 1_000_000) } }) } @@ -914,6 +956,8 @@ func TestVoterAccess(t *testing.T) { dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() + proto := config.Consensus[cv] + initialBalance := uint64(833_333_333_333_333) stib := dl.txn(&txntest.Txn{ Type: "appl", Sender: addrs[0], @@ -924,11 +968,14 @@ func TestVoterAccess(t *testing.T) { // have addrs[1] go online, though it won't be visible right away dl.txn(&txntest.Txn{ - Type: "keyreg", - Sender: addrs[1], - VotePK: [32]byte{0xaa}, - SelectionPK: [32]byte{0xbb}, + Type: "keyreg", + Sender: addrs[1], + VotePK: [32]byte{0xaa}, + SelectionPK: [32]byte{0xbb}, + VoteKeyDilution: 1000, + StateProofPK: merklesignature.Commitment{0xcc}, }) + roundAfterAddr1Keyreg := dl.generator.Latest() one := basics.Address{0xaa, 0x11} two := basics.Address{0xaa, 0x22} @@ -956,11 +1003,11 @@ func TestVoterAccess(t *testing.T) { require.Equal(t, int(total), int(binary.BigEndian.Uint64([]byte(logs[2])))) } - checkState(addrs[0], true, 833_333_333_333_333, 833_333_333_333_333) + checkState(addrs[0], true, initialBalance, initialBalance) // checking again because addrs[0] just paid a fee, but we show online balance hasn't changed yet - checkState(addrs[0], true, 833_333_333_333_333, 833_333_333_333_333) + checkState(addrs[0], true, 
initialBalance, initialBalance) for i := 1; i < 10; i++ { - checkState(addrs[i], false, 0, 833_333_333_333_333) + checkState(addrs[i], false, 0, initialBalance) } // Fund the new accounts and have them go online. @@ -971,26 +1018,39 @@ func TestVoterAccess(t *testing.T) { Receiver: addr, Amount: (uint64(i) + 1) * 1_000_000_000, }, &txntest.Txn{ - Type: "keyreg", - Sender: addr, - VotePK: [32]byte{byte(i + 1)}, - SelectionPK: [32]byte{byte(i + 1)}, + Type: "keyreg", + Sender: addr, + VotePK: [32]byte{byte(i + 1)}, + SelectionPK: [32]byte{byte(i + 1)}, + VoteKeyDilution: uint64(1000 + i), + StateProofPK: merklesignature.Commitment{byte(i + 1)}, }) } + roundAfterOneTwoThreeKeyreg := dl.generator.Latest() // they don't have online stake yet for _, addr := range []basics.Address{one, two, three} { - checkState(addr, false, 0, 833_333_333_333_333) + checkState(addr, false, 0, initialBalance) } + roundBeforeWait := dl.generator.Latest() for i := 0; i < 320; i++ { dl.fullBlock() } - // addr[1] is now visibly online. the total is across all five that are now online, minus various fees paid - checkState(addrs[1], true, 833_333_333_333_333-2000, 2*833_333_333_333_333-14000) + roundAfterWait := dl.generator.Latest() + // At this round, we look back 320 rounds for balance + balanceRound := roundAfterWait - 320 + t.Logf("Rounds: addr1 keyreg=%d, one/two/three keyreg=%d, before wait=%d, after wait=%d, balance round=%d", + roundAfterAddr1Keyreg, roundAfterOneTwoThreeKeyreg, roundBeforeWait, roundAfterWait, balanceRound) + // At round 339, looking back 320 rounds to round 19 for balance/stake. + // addrs[1] registered at round 2, so it IS counted (registered 17 rounds before lookback). + // one/two/three registered at round 16, so they are NOT counted yet (only 3 rounds before lookback). + // Accounts need 320 rounds after keyreg before being included in online stake calculations. 
+ checkState(addrs[1], true, initialBalance-2*proto.MinTxnFee, 2*initialBalance-14*proto.MinTxnFee) for i := 2; i < 10; i++ { // addrs[2-9] never came online - checkState(addrs[i], false, 0, 2*833_333_333_333_333-14000) + checkState(addrs[i], false, 0, 2*initialBalance-14*proto.MinTxnFee) } + // one/two/three show as individually online, but aren't in total stake yet (need 320 rounds from keyreg) for i, addr := range []basics.Address{one, two, three} { - checkState(addr, true, (uint64(i)+1)*1_000_000_000-2000, 2*833_333_333_333_333-14000) + checkState(addr, true, (uint64(i)+1)*1_000_000_000-2*proto.MinTxnFee, 2*initialBalance-14*proto.MinTxnFee) } }) } @@ -1408,10 +1468,10 @@ func TestMinBalanceChanges(t *testing.T) { proto := l.GenesisProto() // Check balance and min balance requirement changes - require.Equal(t, ad0init.MicroAlgos.Raw, ad0new.MicroAlgos.Raw+1000) // fee + require.Equal(t, ad0init.MicroAlgos.Raw, ad0new.MicroAlgos.Raw+proto.MinTxnFee) // fee reqs := proto.BalanceRequirements() require.Equal(t, ad0init.MinBalance(reqs).Raw, ad0new.MinBalance(reqs).Raw-100000) // create - require.Equal(t, ad5init.MicroAlgos.Raw, ad5new.MicroAlgos.Raw+1000) // fee + require.Equal(t, ad5init.MicroAlgos.Raw, ad5new.MicroAlgos.Raw+proto.MinTxnFee) // fee require.Equal(t, ad5init.MinBalance(reqs).Raw, ad5new.MinBalance(reqs).Raw-100000) // optin optOutTxn := txntest.Txn{ diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index 8c6bdf7b7e..3249efc50a 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -19,7 +19,6 @@ package ledger import ( "bytes" "context" - "errors" "fmt" "math/rand" "os" @@ -1169,7 +1168,7 @@ func testLedgerSingleTxApplyData(t *testing.T, version protocol.ConsensusVersion VoteLast: 10000, } - // depends on what the concensus is need to generate correct KeyregTxnFields. + // depends on what the consensus is need to generate correct KeyregTxnFields. 
if proto.EnableStateProofKeyregCheck { frst, lst := uint64(correctKeyregFields.VoteFirst), uint64(correctKeyregFields.VoteLast) store, err := db.MakeAccessor("test-DB", false, true) @@ -1761,7 +1760,7 @@ func TestLedgerVerifiesOldStateProofs(t *testing.T) { _, err = l.BlockHdr(basics.Round(proto.StateProofInterval)) require.Error(t, err) expectedErr := &ledgercore.ErrNoEntry{} - require.True(t, errors.As(err, expectedErr), fmt.Sprintf("got error %s", err)) + require.ErrorAs(t, err, expectedErr, fmt.Sprintf("got error %s", err)) l.acctsOnline.voters.votersMu.Lock() for k := range l.acctsOnline.voters.votersForRoundCache { @@ -2268,7 +2267,7 @@ } func resetAccountDBToV6(t *testing.T, l *Ledger) { - // reset tables and re-init again, similary to the catchpount apply code + // reset tables and re-init again, similarly to the catchpoint apply code // since the ledger has only genesis accounts, this recreates them err := l.trackerDBs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error { arw, err := tx.MakeAccountsWriter() @@ -2294,7 +2293,7 @@ return err0 } - if err0 := tx.Testing().AccountsUpdateSchemaTest(ctx); err != nil { + if err0 := tx.Testing().AccountsUpdateSchemaTest(ctx); err0 != nil { return err0 } @@ -2328,7 +2327,7 @@ func TestLedgerReloadTxTailHistoryAccess(t *testing.T) { l.Close() }() - // reset tables and re-init again, similary to the catchpount apply code + // reset tables and re-init again, similarly to the catchpoint apply code // since the ledger has only genesis accounts, this recreates them err = l.trackerDBs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error { arw, err := tx.MakeAccountsWriter() diff --git a/ledger/ledgercore/error.go b/ledger/ledgercore/error.go index f12af37dc7..d39890df6b 100644 --- a/ledger/ledgercore/error.go +++ b/ledger/ledgercore/error.go @@ -27,6 +27,17 @@ import ( //
ErrNoSpace indicates insufficient space for transaction in block var ErrNoSpace = errors.New("block does not have space for transaction") +// Verify each custom error type implements the error interface, and declare which are pointer/value receivers. +var ( + _ error = (*TxnNotWellFormedError)(nil) + _ error = (*TransactionInLedgerError)(nil) + _ error = (*LeaseInLedgerError)(nil) + _ error = BlockInLedgerError{} + _ error = ErrNoEntry{} + _ error = ErrNonSequentialBlockEval{} + _ error = (*TxGroupMalformedError)(nil) +) + // TxnNotWellFormedError indicates a transaction was not well-formed when evaluated by the BlockEvaluator // //msgp:ignore TxnNotWellFormedError @@ -44,7 +55,7 @@ type TransactionInLedgerError struct { } // Error satisfies builtin interface `error` -func (tile TransactionInLedgerError) Error() string { +func (tile *TransactionInLedgerError) Error() string { return fmt.Sprintf("transaction already in ledger: %v", tile.Txid) } diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go index 4297afc329..55d947ee28 100644 --- a/ledger/ledgercore/statedelta.go +++ b/ledger/ledgercore/statedelta.go @@ -33,7 +33,7 @@ const ( stateDeltaTargetOptimizationThreshold = uint64(50000000) ) -// ModifiedCreatable defines the changes to a single single creatable state +// ModifiedCreatable defines the changes to a single creatable state type ModifiedCreatable struct { // Type of the creatable: app or asset Ctype basics.CreatableType @@ -764,9 +764,7 @@ func (ad AccountDeltas) ApplyToBasicsAccountData(addr basics.Address, prev basic if acct.TotalAppParams > 0 || prev.AppParams != nil { result.AppParams = make(map[basics.AppIndex]basics.AppParams) - for aidx, params := range prev.AppParams { - result.AppParams[aidx] = params - } + maps.Copy(result.AppParams, prev.AppParams) for aapp, idx := range ad.appResourcesCache { if aapp.Address == addr { rec := ad.AppResources[idx] @@ -784,9 +782,7 @@ func (ad AccountDeltas) ApplyToBasicsAccountData(addr 
basics.Address, prev basic if acct.TotalAppLocalStates > 0 || prev.AppLocalStates != nil { result.AppLocalStates = make(map[basics.AppIndex]basics.AppLocalState) - for aidx, state := range prev.AppLocalStates { - result.AppLocalStates[aidx] = state - } + maps.Copy(result.AppLocalStates, prev.AppLocalStates) for aapp, idx := range ad.appResourcesCache { if aapp.Address == addr { rec := ad.AppResources[idx] @@ -804,9 +800,7 @@ func (ad AccountDeltas) ApplyToBasicsAccountData(addr basics.Address, prev basic if acct.TotalAssetParams > 0 || prev.AssetParams != nil { result.AssetParams = make(map[basics.AssetIndex]basics.AssetParams) - for aidx, params := range prev.AssetParams { - result.AssetParams[aidx] = params - } + maps.Copy(result.AssetParams, prev.AssetParams) for aapp, idx := range ad.assetResourcesCache { if aapp.Address == addr { rec := ad.AssetResources[idx] @@ -824,9 +818,7 @@ func (ad AccountDeltas) ApplyToBasicsAccountData(addr basics.Address, prev basic if acct.TotalAssets > 0 || prev.Assets != nil { result.Assets = make(map[basics.AssetIndex]basics.AssetHolding) - for aidx, params := range prev.Assets { - result.Assets[aidx] = params - } + maps.Copy(result.Assets, prev.Assets) for aapp, idx := range ad.assetResourcesCache { if aapp.Address == addr { rec := ad.AssetResources[idx] diff --git a/ledger/lrukv.go b/ledger/lrukv.go index 5b5c1a2bd2..75f01cb67d 100644 --- a/ledger/lrukv.go +++ b/ledger/lrukv.go @@ -131,10 +131,8 @@ func (m *lruKV) prune(newSize int) (removed int) { if m.kvs == nil { return } - for { - if len(m.kvs) <= newSize { - break - } + for len(m.kvs) > newSize { + back := m.kvList.Back() delete(m.kvs, back.Value.key) m.kvList.Remove(back) diff --git a/ledger/lruonlineaccts.go b/ledger/lruonlineaccts.go index 90bd69ebc1..47e39d6b9a 100644 --- a/ledger/lruonlineaccts.go +++ b/ledger/lruonlineaccts.go @@ -120,10 +120,8 @@ func (m *lruOnlineAccounts) prune(newSize int) (removed int) { if m.accounts == nil { return } - for { - if 
len(m.accounts) <= newSize { - break - } + for len(m.accounts) > newSize { + back := m.accountsList.Back() delete(m.accounts, back.Value.Addr) m.accountsList.Remove(back) diff --git a/ledger/lruresources.go b/ledger/lruresources.go index b869e23b07..303dd86e66 100644 --- a/ledger/lruresources.go +++ b/ledger/lruresources.go @@ -178,10 +178,8 @@ func (m *lruResources) prune(newSize int) (removed int) { if m.resources == nil { return } - for { - if len(m.resources) <= newSize { - break - } + for len(m.resources) > newSize { + back := m.resourcesList.Back() delete(m.resources, accountCreatable{address: back.Value.address, index: back.Value.Aidx}) m.resourcesList.Remove(back) diff --git a/ledger/msgp_gen.go b/ledger/msgp_gen.go index 3de57948f0..415c2bb6ae 100644 --- a/ledger/msgp_gen.go +++ b/ledger/msgp_gen.go @@ -5,7 +5,6 @@ package ledger import ( "github.com/algorand/msgp/msgp" - "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/ledger/encoded" "github.com/algorand/go-algorand/ledger/ledgercore" @@ -471,8 +470,6 @@ func (z *CatchpointFileHeader) MsgIsZero() bool { func CatchpointFileHeaderMaxSize() (s int) { s = 1 + 8 + msgp.Uint64Size + 14 + basics.RoundMaxSize() + 12 + basics.RoundMaxSize() + 14 + ledgercore.AccountTotalsMaxSize() + 14 + msgp.Uint64Size + 12 + msgp.Uint64Size + 9 + msgp.Uint64Size + 20 + msgp.Uint64Size + 23 + msgp.Uint64Size + 11 panic("Unable to determine max size: String type z.Catchpoint is unbounded") - s += 18 + crypto.DigestMaxSize() - return } // MarshalMsg implements msgp.Marshaler diff --git a/ledger/simple_test.go b/ledger/simple_test.go index 0377a29d54..d8c24c1c14 100644 --- a/ledger/simple_test.go +++ b/ledger/simple_test.go @@ -111,17 +111,20 @@ func nextBlock(t testing.TB, ledger *Ledger) *eval.BlockEvaluator { } func fillDefaults(t testing.TB, ledger *Ledger, eval *eval.BlockEvaluator, txn *txntest.Txn) { - if txn.GenesisHash.IsZero() && 
ledger.GenesisProto().SupportGenesisHash { + proto := eval.ConsensusParams() + if txn.GenesisHash.IsZero() && proto.SupportGenesisHash { txn.GenesisHash = ledger.GenesisHash() } if txn.FirstValid == 0 { txn.FirstValid = eval.Round() } - if txn.Type == protocol.KeyRegistrationTx && txn.VoteFirst == 0 { + if txn.Type == protocol.KeyRegistrationTx && txn.VoteFirst == 0 && + // check this is not an offline txn + (!txn.VotePK.IsEmpty() || !txn.SelectionPK.IsEmpty()) { txn.VoteFirst = eval.Round() } - txn.FillDefaults(ledger.GenesisProto()) + txn.FillDefaults(proto) } func txns(t testing.TB, ledger *Ledger, eval *eval.BlockEvaluator, txns ...*txntest.Txn) { diff --git a/ledger/simulation/simulation_eval_test.go b/ledger/simulation/simulation_eval_test.go index bfdc4e7df2..899dfc6b3f 100644 --- a/ledger/simulation/simulation_eval_test.go +++ b/ledger/simulation/simulation_eval_test.go @@ -9331,7 +9331,7 @@ func TestFixSigners(t *testing.T) { }) txgroup := txntest.Group(&noBalPay1, &appCall, &noBalPay2) - // Testing that our ledger lookup of accounts to retreive their AuthAddr does not crash + // Testing that our ledger lookup of accounts to retrieve their AuthAddr does not crash // and burn when the account is empty. 
return simulationTestCase{ diff --git a/ledger/simulation/simulator_test.go b/ledger/simulation/simulator_test.go index d8a13ed713..8e4a6d0b24 100644 --- a/ledger/simulation/simulator_test.go +++ b/ledger/simulation/simulator_test.go @@ -18,9 +18,11 @@ package simulation import ( "reflect" + "slices" "testing" "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" @@ -37,15 +39,13 @@ import ( // We want to be careful that the Algod ledger does not move on to another round // so we confirm here that all ledger methods which implicitly access the current round -// are overriden within the `simulatorLedger`. +// are overridden within the `simulatorLedger`. func TestNonOverridenDataLedgerMethodsUseRoundParameter(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - env := simulationtesting.PrepareSimulatorTest(t) - - // methods overriden by `simulatorLedger`` - overridenMethods := []string{ + // methods overridden by `simulatorLedger`` + overriddenMethods := []string{ "Latest", "LookupLatest", "LatestTotals", @@ -59,21 +59,14 @@ func TestNonOverridenDataLedgerMethodsUseRoundParameter(t *testing.T) { } methodIsSkipped := func(methodName string) bool { - for _, overridenMethod := range overridenMethods { - if overridenMethod == methodName { - return true - } + if slices.Contains(overriddenMethods, methodName) { + return true } - for _, excludedMethod := range excludedMethods { - if excludedMethod == methodName { - return true - } - } - return false + return slices.Contains(excludedMethods, methodName) } methodExistsInEvalLedger := func(methodName string) bool { - evalLedgerType := reflect.TypeOf((*eval.LedgerForEvaluator)(nil)).Elem() + evalLedgerType := reflect.TypeFor[eval.LedgerForEvaluator]() for i := 0; i < evalLedgerType.NumMethod(); i++ { if evalLedgerType.Method(i).Name == 
methodName { return true @@ -84,14 +77,14 @@ func TestNonOverridenDataLedgerMethodsUseRoundParameter(t *testing.T) { methodHasRoundParameter := func(methodType reflect.Type) bool { for i := 0; i < methodType.NumIn(); i++ { - if methodType.In(i) == reflect.TypeOf(basics.Round(0)) { + if methodType.In(i) == reflect.TypeFor[basics.Round]() { return true } } return false } - ledgerType := reflect.TypeOf(env.Ledger) + ledgerType := reflect.TypeFor[*data.Ledger]() for i := 0; i < ledgerType.NumMethod(); i++ { method := ledgerType.Method(i) if methodExistsInEvalLedger(method.Name) && !methodIsSkipped(method.Name) { diff --git a/ledger/store/trackerdb/data_test.go b/ledger/store/trackerdb/data_test.go index ba4efd94ef..1900ab3836 100644 --- a/ledger/store/trackerdb/data_test.go +++ b/ledger/store/trackerdb/data_test.go @@ -1249,14 +1249,14 @@ func TestBaseOnlineAccountDataReflect(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - require.Equal(t, 7, reflect.TypeOf(BaseOnlineAccountData{}).NumField(), "update all getters and setters for baseOnlineAccountData and change the field count") + require.Equal(t, 7, reflect.TypeFor[BaseOnlineAccountData]().NumField(), "update all getters and setters for baseOnlineAccountData and change the field count") } func TestBaseVotingDataReflect(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - require.Equal(t, 7, reflect.TypeOf(BaseVotingData{}).NumField(), "update all getters and setters for baseVotingData and change the field count") + require.Equal(t, 7, reflect.TypeFor[BaseVotingData]().NumField(), "update all getters and setters for baseVotingData and change the field count") } // TestBaseAccountDataDecodeEmpty ensures no surprises when decoding nil/empty data. 
diff --git a/ledger/store/trackerdb/generickv/migrations.go b/ledger/store/trackerdb/generickv/migrations.go index 0613daa4d0..3fc31ec7d5 100644 --- a/ledger/store/trackerdb/generickv/migrations.go +++ b/ledger/store/trackerdb/generickv/migrations.go @@ -103,7 +103,7 @@ func (m *migrator) Migrate(ctx context.Context) error { if m.currentVersion > m.targetVersion { return nil } - // upgrade the db one version at at time + // upgrade the db one version at a time for m.currentVersion < m.targetVersion { // run next version upgrade switch m.currentVersion { diff --git a/ledger/store/trackerdb/generickv/msgp_gen.go b/ledger/store/trackerdb/generickv/msgp_gen.go index 026677c1bc..c15ead158c 100644 --- a/ledger/store/trackerdb/generickv/msgp_gen.go +++ b/ledger/store/trackerdb/generickv/msgp_gen.go @@ -161,5 +161,4 @@ func (z *creatableEntry) MsgIsZero() bool { func CreatableEntryMaxSize() (s int) { s = 1 + 6 + basics.CreatableTypeMaxSize() + 12 panic("Unable to determine max size: Byteslice type z.CreatorAddr is unbounded") - return } diff --git a/ledger/store/trackerdb/msgp_gen.go b/ledger/store/trackerdb/msgp_gen.go index 9e522d305d..a2387a7e56 100644 --- a/ledger/store/trackerdb/msgp_gen.go +++ b/ledger/store/trackerdb/msgp_gen.go @@ -9,7 +9,6 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/crypto/merklesignature" "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/ledger/ledgercore" ) @@ -2504,15 +2503,6 @@ func (z *ResourcesData) MsgIsZero() bool { func ResourcesDataMaxSize() (s int) { s = 3 + 2 + msgp.Uint64Size + 2 + msgp.Uint32Size + 2 + msgp.BoolSize + 2 panic("Unable to determine max size: String type z.UnitName is unbounded") - s += 2 - panic("Unable to determine max size: String type z.AssetName is unbounded") - s += 2 - panic("Unable to determine max size: String type z.URL is unbounded") - 
s += 2 - // Calculating size of array: z.MetadataHash - s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize)) - s += 2 + basics.AddressMaxSize() + 2 + basics.AddressMaxSize() + 2 + basics.AddressMaxSize() + 2 + basics.AddressMaxSize() + 2 + msgp.Uint64Size + 2 + msgp.BoolSize + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + basics.TealKeyValueMaxSize() + 2 + msgp.BytesPrefixSize + bounds.MaxAvailableAppProgramLen + 2 + msgp.BytesPrefixSize + bounds.MaxAvailableAppProgramLen + 2 + basics.TealKeyValueMaxSize() + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint32Size + 2 + msgp.Uint8Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size - return } // MarshalMsg implements msgp.Marshaler @@ -2832,14 +2822,6 @@ func TxTailRoundMaxSize() (s int) { s = 1 + 2 // Calculating size of slice: z.TxnIDs panic("Slice z.TxnIDs is unbounded") - s += 2 - // Calculating size of slice: z.LastValid - panic("Slice z.LastValid is unbounded") - s += 2 - // Calculating size of slice: z.Leases - panic("Slice z.Leases is unbounded") - s += 2 + bookkeeping.BlockHeaderMaxSize() - return } // MarshalMsg implements msgp.Marshaler diff --git a/ledger/store/trackerdb/pebbledbdriver/pebbledriver.go b/ledger/store/trackerdb/pebbledbdriver/pebbledriver.go index a8de77c948..2e273d3a24 100644 --- a/ledger/store/trackerdb/pebbledbdriver/pebbledriver.go +++ b/ledger/store/trackerdb/pebbledbdriver/pebbledriver.go @@ -72,10 +72,7 @@ func Open(dbdir string, inMem bool, proto config.ConsensusParams, log logging.Lo maxMemTableSize := 4<<30 - 1 // Capped by 4 GB memTableLimit := 2 // default: 2 - memTableSize := cache * 1024 * 1024 / 2 / memTableLimit - if memTableSize > maxMemTableSize { - memTableSize = maxMemTableSize - } + memTableSize := min(cache*1024*1024/2/memTableLimit, maxMemTableSize) // configure pebbledb opts := &pebble.Options{ diff --git a/ledger/store/trackerdb/sqlitedriver/sql_test.go b/ledger/store/trackerdb/sqlitedriver/sql_test.go index 
5a890d8ded..7b48f1a352 100644 --- a/ledger/store/trackerdb/sqlitedriver/sql_test.go +++ b/ledger/store/trackerdb/sqlitedriver/sql_test.go @@ -19,7 +19,6 @@ package sqlitedriver import ( "context" "database/sql" - "errors" "testing" "github.com/algorand/go-algorand/data/basics" @@ -102,7 +101,7 @@ func TestWrapIOError(t *testing.T) { require.ErrorAs(t, wrapIOError(err), &trackerIOErr) err = sqlite3.Error{Code: sqlite3.ErrSchema} - require.False(t, errors.As(wrapIOError(err), &trackerIOErr)) + require.NotErrorAs(t, wrapIOError(err), &trackerIOErr) // confirm that double wrapping only applies once err = sqlite3.Error{Code: sqlite3.ErrIoErr} diff --git a/ledger/store/trackerdb/testinterface.go b/ledger/store/trackerdb/testinterface.go index 14e73b5c86..ce90faabb7 100644 --- a/ledger/store/trackerdb/testinterface.go +++ b/ledger/store/trackerdb/testinterface.go @@ -26,7 +26,7 @@ import ( ) // testinterface.go contains interface extensions specific to testing -// testing interfaces should be made accessible by calling the Testing() method +// Testing interfaces should be made accessible by calling the Testing() method // on the related interface. 
Example: // testTx := tx.Testing() // these can also be inlined: diff --git a/ledger/store/trackerdb/testsuite/dbsemantics_test.go b/ledger/store/trackerdb/testsuite/dbsemantics_test.go index b1cdf9956f..d38f2ddf34 100644 --- a/ledger/store/trackerdb/testsuite/dbsemantics_test.go +++ b/ledger/store/trackerdb/testsuite/dbsemantics_test.go @@ -75,7 +75,7 @@ func CustomTestTransaction(t *customT) { }) require.NoError(t, err) - // read the updated record outside the transaction to make sure it was commited + // read the updated record outside the transaction to make sure it was committed padA, err := aor.LookupAccount(addrA) require.NoError(t, err) require.Equal(t, uint64(98287), padA.AccountData.RewardsBase) // same updated data diff --git a/ledger/testing/randomAccounts_test.go b/ledger/testing/randomAccounts_test.go index 927d63fe82..bb5d68df91 100644 --- a/ledger/testing/randomAccounts_test.go +++ b/ledger/testing/randomAccounts_test.go @@ -30,7 +30,7 @@ func TestAccounts(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - accountDataType := reflect.TypeOf(basics.AccountData{}) + accountDataType := reflect.TypeFor[basics.AccountData]() referencedAccountTypes := make([]reflectionhelpers.TypePath, 0) reflectionhelpers.IterateReferencedTypes(accountDataType, func(path reflectionhelpers.TypePath, stack []reflect.Type) bool { diff --git a/ledger/testing/testGenesis.go b/ledger/testing/testGenesis.go index 69a5bbcc86..353f68b5bd 100644 --- a/ledger/testing/testGenesis.go +++ b/ledger/testing/testGenesis.go @@ -28,6 +28,7 @@ import ( // GenesisCfg provides a configuration object for NewTestGenesis. type GenesisCfg struct { rewardsPoolAmount basics.MicroAlgos + feeSinkAmount basics.MicroAlgos OnlineCount int } @@ -38,6 +39,14 @@ type TestGenesisOption func(*GenesisCfg) // "surprise" balance changes. 
var TurnOffRewards = func(cfg *GenesisCfg) { cfg.rewardsPoolAmount = basics.MicroAlgos{Raw: 100_000} } +// InitialFeeSinkBalance sets the initial balance of the fee sink to a specific value. +// This is useful for tests that need precise control over the fee sink balance. +func InitialFeeSinkBalance(microAlgos uint64) TestGenesisOption { + return func(cfg *GenesisCfg) { + cfg.feeSinkAmount = basics.MicroAlgos{Raw: microAlgos} + } +} + // NewTestGenesis creates a bunch of accounts, splits up 10B algos // between them and the rewardspool and feesink, and gives out the // addresses and secrets it creates to enable tests. For special @@ -88,8 +97,12 @@ func NewTestGenesis(opts ...TestGenesisOption) (bookkeeping.GenesisBalances, []b accts[addrs[i]] = adata } + feeSinkBal := basics.MicroAlgos{Raw: amount} + if cfg.feeSinkAmount.Raw > 0 { + feeSinkBal = cfg.feeSinkAmount + } accts[sink] = basics.AccountData{ - MicroAlgos: basics.MicroAlgos{Raw: amount}, + MicroAlgos: feeSinkBal, Status: basics.NotParticipating, } diff --git a/ledger/txtail.go b/ledger/txtail.go index 0f657f1fb4..51fed0cb39 100644 --- a/ledger/txtail.go +++ b/ledger/txtail.go @@ -400,10 +400,8 @@ func (t *txTail) recentTailHash(offset uint64, retainSize uint64) (crypto.Digest buffer := make([]byte, (retainSize)*crypto.DigestSize) bufIdx := 0 t.tailMu.RLock() - lastOffset := offset + retainSize // size of interval [offset, lastOffset) is retainSize - if lastOffset > uint64(len(t.roundTailHashes)) { - lastOffset = uint64(len(t.roundTailHashes)) - } + // size of interval [offset, lastOffset) is retainSize + lastOffset := min(offset+retainSize, uint64(len(t.roundTailHashes))) for i := offset; i < lastOffset; i++ { copy(buffer[bufIdx:], t.roundTailHashes[i][:]) bufIdx += crypto.DigestSize diff --git a/ledger/txtail_test.go b/ledger/txtail_test.go index 70ef924f0a..a49c052338 100644 --- a/ledger/txtail_test.go +++ b/ledger/txtail_test.go @@ -87,7 +87,7 @@ func TestTxTailCheckdup(t *testing.T) { require.Errorf(t, 
err, "round %d", rnd) if rnd < lastRound-lookback-txvalidity-1 { var missingRoundErr *errTxTailMissingRound - require.Truef(t, errors.As(err, &missingRoundErr), "error a errTxTailMissingRound(%d) : %v ", rnd, err) + require.ErrorAsf(t, err, &missingRoundErr, "error a errTxTailMissingRound(%d) : %v ", rnd, err) } else { var txInLedgerErr *ledgercore.TransactionInLedgerError require.Truef(t, errors.As(err, &txInLedgerErr), "error a TransactionInLedgerError(%d) : %v ", rnd, err) @@ -101,7 +101,7 @@ func TestTxTailCheckdup(t *testing.T) { require.Errorf(t, err, "round %d", rnd) if rnd < lastRound-lookback-1 { var missingRoundErr *errTxTailMissingRound - require.Truef(t, errors.As(err, &missingRoundErr), "error a errTxTailMissingRound(%d) : %v ", rnd, err) + require.ErrorAsf(t, err, &missingRoundErr, "error a errTxTailMissingRound(%d) : %v ", rnd, err) } else { var leaseInLedgerErr *ledgercore.LeaseInLedgerError require.Truef(t, errors.As(err, &leaseInLedgerErr), "error a LeaseInLedgerError(%d) : %v ", rnd, err) diff --git a/logging/cyclicWriter.go b/logging/cyclicWriter.go index 978052d105..f2f75c7831 100644 --- a/logging/cyclicWriter.go +++ b/logging/cyclicWriter.go @@ -118,7 +118,7 @@ func procWait(cmd *exec.Cmd, cause string) { } } -// Write ensures the the underlying file can store an additional len(p) bytes. If there is not enough room left it seeks +// Write ensures the underlying file can store an additional len(p) bytes. If there is not enough room left it seeks // to the beginning of the file. 
func (cyclic *CyclicFileWriter) Write(p []byte) (n int, err error) { cyclic.mu.Lock() diff --git a/logging/telemetry.go b/logging/telemetry.go index acdd3f4ddf..8e69ec02a4 100644 --- a/logging/telemetry.go +++ b/logging/telemetry.go @@ -115,7 +115,7 @@ func ReadTelemetryConfigOrDefault(dataDir string, globalDir string) (cfg Telemet // Create an ephemeral config cfg = createTelemetryConfig() - // If the error was that the the config wasn't there then it wasn't really an error + // If the error was that the config wasn't there then it wasn't really an error if os.IsNotExist(err) { err = nil } else { diff --git a/logging/telemetryspec/metric.go b/logging/telemetryspec/metric.go index a466f0615c..941e0c7b33 100644 --- a/logging/telemetryspec/metric.go +++ b/logging/telemetryspec/metric.go @@ -112,37 +112,37 @@ func (m AssembleBlockMetrics) Identifier() Metric { } func (m AssembleBlockStats) String() string { b := &bytes.Buffer{} - b.WriteString(fmt.Sprintf("StartCount:%d, ", m.StartCount)) - b.WriteString(fmt.Sprintf("IncludedCount:%d, ", m.IncludedCount)) - b.WriteString(fmt.Sprintf("InvalidCount:%d, ", m.InvalidCount)) - b.WriteString(fmt.Sprintf("MinFeeErrorCount:%d, ", m.MinFeeErrorCount)) - b.WriteString(fmt.Sprintf("LogicErrorCount:%d, ", m.LogicErrorCount)) - b.WriteString(fmt.Sprintf("ExpiredCount:%d, ", m.ExpiredCount)) - b.WriteString(fmt.Sprintf("ExpiredLongLivedCount:%d, ", m.ExpiredLongLivedCount)) - b.WriteString(fmt.Sprintf("LeaseErrorCount:%d, ", m.LeaseErrorCount)) - b.WriteString(fmt.Sprintf("MinFee:%d, ", m.MinFee)) - b.WriteString(fmt.Sprintf("MaxFee:%d, ", m.MaxFee)) - b.WriteString(fmt.Sprintf("AverageFee:%d, ", m.AverageFee)) - b.WriteString(fmt.Sprintf("MinLength:%d, ", m.MinLength)) - b.WriteString(fmt.Sprintf("MaxLength:%d, ", m.MaxLength)) - b.WriteString(fmt.Sprintf("MinPriority:%d, ", m.MinPriority)) - b.WriteString(fmt.Sprintf("MaxPriority:%d, ", m.MaxPriority)) - b.WriteString(fmt.Sprintf("CommittedCount:%d, ", m.CommittedCount)) - 
b.WriteString(fmt.Sprintf("StopReason:%s, ", m.StopReason)) - b.WriteString(fmt.Sprintf("TotalLength:%d, ", m.TotalLength)) - b.WriteString(fmt.Sprintf("EarlyCommittedCount:%d, ", m.EarlyCommittedCount)) - b.WriteString(fmt.Sprintf("Nanoseconds:%d, ", m.Nanoseconds)) - b.WriteString(fmt.Sprintf("ProcessingTime:%v, ", m.ProcessingTime)) - b.WriteString(fmt.Sprintf("BlockGenerationDuration:%d, ", m.BlockGenerationDuration)) - b.WriteString(fmt.Sprintf("TransactionsLoopStartTime:%d, ", m.TransactionsLoopStartTime)) - b.WriteString(fmt.Sprintf("StateProofNextRound:%d, ", m.StateProofNextRound)) + fmt.Fprintf(b, "StartCount:%d, ", m.StartCount) + fmt.Fprintf(b, "IncludedCount:%d, ", m.IncludedCount) + fmt.Fprintf(b, "InvalidCount:%d, ", m.InvalidCount) + fmt.Fprintf(b, "MinFeeErrorCount:%d, ", m.MinFeeErrorCount) + fmt.Fprintf(b, "LogicErrorCount:%d, ", m.LogicErrorCount) + fmt.Fprintf(b, "ExpiredCount:%d, ", m.ExpiredCount) + fmt.Fprintf(b, "ExpiredLongLivedCount:%d, ", m.ExpiredLongLivedCount) + fmt.Fprintf(b, "LeaseErrorCount:%d, ", m.LeaseErrorCount) + fmt.Fprintf(b, "MinFee:%d, ", m.MinFee) + fmt.Fprintf(b, "MaxFee:%d, ", m.MaxFee) + fmt.Fprintf(b, "AverageFee:%d, ", m.AverageFee) + fmt.Fprintf(b, "MinLength:%d, ", m.MinLength) + fmt.Fprintf(b, "MaxLength:%d, ", m.MaxLength) + fmt.Fprintf(b, "MinPriority:%d, ", m.MinPriority) + fmt.Fprintf(b, "MaxPriority:%d, ", m.MaxPriority) + fmt.Fprintf(b, "CommittedCount:%d, ", m.CommittedCount) + fmt.Fprintf(b, "StopReason:%s, ", m.StopReason) + fmt.Fprintf(b, "TotalLength:%d, ", m.TotalLength) + fmt.Fprintf(b, "EarlyCommittedCount:%d, ", m.EarlyCommittedCount) + fmt.Fprintf(b, "Nanoseconds:%d, ", m.Nanoseconds) + fmt.Fprintf(b, "ProcessingTime:%v, ", m.ProcessingTime) + fmt.Fprintf(b, "BlockGenerationDuration:%d, ", m.BlockGenerationDuration) + fmt.Fprintf(b, "TransactionsLoopStartTime:%d, ", m.TransactionsLoopStartTime) + fmt.Fprintf(b, "StateProofNextRound:%d, ", m.StateProofNextRound) emptySPStats := StateProofStats{} if 
m.StateProofStats != emptySPStats { - b.WriteString(fmt.Sprintf("ProvenWeight:%d, ", m.StateProofStats.ProvenWeight)) - b.WriteString(fmt.Sprintf("SignedWeight:%d, ", m.StateProofStats.SignedWeight)) - b.WriteString(fmt.Sprintf("NumReveals:%d, ", m.StateProofStats.NumReveals)) - b.WriteString(fmt.Sprintf("NumPosToReveal:%d, ", m.StateProofStats.NumPosToReveal)) - b.WriteString(fmt.Sprintf("TxnSize:%d", m.StateProofStats.TxnSize)) + fmt.Fprintf(b, "ProvenWeight:%d, ", m.StateProofStats.ProvenWeight) + fmt.Fprintf(b, "SignedWeight:%d, ", m.StateProofStats.SignedWeight) + fmt.Fprintf(b, "NumReveals:%d, ", m.StateProofStats.NumReveals) + fmt.Fprintf(b, "NumPosToReveal:%d, ", m.StateProofStats.NumPosToReveal) + fmt.Fprintf(b, "TxnSize:%d", m.StateProofStats.TxnSize) } return b.String() } diff --git a/logging/telemetryspec/metric_test.go b/logging/telemetryspec/metric_test.go index a626494c9a..af5a08eba7 100644 --- a/logging/telemetryspec/metric_test.go +++ b/logging/telemetryspec/metric_test.go @@ -68,7 +68,7 @@ func TestAssembleBlockStatsString(t *testing.T) { partitiontest.PartitionTest(t) var abs AssembleBlockStats - localType := reflect.TypeOf(abs) + localType := reflect.TypeFor[AssembleBlockStats]() // Empty StateProofStats will not be reported. Set a filed to check it printed abs.StateProofStats.ProvenWeight = 1 diff --git a/netdeploy/networkTemplate.go b/netdeploy/networkTemplate.go index 92a7691e16..85f63997ed 100644 --- a/netdeploy/networkTemplate.go +++ b/netdeploy/networkTemplate.go @@ -48,7 +48,7 @@ type NetworkTemplate struct { // TemplateKMDConfig is a subset of the kmd configuration that can be overridden in the network template // by using OverrideKmdConfig TemplateOverride opts. -// The reason why config.KMDConfig cannot be used directly is that it contains DataDir field which is +// The reason why config.KMDConfig cannot be used directly is that it contains DataDir field which // is not known until the template instantiation. 
type TemplateKMDConfig struct { SessionLifetimeSecs uint64 diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go index 9ba0e1a02f..14ada96393 100644 --- a/netdeploy/remote/deployedNetwork.go +++ b/netdeploy/remote/deployedNetwork.go @@ -421,11 +421,7 @@ func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, g minAccounts := accountsNeeded(fileCfgs.GeneratedApplicationCount, fileCfgs.GeneratedAssetsCount, params) nAccounts := fileCfgs.GeneratedAccountsCount - if minAccounts > nAccounts { - bootstrappedNet.nAccounts = minAccounts - } else { - bootstrappedNet.nAccounts = nAccounts - } + bootstrappedNet.nAccounts = max(minAccounts, nAccounts) //fund src account with enough funding rand.Seed(time.Now().UnixNano()) diff --git a/netdeploy/remote/nodecfg/nodeConfigurator.go b/netdeploy/remote/nodecfg/nodeConfigurator.go index fd2e30c0e8..ab26d091ff 100644 --- a/netdeploy/remote/nodecfg/nodeConfigurator.go +++ b/netdeploy/remote/nodecfg/nodeConfigurator.go @@ -123,10 +123,6 @@ func (nc *nodeConfigurator) apply(rootConfigDir, rootNodeDir string) (err error) func (nc *nodeConfigurator) prepareNodeDirs(configs []remote.NodeConfig, rootConfigDir, rootNodeDir string) (nodeDirs []nodeDir, err error) { rootHostDir := filepath.Join(rootConfigDir, "hosts", nc.config.Name) - if err != nil { - err = fmt.Errorf("error loading genesis data: %v", err) - return - } genesisDir := nc.genesisData.ID() // Importing root keys is complicated - just use goal's support for it diff --git a/network/addr.go b/network/addr.go index 8fcc140883..402f1e4c76 100644 --- a/network/addr.go +++ b/network/addr.go @@ -34,6 +34,6 @@ func (wn *WebsocketNetwork) addrToGossipAddr(a string) (string, error) { if parsedURL.Scheme == "" { parsedURL.Scheme = "ws" } - parsedURL.Path = strings.Replace(path.Join(parsedURL.Path, GossipNetworkPath), "{genesisID}", wn.GetGenesisID(), -1) + parsedURL.Path = strings.ReplaceAll(path.Join(parsedURL.Path, 
GossipNetworkPath), "{genesisID}", wn.GetGenesisID()) return parsedURL.String(), nil } diff --git a/network/connPerfMon.go b/network/connPerfMon.go index c6e2b576e3..1399e17097 100644 --- a/network/connPerfMon.go +++ b/network/connPerfMon.go @@ -23,6 +23,7 @@ import ( "github.com/algorand/go-deadlock" "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/logging" ) //msgp:ignore pmStage @@ -384,3 +385,127 @@ func (pm *connectionPerformanceMonitor) accumulateMessage(msg *IncomingMessage, delete(msgBucket.messages, msgDigest) } } + +type networkAdvanceMonitor struct { + // lastNetworkAdvance contains the last timestamp where the agreement protocol was able to make a notable progress. + // it used as a watchdog to help us detect connectivity issues ( such as cliques ) + lastNetworkAdvance time.Time + + mu deadlock.Mutex +} + +func makeNetworkAdvanceMonitor() *networkAdvanceMonitor { + return &networkAdvanceMonitor{ + lastNetworkAdvance: time.Now().UTC(), + } +} + +func (m *networkAdvanceMonitor) lastAdvancedWithin(interval time.Duration) bool { + m.mu.Lock() + defer m.mu.Unlock() + // now < last + interval <=> now - last < interval + return time.Now().UTC().Before(m.lastNetworkAdvance.Add(interval)) +} + +func (m *networkAdvanceMonitor) updateLastAdvance() { + m.mu.Lock() + defer m.mu.Unlock() + m.lastNetworkAdvance = time.Now().UTC() +} + +type outgoingConnsCloser struct { + log logging.Logger + net outgoingDisconnectable + cliqueResolveInterval time.Duration + connPerfMonitor *connectionPerformanceMonitor + netAdvMonitor *networkAdvanceMonitor +} + +type outgoingDisconnectable interface { + outgoingPeers() (peers []Peer) + numOutgoingPending() int + disconnect(badnode Peer, reason disconnectReason) + OnNetworkAdvance() +} + +func makeOutgoingConnsCloser(log logging.Logger, net outgoingDisconnectable, connPerfMonitor *connectionPerformanceMonitor, cliqueResolveInterval time.Duration) *outgoingConnsCloser { + return &outgoingConnsCloser{ + log: 
log, + net: net, + cliqueResolveInterval: cliqueResolveInterval, + connPerfMonitor: connPerfMonitor, + netAdvMonitor: makeNetworkAdvanceMonitor(), + } +} + +// checkExistingConnectionsNeedDisconnecting check to see if existing connection need to be dropped due to +// performance issues and/or network being stalled. +func (cc *outgoingConnsCloser) checkExistingConnectionsNeedDisconnecting(targetConnCount int) bool { + // we already connected ( or connecting.. ) to GossipFanout peers. + // get the actual peers. + outgoingPeers := cc.net.outgoingPeers() + if len(outgoingPeers) < targetConnCount { + // reset the performance monitor. + cc.connPerfMonitor.Reset([]Peer{}) + return cc.checkNetworkAdvanceDisconnect() + } + + if !cc.connPerfMonitor.ComparePeers(outgoingPeers) { + // different set of peers. restart monitoring. + cc.connPerfMonitor.Reset(outgoingPeers) + } + + // same set of peers. + peerStat := cc.connPerfMonitor.GetPeersStatistics() + if peerStat == nil { + // performance metrics are not yet ready. + return cc.checkNetworkAdvanceDisconnect() + } + + // update peers with the performance metrics we've gathered. + var leastPerformingPeer *wsPeer = nil + for _, stat := range peerStat.peerStatistics { + wsPeer := stat.peer.(*wsPeer) + wsPeer.peerMessageDelay = stat.peerDelay + cc.log.Infof("network performance monitor - peer '%s' delay %d first message portion %d%%", wsPeer.GetAddress(), stat.peerDelay, int(stat.peerFirstMessage*100)) + if wsPeer.throttledOutgoingConnection && leastPerformingPeer == nil { + leastPerformingPeer = wsPeer + } + } + if leastPerformingPeer == nil { + return cc.checkNetworkAdvanceDisconnect() + } + cc.net.disconnect(leastPerformingPeer, disconnectLeastPerformingPeer) + cc.connPerfMonitor.Reset([]Peer{}) + + return true +} + +// checkNetworkAdvanceDisconnect is using the lastNetworkAdvance indicator to see if the network is currently "stuck". +// if it's seems to be "stuck", a randomly picked peer would be disconnected. 
+func (cc *outgoingConnsCloser) checkNetworkAdvanceDisconnect() bool { + if cc.netAdvMonitor.lastAdvancedWithin(cc.cliqueResolveInterval) { + return false + } + outgoingPeers := cc.net.outgoingPeers() + if len(outgoingPeers) == 0 { + return false + } + if cc.net.numOutgoingPending() > 0 { + // we're currently trying to extend the list of outgoing connections. no need to + // disconnect any existing connection to free up room for another connection. + return false + } + var peer *wsPeer + disconnectPeerIdx := crypto.RandUint63() % uint64(len(outgoingPeers)) + peer = outgoingPeers[disconnectPeerIdx].(*wsPeer) + + cc.net.disconnect(peer, disconnectCliqueResolve) + cc.connPerfMonitor.Reset([]Peer{}) + cc.net.OnNetworkAdvance() + return true +} + +func (cc *outgoingConnsCloser) updateLastAdvance() { + cc.netAdvMonitor.updateLastAdvance() +} diff --git a/network/connPerfMon_test.go b/network/connPerfMon_test.go index 3be53b0922..9aae9ac029 100644 --- a/network/connPerfMon_test.go +++ b/network/connPerfMon_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -92,7 +93,7 @@ func BenchmarkConnMonitor(b *testing.B) { } } -func TestConnMonitorStageTiming(t *testing.T) { +func TestConnMonitor_StageTiming(t *testing.T) { partitiontest.PartitionTest(t) peers := []Peer{&wsPeer{}, &wsPeer{}, &wsPeer{}, &wsPeer{}} @@ -130,7 +131,7 @@ func TestConnMonitorStageTiming(t *testing.T) { } } -func TestBucketsPruning(t *testing.T) { +func TestConnMonitor_BucketsPruning(t *testing.T) { partitiontest.PartitionTest(t) bucketsCount := 100 @@ -160,3 +161,82 @@ func TestBucketsPruning(t *testing.T) { require.Equal(t, bucketsCount-i, len(perfMonitor.pendingMessagesBuckets)) } } + +type mockOutgoingNet struct { + peers []Peer + pending int + disconnectedPeer Peer + disconnectReason 
disconnectReason + advanceCalled bool +} + +func (m *mockOutgoingNet) outgoingPeers() (peers []Peer) { return m.peers } +func (m *mockOutgoingNet) numOutgoingPending() int { return m.pending } +func (m *mockOutgoingNet) disconnect(badnode Peer, reason disconnectReason) { + m.disconnectedPeer = badnode + m.disconnectReason = reason +} +func (m *mockOutgoingNet) OnNetworkAdvance() { m.advanceCalled = true } + +func TestConnMonitor_CheckExistingConnections_ThrottledPeers(t *testing.T) { + partitiontest.PartitionTest(t) + mon := makeConnectionPerformanceMonitor(nil) + + p1 := &wsPeer{throttledOutgoingConnection: true} + mockNet := &mockOutgoingNet{peers: []Peer{p1}} + cc := makeOutgoingConnsCloser(logging.TestingLog(t), mockNet, mon, 100*time.Second) + + res := cc.checkExistingConnectionsNeedDisconnecting(2) + require.False(t, res) + require.Nil(t, mockNet.disconnectedPeer) + + p2 := &wsPeer{throttledOutgoingConnection: false} // not throttled + mockNet = &mockOutgoingNet{peers: []Peer{p1, p2}} + cc = makeOutgoingConnsCloser(logging.TestingLog(t), mockNet, mon, 100*time.Second) + + mon.Reset(mockNet.peers) + mon.stage = pmStageStopped + mon.connectionDelay = map[Peer]int64{p1: 20, p2: 10} + mon.firstMessageCount = map[Peer]int64{p1: 1, p2: 2} + mon.msgCount = 3 + + res = cc.checkExistingConnectionsNeedDisconnecting(2) + require.True(t, res, "expected disconnect") + require.Equal(t, p1, mockNet.disconnectedPeer) + require.Equal(t, disconnectLeastPerformingPeer, mockNet.disconnectReason) +} + +func TestConnMonitor_CheckExistingConnections_NoThrottledPeers(t *testing.T) { + partitiontest.PartitionTest(t) + mon := makeConnectionPerformanceMonitor(nil) + p1 := &wsPeer{throttledOutgoingConnection: false} + p2 := &wsPeer{throttledOutgoingConnection: false} + mockNet := &mockOutgoingNet{peers: []Peer{p1, p2}} + cc := makeOutgoingConnsCloser(logging.TestingLog(t), mockNet, mon, 0) + mon.Reset(mockNet.peers) + mon.stage = pmStageStopped + mon.connectionDelay = map[Peer]int64{p1: 
5, p2: 6} + mon.firstMessageCount = map[Peer]int64{p1: 1, p2: 1} + mon.msgCount = 2 + + res := cc.checkExistingConnectionsNeedDisconnecting(2) + require.True(t, res) + require.NotNil(t, mockNet.disconnectedPeer) + require.NotEqual(t, disconnectLeastPerformingPeer, mockNet.disconnectReason) +} + +func TestNetworkAdvanceMonitor(t *testing.T) { + partitiontest.PartitionTest(t) + m := makeNetworkAdvanceMonitor() + + require.True(t, m.lastAdvancedWithin(500*time.Millisecond)) + + m.mu.Lock() + m.lastNetworkAdvance = time.Now().Add(-2 * time.Second) + m.mu.Unlock() + require.False(t, m.lastAdvancedWithin(500*time.Millisecond), "expected false after stale interval") + + // update and verify within again + m.updateLastAdvance() + require.True(t, m.lastAdvancedWithin(500*time.Millisecond)) +} diff --git a/network/gossipNode.go b/network/gossipNode.go index cfd43e48fa..9594ee5e11 100644 --- a/network/gossipNode.go +++ b/network/gossipNode.go @@ -224,7 +224,7 @@ func (f HandlerFunc) Handle(message IncomingMessage) OutgoingMessage { // MessageValidatorHandler takes a IncomingMessage (e.g., vote, transaction), processes it, and returns what (if anything) // to send to the network in response. // it supposed to perform synchronous validation and return the result of the validation -// so that network knows immediately if the message should be be broadcasted or not. +// so that network knows immediately if the message should be broadcasted or not. type MessageValidatorHandler interface { ValidateHandle(message IncomingMessage) OutgoingMessage } @@ -257,5 +257,5 @@ func Propagate(msg IncomingMessage) OutgoingMessage { // SubstituteGenesisID substitutes the "{genesisID}" with their network-specific genesisID. 
func SubstituteGenesisID(net GossipNode, rawURL string) string { - return strings.Replace(rawURL, "{genesisID}", net.GetGenesisID(), -1) + return strings.ReplaceAll(rawURL, "{genesisID}", net.GetGenesisID()) } diff --git a/network/hybridNetwork.go b/network/hybridNetwork.go index 1f2545b149..45c49d840c 100644 --- a/network/hybridNetwork.go +++ b/network/hybridNetwork.go @@ -49,16 +49,17 @@ func NewHybridP2PNetwork(log logging.Logger, cfg config.Local, datadir string, p p2pcfg.IncomingConnectionsLimit = cfg.P2PHybridIncomingConnectionsLimit identityTracker := NewIdentityTracker() - var childWsNetMeshCreator MeshCreator = meshCreator - var childP2PNetMeshCreator MeshCreator = meshCreator + var childWsNetMeshCreator = meshCreator + var childP2PNetMeshCreator = meshCreator var hybridMeshCreator MeshCreator = noopMeshCreator{} + noMeshCreatorAndHybridServer := meshCreator == nil && cfg.IsHybridServer() _, isHybridMeshCreator := meshCreator.(hybridRelayMeshCreator) - if meshCreator == nil && cfg.IsHybridServer() || isHybridMeshCreator { + if noMeshCreatorAndHybridServer || isHybridMeshCreator { // no mesh creator provided and this node is a listening/relaying node // then override and use hybrid relay meshing // or, if a hybrid relay meshing requested explicitly, do the same childWsNetMeshCreator = noopMeshCreator{} - childP2PNetMeshCreator = noopMeshPubSubFilteredCreator{} + childP2PNetMeshCreator = noopMeshCreator{} hybridMeshCreator = hybridRelayMeshCreator{} } @@ -77,6 +78,7 @@ func NewHybridP2PNetwork(log logging.Logger, cfg config.Local, datadir string, p } hybridMesh, err := hybridMeshCreator.create( + withTargetConnCount(cfg.GossipFanout), withWebsocketNetwork(wsnet), withP2PNetwork(p2pnet)) if err != nil { @@ -187,7 +189,12 @@ func (n *HybridP2PNetwork) RegisterHTTPHandlerFunc(path string, handlerFunc func } // RequestConnectOutgoing implements GossipNode -func (n *HybridP2PNetwork) RequestConnectOutgoing(replace bool, quit <-chan struct{}) {} +func (n 
*HybridP2PNetwork) RequestConnectOutgoing(replace bool, quit <-chan struct{}) { + _ = n.runParallel(func(net GossipNode) error { + net.RequestConnectOutgoing(replace, quit) + return nil + }) +} // GetPeers implements GossipNode func (n *HybridP2PNetwork) GetPeers(options ...PeerOption) []Peer { diff --git a/network/hybridNetwork_test.go b/network/hybridNetwork_test.go index 911db9f410..c4e5540d59 100644 --- a/network/hybridNetwork_test.go +++ b/network/hybridNetwork_test.go @@ -39,6 +39,7 @@ func TestHybridNetwork_DuplicateConn(t *testing.T) { cfg := config.GetDefaultLocal() cfg.EnableP2PHybridMode = true + cfg.DNSBootstrapID = "" log := logging.TestingLog(t) const p2pKeyDir = "" @@ -208,6 +209,7 @@ func TestHybridNetwork_HybridRelayStrategy(t *testing.T) { cfg := config.GetDefaultLocal() cfg.EnableP2PHybridMode = true + cfg.DNSBootstrapID = "" log := logging.TestingLog(t) genesisInfo := GenesisInfo{genesisID, "net"} @@ -215,7 +217,7 @@ func TestHybridNetwork_HybridRelayStrategy(t *testing.T) { startNewRelayNode := func(name string, phonebook []string) (*HybridP2PNetwork, []string) { relayCfg := cfg relayCfg.ForceRelayMessages = true - // no phonebook addresses since we start and and stop it to collect the ws address + // no phonebook addresses since we start and stop it to collect the ws address net, err := NewHybridP2PNetwork(log.With("node", name), relayCfg, "", nil, genesisInfo, &nopeNodeInfo{}, nil) require.NoError(t, err) diff --git a/network/limited_reader_slurper.go b/network/limited_reader_slurper.go index b1ecaf5137..56ed0df2ae 100644 --- a/network/limited_reader_slurper.go +++ b/network/limited_reader_slurper.go @@ -148,10 +148,7 @@ func (s *LimitedReaderSlurper) Bytes() []byte { // allocateNextBuffer allocates the next buffer and places it in the buffers array. 
func (s *LimitedReaderSlurper) allocateNextBuffer() { s.lastBuffer++ - allocationSize := allocationStep - if allocationSize > s.remainedUnallocatedSpace { - allocationSize = s.remainedUnallocatedSpace - } + allocationSize := min(allocationStep, s.remainedUnallocatedSpace) s.buffers[s.lastBuffer] = make([]byte, 0, allocationSize) s.remainedUnallocatedSpace -= allocationSize } diff --git a/network/limited_reader_slurper_test.go b/network/limited_reader_slurper_test.go index 38bfb7d1b2..c8c05ebd13 100644 --- a/network/limited_reader_slurper_test.go +++ b/network/limited_reader_slurper_test.go @@ -59,10 +59,7 @@ type fuzzReader struct { } func (f *fuzzReader) Read(b []byte) (n int, err error) { - s := int(crypto.RandUint64() % 19) - if s > len(b) { - s = len(b) - } + s := min(int(crypto.RandUint64()%19), len(b)) if f.pos >= len(f.buf) { return 0, io.EOF } diff --git a/network/mesh.go b/network/mesh.go index 6a1cdfb6d2..d01c144f9b 100644 --- a/network/mesh.go +++ b/network/mesh.go @@ -23,6 +23,7 @@ import ( "sync" "time" + "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network/p2p" "github.com/libp2p/go-libp2p/p2p/discovery/backoff" ) @@ -43,10 +44,11 @@ type baseMesher struct { type meshConfig struct { parentCtx context.Context + targetConnCount int meshUpdateRequests chan meshRequest meshThreadInterval time.Duration backoff backoff.BackoffStrategy - netMeshFn func() bool + netMeshFn func(int) int peerStatReporter func() closer func() @@ -68,7 +70,7 @@ func withMeshExpJitterBackoff() meshOption { cfg.backoff = eb } } -func withMeshNetMeshFn(netMeshFn func() bool) meshOption { +func withMeshNetMeshFn(netMeshFn func(int) int) meshOption { return func(cfg *meshConfig) { cfg.netMeshFn = netMeshFn } @@ -102,6 +104,12 @@ func withContext(ctx context.Context) meshOption { } } +func withTargetConnCount(targetConnCount int) meshOption { + return func(cfg *meshConfig) { + cfg.targetConnCount = targetConnCount + } +} + func withWebsocketNetwork(wsnet 
*WebsocketNetwork) meshOption { return func(cfg *meshConfig) { cfg.wsnet = wsnet @@ -128,6 +136,9 @@ func newBaseMesher(opts ...meshOption) (*baseMesher, error) { if cfg.meshUpdateRequests == nil { return nil, errors.New("mesh update requests channel is not set") } + if cfg.targetConnCount == 0 { + logging.Base().Warn("target connection count not set, not connecting to any peers") + } if cfg.meshThreadInterval == 0 { cfg.meshThreadInterval = meshThreadInterval } @@ -155,9 +166,9 @@ func (m *baseMesher) meshThread() { return } - hasPeers := m.netMeshFn() + numOutgoing := m.netMeshFn(m.targetConnCount) if m.backoff != nil { - if hasPeers { + if numOutgoing > 0 { // found something, reset timer to the configured value timer.Reset(m.meshThreadInterval) m.backoff.Reset() @@ -228,11 +239,35 @@ func (c hybridRelayMeshCreator) create(opts ...meshOption) (mesher, error) { out := make(chan meshRequest, 5) var wg sync.WaitGroup + var prevP2PConnections = -1 // -1 means not initialized + + meshFn := func(targetConnCount int) int { + wsTarget := targetConnCount + // skip p2p mesh for the first time to give wsnet to establish connections + if prevP2PConnections != -1 && targetConnCount > prevP2PConnections { + wsTarget = targetConnCount - prevP2PConnections + } + wsConnections := cfg.wsnet.meshThreadInner(wsTarget) + + var p2pConnections int + p2pTarget := 0 // even if p2pTarget is zero it makes sense to call p2p meshThreadInner to fetch DHT peers + if wsConnections < targetConnCount { + p2pTarget = targetConnCount - wsConnections + } + p2pConnections = cfg.p2pnet.meshThreadInner(p2pTarget) + + if cfg.wsnet.log.GetLevel() >= logging.Debug && p2pTarget > 0 { + cfg.wsnet.log.Debugf("Hybrid WS-priority mesh: WS out connections=%d, P2P out connections=%d (prev=%d), target=%d", + wsConnections, p2pConnections, prevP2PConnections, targetConnCount) + } + prevP2PConnections = p2pConnections + return wsConnections + p2pConnections + } - ctx := cfg.wsnet.ctx mesh, err := newBaseMesher( - 
withContext(ctx), - withMeshNetMeshFn(cfg.wsnet.meshThreadInner), + withContext(cfg.wsnet.ctx), + withTargetConnCount(cfg.wsnet.config.GossipFanout), + withMeshNetMeshFn(meshFn), withMeshPeerStatReporter(func() { cfg.p2pnet.peerStater.sendPeerConnectionsTelemetryStatus(cfg.wsnet) cfg.p2pnet.peerStater.sendPeerConnectionsTelemetryStatus(cfg.p2pnet) @@ -251,21 +286,25 @@ func (c hybridRelayMeshCreator) create(opts ...meshOption) (mesher, error) { wg.Add(2) go func() { defer wg.Done() - select { - case <-ctx.Done(): - return - case req := <-cfg.wsnet.meshUpdateRequests: - out <- req + for { + select { + case <-mesh.ctx.Done(): + return + case req := <-cfg.wsnet.meshUpdateRequests: + out <- req + } } }() go func() { defer wg.Done() - select { - case <-ctx.Done(): - return - case req := <-cfg.p2pnet.meshUpdateRequests: - out <- req + for { + select { + case <-mesh.ctx.Done(): + return + case req := <-cfg.p2pnet.meshUpdateRequests: + out <- req + } } }() @@ -289,17 +328,3 @@ type noopMesh struct{} func (m *noopMesh) start() {} func (m *noopMesh) stop() {} - -type noopMeshPubSubFilteredCreator struct{} - -func (c noopMeshPubSubFilteredCreator) create(opts ...meshOption) (mesher, error) { - return &noopMesh{}, nil -} -func (c noopMeshPubSubFilteredCreator) makeConfig(wsnet *WebsocketNetwork, p2pnet *P2PNetwork) networkConfig { - return networkConfig{ - pubsubOpts: []p2p.PubSubOption{ - p2p.DisablePubSubPeerExchange(), - p2p.SetPubSubPeerFilter(p2pnet.p2pRelayPeerFilter, p2pnet.pstore), - }, - } -} diff --git a/network/mesh_test.go b/network/mesh_test.go new file mode 100644 index 0000000000..dae83940e3 --- /dev/null +++ b/network/mesh_test.go @@ -0,0 +1,93 @@ +// Copyright (C) 2019-2025 Algorand, Inc. 
+// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package network + +import ( + "context" + "net/http" + "sync/atomic" + "testing" + "time" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/network/limitcaller" + p2piface "github.com/algorand/go-algorand/network/p2p" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/test/partitiontest" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" +) + +// mockP2PService implements p2p.Service and counts DialPeersUntilTargetCount invocations. +// It relies on p2p's meshThreadInner's defer of DialPeersUntilTargetCount to detect invocation. 
+type mockP2PService struct{ dialCount atomic.Int32 } + +func (m *mockP2PService) Start() error { return nil } +func (m *mockP2PService) Close() error { return nil } +func (m *mockP2PService) ID() peer.ID { return "" } +func (m *mockP2PService) IDSigner() *p2piface.PeerIDChallengeSigner { return nil } +func (m *mockP2PService) AddrInfo() peer.AddrInfo { return peer.AddrInfo{} } +func (m *mockP2PService) NetworkNotify(network.Notifiee) {} +func (m *mockP2PService) NetworkStopNotify(network.Notifiee) {} +func (m *mockP2PService) DialPeersUntilTargetCount(int) bool { m.dialCount.Add(1); return true } +func (m *mockP2PService) ClosePeer(peer.ID) error { return nil } +func (m *mockP2PService) Conns() []network.Conn { return nil } +func (m *mockP2PService) ListPeersForTopic(string) []peer.ID { return nil } +func (m *mockP2PService) Subscribe(string, pubsub.ValidatorEx) (p2piface.SubNextCancellable, error) { + return nil, nil +} +func (m *mockP2PService) Publish(context.Context, string, []byte) error { return nil } +func (m *mockP2PService) GetHTTPClient(*peer.AddrInfo, limitcaller.ConnectionTimeStore, time.Duration) (*http.Client, error) { + return &http.Client{}, nil +} + +// TestMesh_HybridRelayP2PInnerCall ensures the wsConnections <= targetConnCount condition +// in the hybridRelayMeshCreator mesh function in order to make sure P2PNetwork.meshThreadInner is invoked +func TestMesh_HybridRelayP2PInnerCall(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + cfg := config.GetDefaultLocal() + cfg.GossipFanout = 0 + cfg.DNSBootstrapID = "" + cfg.EnableP2PHybridMode = true + cfg.PublicAddress = "public-address" + cfg.NetAddress = "127.0.0.1:0" + cfg.P2PHybridNetAddress = "127.0.0.1:0" + + log := logging.TestingLog(t) + genesisInfo := GenesisInfo{GenesisID: "test-genesis", NetworkID: protocol.NetworkID("test-network")} + net, err := NewHybridP2PNetwork(log, cfg, "", nil, genesisInfo, &nopeNodeInfo{}, nil) + require.NoError(t, err) + + mockSvc := 
&mockP2PService{} + net.p2pNetwork.service = mockSvc + net.p2pNetwork.relayMessages = false // prevent pubsub startup + + err = net.Start() + require.NoError(t, err) + defer net.Stop() + + net.RequestConnectOutgoing(false, nil) + require.Eventually(t, func() bool { + // RequestConnectOutgoing queues mesh update request so we have to wait a bit + return mockSvc.dialCount.Load() > 0 + }, 3*time.Second, 50*time.Millisecond, "expected DialPeersUntilTargetCount to be called") +} diff --git a/network/metrics.go b/network/metrics.go index aa3fc23a9f..5400c927a3 100644 --- a/network/metrics.go +++ b/network/metrics.go @@ -117,6 +117,15 @@ var networkP2PGossipSubReceivedBytesTotal = metrics.MakeCounter(metrics.MetricNa // var networkP2PGossipSubSentMsgs = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_p2p_gs_message_sent", Description: "Number of complete messages that were sent to the network through gossipsub"}) +var networkVoteBroadcastCompressedBytes = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_vote_compressed_bytes_broadcast_total", Description: "Total AV message bytes broadcast after applying stateless compression"}) +var networkVoteBroadcastUncompressedBytes = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_vote_uncompressed_bytes_broadcast_total", Description: "Total AV message bytes broadcast before applying stateless compression"}) +var networkVPCompressionErrors = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_vpack_compression_errors_total", Description: "Total number of stateful vote compression errors"}) +var networkVPDecompressionErrors = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_vpack_decompression_errors_total", Description: "Total number of stateful vote decompression errors"}) +var networkVPAbortMessagesSent = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_vpack_abort_messages_sent_total", Description: "Total number of vpack abort messages sent to peers"}) +var 
networkVPAbortMessagesReceived = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_vpack_abort_messages_received_total", Description: "Total number of vpack abort messages received from peers"}) +var networkVPCompressedBytesSent = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_vpack_compressed_bytes_sent_total", Description: "Total VP message bytes sent, after compressing AV to VP messages"}) +var networkVPUncompressedBytesSent = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_vpack_uncompressed_bytes_sent_total", Description: "Total VP message bytes sent, before compressing AV to VP messages"}) + var _ = pubsub.RawTracer(pubsubMetricsTracer{}) // pubsubMetricsTracer is a tracer for pubsub events used to track metrics. diff --git a/network/msgCompressor.go b/network/msgCompressor.go index 7eec31286d..6b51476726 100644 --- a/network/msgCompressor.go +++ b/network/msgCompressor.go @@ -20,6 +20,7 @@ import ( "bytes" "fmt" "io" + "sync/atomic" "github.com/DataDog/zstd" @@ -32,14 +33,25 @@ var zstdCompressionMagic = [4]byte{0x28, 0xb5, 0x2f, 0xfd} const zstdCompressionLevel = zstd.BestSpeed +// voteCompressionAbortMessage is a single-byte payload sent with a VP tag to signal +// that stateful compression should be disabled for this connection. +// When either encoder or decoder encounters an error, it sends VP+0xFF to notify +// the peer, then both sides disable stateful compression and fall back to AV messages. +const voteCompressionAbortMessage byte = 0xFF + +// voteCompressionError wraps errors from stateful vote compression/decompression. +// This error type signals that an abort message should be sent to the peer. 
+type voteCompressionError struct{ err error } + +func (e *voteCompressionError) Error() string { return e.err.Error() } +func (e *voteCompressionError) Unwrap() error { return e.err } + // zstdCompressMsg returns a concatenation of a tag and compressed data func zstdCompressMsg(tbytes []byte, d []byte) ([]byte, string) { - bound := zstd.CompressBound(len(d)) - if bound < len(d) { + bound := max(zstd.CompressBound(len(d)), // although CompressBound allocated more than the src size, this is an implementation detail. // increase the buffer size to always have enough space for the raw data if compression fails. - bound = len(d) - } + len(d)) mbytesComp := make([]byte, len(tbytes)+bound) copy(mbytesComp, tbytes) comp, err := zstd.CompressLevel(mbytesComp[len(tbytes):], d, zstdCompressionLevel) @@ -76,16 +88,30 @@ func vpackCompressVote(tbytes []byte, d []byte) ([]byte, string) { // and should be larger. const MaxDecompressedMessageSize = 20 * 1024 * 1024 // some large enough value -// wsPeerMsgDataDecoder performs optional incoming messages conversion. -// At the moment it only supports zstd decompression for payload proposal, -// and vpack decompression for votes. -type wsPeerMsgDataDecoder struct { +// wsPeerMsgCodec performs optional message compression/decompression for certain +// types of messages. It handles: +// - zstd compression for PP proposals (outgoing not implemented) +// - stateless vpack compression for AV votes (outgoing not implemented) +// - stateful vpack compression for VP votes (both directions) +type wsPeerMsgCodec struct { log logging.Logger origin string - // actual converter(s) + // decompressors ppdec zstdProposalDecompressor avdec vpackVoteDecompressor + + // stateful vote compression (if enabled). + // If either side encounters an error, or if we receive an abort, we disable + // stateful compression entirely and fall back to stateless AV traffic. 
+ statefulVoteEnabled atomic.Bool + statefulVoteTableSize uint + statefulVoteEnc *vpack.StatefulEncoder + statefulVoteDec *vpack.StatefulDecoder +} + +func (c *wsPeerMsgCodec) switchOffStatefulVoteCompression() { + c.statefulVoteEnabled.Store(false) } type zstdProposalDecompressor struct{} @@ -126,8 +152,58 @@ func (dec zstdProposalDecompressor) convert(data []byte) ([]byte, error) { } } -func (c *wsPeerMsgDataDecoder) convert(tag protocol.Tag, data []byte) ([]byte, error) { - if tag == protocol.ProposalPayloadTag { +// compress attempts to compress an outgoing message. +// Currently only supports stateful vote compression. +// Returns compressed data and nil error if compression succeeds, +// (nil, nil) if compression is not applicable, +// (nil, vpError) if stateful compression fails (caller should send abort message). +func (c *wsPeerMsgCodec) compress(tag protocol.Tag, data []byte) ([]byte, error) { + if tag == protocol.AgreementVoteTag && c.statefulVoteEnabled.Load() { + // Skip the tag bytes (first 2 bytes are the AV tag) + if len(data) < 2 { + return nil, nil + } + // Input data is AV+stateless-compressed from broadcast + // We only need to apply stateful compression on top + statelessCompressed := data[2:] + + // initialize stateful encoder on first use + if c.statefulVoteEnc == nil { + enc, err := vpack.NewStatefulEncoder(c.statefulVoteTableSize) + if err != nil { + c.log.Warnf("failed to initialize stateful vote encoder for peer %s, disabling: %v", c.origin, err) + networkVPCompressionErrors.Inc(nil) + c.switchOffStatefulVoteCompression() + return nil, &voteCompressionError{err: err} + } + c.statefulVoteEnc = enc + c.log.Debugf("stateful vote encoder initialized for peer %s (table size %d)", c.origin, c.statefulVoteTableSize) + } + + tagLen := len(protocol.VotePackedTag) + result := make([]byte, tagLen+vpack.MaxCompressedVoteSize) + copy(result, protocol.VotePackedTag) + // apply stateful compression to stateless-compressed data + compressed, err := 
c.statefulVoteEnc.Compress(result[tagLen:], statelessCompressed) + if err != nil { + c.log.Warnf("stateful vote compression failed for peer %s, disabling: %v", c.origin, err) + networkVPCompressionErrors.Inc(nil) + c.switchOffStatefulVoteCompression() + return nil, &voteCompressionError{err: err} + } + finalResult := result[:tagLen+len(compressed)] + // Track stateful compression layer only: stateless-compressed input → VP output + networkVPUncompressedBytesSent.AddUint64(uint64(len(statelessCompressed)), nil) + networkVPCompressedBytesSent.AddUint64(uint64(len(compressed)), nil) + return finalResult, nil + } + return nil, nil +} + +// decompress handles incoming message decompression based on tag type +func (c *wsPeerMsgCodec) decompress(tag protocol.Tag, data []byte) ([]byte, error) { + switch tag { + case protocol.ProposalPayloadTag: // sender might support compressed payload but fail to compress for whatever reason, // in this case it sends non-compressed payload - the receiver decompress only if it is compressed. if c.ppdec.accept(data) { @@ -138,7 +214,8 @@ func (c *wsPeerMsgDataDecoder) convert(tag protocol.Tag, data []byte) ([]byte, e return res, nil } c.log.Warnf("peer %s supported zstd but sent non-compressed data", c.origin) - } else if tag == protocol.AgreementVoteTag { + + case protocol.AgreementVoteTag: if c.avdec.enabled { res, err := c.avdec.convert(data) if err != nil { @@ -148,12 +225,59 @@ func (c *wsPeerMsgDataDecoder) convert(tag protocol.Tag, data []byte) ([]byte, e } return res, nil } + + case protocol.VotePackedTag: + // Check for abort message first + if len(data) == 1 && data[0] == voteCompressionAbortMessage { + c.log.Infof("Received VP abort message from peer %s, disabling stateful encoding", c.origin) + networkVPAbortMessagesReceived.Inc(nil) + // Peer signalled stateful compression should stop; disable both encode and decode paths. 
+ c.switchOffStatefulVoteCompression() + // Drop this message silently (it's just a control signal) + return nil, nil + } + + if !c.statefulVoteEnabled.Load() { + c.log.Debugf("dropping VP message from %s: stateful decompression disabled", c.origin) + return nil, nil + } + if c.statefulVoteDec == nil { + dec, err := vpack.NewStatefulDecoder(c.statefulVoteTableSize) + if err != nil { + c.log.Warnf("failed to initialize stateful vote decoder for peer %s, disabling: %v", c.origin, err) + networkVPDecompressionErrors.Inc(nil) + c.switchOffStatefulVoteCompression() + return nil, &voteCompressionError{err: err} + } + c.statefulVoteDec = dec + c.log.Debugf("stateful vote decoder initialized for peer %s (table size %d)", c.origin, c.statefulVoteTableSize) + } + // StatefulDecoder decompresses to "stateless-compressed" format + statelessCompressed, err := c.statefulVoteDec.Decompress(make([]byte, 0, vpack.MaxCompressedVoteSize), data) + if err != nil { + c.log.Warnf("stateful vote decompression failed for peer %s, disabling: %v", c.origin, err) + networkVPDecompressionErrors.Inc(nil) + c.switchOffStatefulVoteCompression() + return nil, &voteCompressionError{err: err} + } + + var statelessDec vpack.StatelessDecoder + voteBody, err := statelessDec.DecompressVote(make([]byte, 0, vpack.MaxMsgpackVoteSize), statelessCompressed) + if err != nil { + c.log.Warnf("stateless vote decompression failed after stateful for peer %s, disabling: %v", c.origin, err) + networkVPDecompressionErrors.Inc(nil) + c.switchOffStatefulVoteCompression() + return nil, &voteCompressionError{err: err} + } + + return voteBody, nil } + return data, nil } -func makeWsPeerMsgDataDecoder(wp *wsPeer) *wsPeerMsgDataDecoder { - c := wsPeerMsgDataDecoder{ +func makeWsPeerMsgCodec(wp *wsPeer) *wsPeerMsgCodec { + c := wsPeerMsgCodec{ log: wp.log, origin: wp.originAddress, } @@ -166,5 +290,21 @@ func makeWsPeerMsgDataDecoder(wp *wsPeer) *wsPeerMsgDataDecoder { dec: vpack.NewStatelessDecoder(), } } + + // Initialize 
stateful compression negotiation details if both nodes support it + // Stateful compression requires stateless compression to be available since VP messages + // decompress in two stages: VP → stateless-compressed → raw vote + if wp.enableVoteCompression && // this node's configuration allows vote compression + wp.voteCompressionTableSize > 0 && // this node's configuration allows stateful vote compression + wp.vpackVoteCompressionSupported() && // the other side has advertised vote compression + wp.vpackStatefulCompressionSupported() { // the other side has advertised stateful vote compression + tableSize := wp.getBestVpackTableSize() + if tableSize > 0 { + c.statefulVoteEnabled.Store(true) + c.statefulVoteTableSize = tableSize + wp.log.Debugf("Stateful compression negotiated with table size %d (our max: %d)", tableSize, wp.voteCompressionTableSize) + } + } + return &c } diff --git a/network/msgCompressor_test.go b/network/msgCompressor_test.go index d64b8fb54f..333c0b0baa 100644 --- a/network/msgCompressor_test.go +++ b/network/msgCompressor_test.go @@ -17,16 +17,51 @@ package network import ( + "context" + "errors" "strings" "testing" + "time" "github.com/DataDog/zstd" + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/network/phonebook" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +var ( + sampleVote1 = map[string]any{ + "cred": map[string]any{"pf": crypto.VrfProof{1}}, + "r": map[string]any{"rnd": uint64(2), "snd": [32]byte{3}}, + "sig": map[string]any{ + "p": [32]byte{4}, "p1s": [64]byte{5}, "p2": [32]byte{6}, + "p2s": [64]byte{7}, "ps": [64]byte{}, "s": [64]byte{9}, + }, + } + sampleVote2 = map[string]any{ + "cred": map[string]any{"pf": crypto.VrfProof{2}}, + "r": 
map[string]any{"rnd": uint64(3), "snd": [32]byte{4}}, + "sig": map[string]any{ + "p": [32]byte{5}, "p1s": [64]byte{6}, "p2": [32]byte{7}, + "p2s": [64]byte{8}, "ps": [64]byte{}, "s": [64]byte{10}, + }, + } + sampleVote3 = map[string]any{ + "cred": map[string]any{"pf": crypto.VrfProof{3}}, + "r": map[string]any{"rnd": uint64(4), "snd": [32]byte{5}}, + "sig": map[string]any{ + "p": [32]byte{6}, "p1s": [64]byte{7}, "p2": [32]byte{8}, + "p2s": [64]byte{9}, "ps": [64]byte{}, "s": [64]byte{11}, + }, + } +) + func TestZstdDecompress(t *testing.T) { partitiontest.PartitionTest(t) @@ -76,12 +111,12 @@ func (cl *converterTestLogger) Warnf(s string, args ...interface{}) { func TestWsPeerMsgDataConverterConvert(t *testing.T) { partitiontest.PartitionTest(t) - c := wsPeerMsgDataDecoder{} + c := wsPeerMsgCodec{} c.ppdec = zstdProposalDecompressor{} tag := protocol.AgreementVoteTag data := []byte("data") - r, err := c.convert(tag, data) + r, err := c.decompress(tag, data) require.NoError(t, err) require.Equal(t, data, r) @@ -89,7 +124,7 @@ func TestWsPeerMsgDataConverterConvert(t *testing.T) { l := converterTestLogger{} c.log = &l c.ppdec = zstdProposalDecompressor{} - r, err = c.convert(tag, data) + r, err = c.decompress(tag, data) require.NoError(t, err) require.Equal(t, data, r) require.Equal(t, 1, l.warnMsgCount) @@ -100,8 +135,420 @@ func TestWsPeerMsgDataConverterConvert(t *testing.T) { comp, err := zstd.Compress(nil, data) require.NoError(t, err) - r, err = c.convert(tag, comp) + r, err = c.decompress(tag, comp) require.NoError(t, err) require.Equal(t, data, r) require.Equal(t, 0, l.warnMsgCount) } + +func TestMakeWsPeerMsgCodec_StatefulRequiresStateless(t *testing.T) { + partitiontest.PartitionTest(t) + + // Create a mock wsPeer with stateful compression features but WITHOUT stateless + wp := &wsPeer{} + wp.wsPeerCore.log = logging.TestingLog(t) + wp.wsPeerCore.originAddress = "test-peer" + wp.enableVoteCompression = true + wp.voteCompressionTableSize = 512 + wp.features 
= pfCompressedVoteVpackStateful512 // stateful enabled but NOT pfCompressedVoteVpack + + codec := makeWsPeerMsgCodec(wp) + + // Stateless should not be enabled (no pfCompressedVoteVpack) + assert.False(t, codec.avdec.enabled, "Stateless decompression should not be enabled when pfCompressedVoteVpack is not advertised") + + // Stateful should not be enabled even though stateful features are advertised + // because stateful requires stateless to work (VP -> stateless -> raw) + assert.False(t, codec.statefulVoteEnabled.Load(), "Stateful compression should not be enabled without stateless support") + + // Now test with both stateless AND stateful enabled + wp.features = pfCompressedVoteVpack | pfCompressedVoteVpackStateful512 + + codec = makeWsPeerMsgCodec(wp) + + // Both stateless and stateful should be enabled + assert.True(t, codec.avdec.enabled, "Stateless decompression should be enabled when pfCompressedVoteVpack is advertised") + assert.True(t, codec.statefulVoteEnabled.Load(), "Stateful compression should be enabled when both stateless and stateful features are supported") +} + +type voteCompressionNetwork interface { + Broadcast(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error + RegisterHandlers(dispatch []TaggedMessageHandler) + GetPeers(options ...PeerOption) []Peer +} + +type voteTestNet struct { + name string + network voteCompressionNetwork + stop func() + peerFn func() *wsPeer +} + +type voteNetFactory func(t *testing.T, cfgA, cfgB config.Local) (*voteTestNet, *voteTestNet) + +func waitForSinglePeer(t *testing.T, vn *voteTestNet) *wsPeer { + require.NotNil(t, vn.peerFn, "%s: peer accessor not set", vn.name) + var result *wsPeer + require.Eventually(t, func() bool { + result = vn.peerFn() + return result != nil + }, 5*time.Second, 50*time.Millisecond) + return result +} + +func makeWebsocketVoteNets(t *testing.T, cfgA, cfgB config.Local) (*voteTestNet, *voteTestNet) { + netA := makeTestWebsocketNodeWithConfig(t, cfgA) + 
netA.Start() + + netB := makeTestWebsocketNodeWithConfig(t, cfgB) + + addrA, postListen := netA.Address() + require.True(t, postListen) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.RelayRole) + netB.Start() + + readyTimeout := time.NewTimer(2 * time.Second) + defer readyTimeout.Stop() + waitReady(t, netA, readyTimeout.C) + waitReady(t, netB, readyTimeout.C) + + require.Eventually(t, func() bool { + return len(netA.GetPeers(PeersConnectedIn)) == 1 && len(netB.GetPeers(PeersConnectedOut)) == 1 + }, 5*time.Second, 50*time.Millisecond) + + return &voteTestNet{ + name: "websocket-A", + network: netA, + stop: func() { netStop(t, netA, "A") }, + peerFn: func() *wsPeer { + peers := netA.GetPeers(PeersConnectedIn) + if len(peers) != 1 { + return nil + } + return peers[0].(*wsPeer) + }, + }, &voteTestNet{ + name: "websocket-B", + network: netB, + stop: func() { netStop(t, netB, "B") }, + peerFn: func() *wsPeer { + peers := netB.GetPeers(PeersConnectedOut) + if len(peers) != 1 { + return nil + } + return peers[0].(*wsPeer) + }, + } +} + +func makeP2PVoteNets(t *testing.T, cfgA, cfgB config.Local) (*voteTestNet, *voteTestNet) { + log := logging.TestingLog(t) + genesisInfo := GenesisInfo{genesisID, config.Devtestnet} + + cfgA.DNSBootstrapID = "" + cfgA.NetAddress = "127.0.0.1:0" + cfgA.GossipFanout = 1 + netA, err := NewP2PNetwork(log.With("name", "netA"), cfgA, "", nil, genesisInfo, &nopeNodeInfo{}, nil, nil) + require.NoError(t, err) + require.NoError(t, netA.Start()) + + peerInfoA := netA.service.AddrInfo() + addrsA, err := peer.AddrInfoToP2pAddrs(&peerInfoA) + require.NoError(t, err) + require.NotEmpty(t, addrsA) + + cfgB.DNSBootstrapID = "" + cfgB.NetAddress = "" + cfgB.GossipFanout = 1 + phoneBookAddresses := []string{addrsA[0].String()} + netB, err := NewP2PNetwork(log.With("name", "netB"), cfgB, "", phoneBookAddresses, genesisInfo, &nopeNodeInfo{}, nil, nil) + require.NoError(t, err) + require.NoError(t, netB.Start()) + + require.Eventually(t, 
func() bool { + return len(netA.service.Conns()) > 0 && len(netB.service.Conns()) > 0 + }, 5*time.Second, 50*time.Millisecond) + + require.Eventually(t, func() bool { + return len(netA.GetPeers(PeersConnectedIn)) == 1 && len(netB.GetPeers(PeersConnectedOut)) == 1 + }, 5*time.Second, 50*time.Millisecond) + + return &voteTestNet{ + name: "p2p-A", + network: netA, + stop: func() { netA.Stop() }, + peerFn: func() *wsPeer { + netA.wsPeersLock.RLock() + defer netA.wsPeersLock.RUnlock() + for _, peer := range netA.wsPeers { + return peer + } + return nil + }, + }, &voteTestNet{ + name: "p2p-B", + network: netB, + stop: func() { netB.Stop() }, + peerFn: func() *wsPeer { + netB.wsPeersLock.RLock() + defer netB.wsPeersLock.RUnlock() + for _, peer := range netB.wsPeers { + return peer + } + return nil + }, + } +} + +func TestVoteStatefulCompressionAbortMessage(t *testing.T) { + partitiontest.PartitionTest(t) + + scenarios := []struct { + name string + induce func(t *testing.T, peerAtoB, peerBtoA *wsPeer) + extraMessages [][]byte + }{{ + name: "decoder_abort", + induce: func(t *testing.T, peerAtoB, peerBtoA *wsPeer) { + malformedVP := append([]byte(protocol.VotePackedTag), byte(0x00)) + require.True(t, peerBtoA.writeNonBlock(context.Background(), malformedVP, true, crypto.Digest{}, time.Now()), + "failed to enqueue malformed VP message") + }, + }, { + name: "encoder_abort", + induce: func(t *testing.T, peerAtoB, peerBtoA *wsPeer) { + // pretend to VP abort w/out a message + peerAtoB.msgCodec.switchOffStatefulVoteCompression() + close, _ := peerAtoB.handleVPError(&voteCompressionError{err: errors.New("forced encoder failure")}) + require.False(t, close, "encoder abort should not close connection") + }, + }, { + name: "encoder_hello", + induce: func(t *testing.T, peerAtoB, peerBtoA *wsPeer) { + // send a bad AV message that triggers VP abort, but passes through anyway to the other side + helloAV := append([]byte(protocol.AgreementVoteTag), "hello"...) 
+ require.True(t, peerAtoB.writeNonBlock(context.Background(), helloAV, true, crypto.Digest{}, time.Now()), + "failed to enqueue malformed AV message") + }, + extraMessages: [][]byte{[]byte("hello")}, + }} + + factories := []struct { + name string + factory voteNetFactory + }{ + {"Websocket", makeWebsocketVoteNets}, + {"P2P", makeP2PVoteNets}, + } + + for _, f := range factories { + t.Run(f.name, func(t *testing.T) { + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + testVoteStaticCompressionAbortMessage(t, f.factory, scenario.induce, scenario.extraMessages) + }) + } + }) + } +} + +func testVoteStaticCompressionAbortMessage(t *testing.T, factory voteNetFactory, induce func(t *testing.T, peerAtoB, peerBtoA *wsPeer), extraMessages [][]byte) { + cfgA := defaultConfig + cfgA.GossipFanout = 1 + cfgA.EnableVoteCompression = true + cfgA.StatefulVoteCompressionTableSize = 256 + + cfgB := cfgA + + netA, netB := factory(t, cfgA, cfgB) + defer netA.stop() + defer netB.stop() + + peerAtoB := waitForSinglePeer(t, netA) + peerBtoA := waitForSinglePeer(t, netB) + + voteData := protocol.EncodeReflect(sampleVote1) + fallbackVotes := [][]byte{ + protocol.EncodeReflect(sampleVote2), + protocol.EncodeReflect(sampleVote3), + } + // some subtests might expect extra messages to appear to test "pass through" error handling + allVotes := append([][]byte{voteData}, extraMessages...) + allVotes = append(allVotes, fallbackVotes...) 
+ matcher := newMessageMatcher(t, allVotes) + allDone := matcher.done + netB.network.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.AgreementVoteTag, MessageHandler: matcher}}) + + // send first (valid) vote + require.NoError(t, netA.network.Broadcast(context.Background(), protocol.AgreementVoteTag, voteData, true, nil)) + + require.Eventually(t, func() bool { + matcher.lock.Lock() + defer matcher.lock.Unlock() + return len(matcher.received) >= 1 + }, 2*time.Second, 50*time.Millisecond, "timeout waiting for initial vote") + + assert.True(t, peerAtoB.msgCodec.statefulVoteEnabled.Load(), "Stateful compression not established on A->B") + assert.True(t, peerBtoA.msgCodec.statefulVoteEnabled.Load(), "Stateful compression not established on B->A") + + // induce some kind of error + induce(t, peerAtoB, peerBtoA) + + // stateful should be disabled on both sides after abort, but connection should stay up + require.Eventually(t, func() bool { + return !peerAtoB.msgCodec.statefulVoteEnabled.Load() + }, 2*time.Second, 50*time.Millisecond, "Stateful compression not disabled on A->B after abort trigger") + + require.Eventually(t, func() bool { + return !peerBtoA.msgCodec.statefulVoteEnabled.Load() + }, 2*time.Second, 50*time.Millisecond, "Stateful compression not disabled on B->A after abort trigger") + assert.False(t, peerBtoA.msgCodec.statefulVoteEnabled.Load(), "Stateful compression should be disabled on B->A after abort") + assert.False(t, peerAtoB.msgCodec.statefulVoteEnabled.Load(), "Stateful compression should be disabled on A->B after sending abort") + + require.Len(t, netA.network.GetPeers(PeersConnectedIn), 1, "connection should still be alive after abort") + require.Len(t, netB.network.GetPeers(PeersConnectedOut), 1, "connection should still be alive after abort") + + // send through some more votes, just to show they still get through + for _, msg := range fallbackVotes { + require.NoError(t, netA.network.Broadcast(context.Background(), 
protocol.AgreementVoteTag, msg, true, nil)) + } + + // assert the whole sequence was received on the other side + select { + case <-allDone: + case <-time.After(2 * time.Second): + require.Fail(t, "timeout waiting for fallback votes after abort") + } + require.True(t, matcher.Match(), "received votes mismatch after abort") +} + +func TestVoteStatefulVoteCompression(t *testing.T) { + partitiontest.PartitionTest(t) + + scenarios := []struct { + name string + msgs [][]byte + expectCompressionOff bool + }{ + {"ValidVotes", [][]byte{protocol.EncodeReflect(sampleVote1), protocol.EncodeReflect(sampleVote2)}, false}, + {"InvalidVotes", [][]byte{[]byte("hello1"), []byte("hello2"), []byte("hello3")}, true}, + } + + factories := []struct { + name string + factory voteNetFactory + }{ + {"Websocket", makeWebsocketVoteNets}, + {"P2P", makeP2PVoteNets}, + } + + for _, f := range factories { + t.Run(f.name, func(t *testing.T) { + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + testStatefulVoteCompression(t, scenario.msgs, !scenario.expectCompressionOff, f.factory) + }) + } + }) + } +} + +// test negotiation with different advertised settings on both ends, plus valid and invalid votes propagate correctly +func testStatefulVoteCompression(t *testing.T, msgs [][]byte, expectCompressionAfter bool, factory voteNetFactory) { + type testCase struct { + name string + netATableSize uint + netBTableSize uint + expectedSize uint + expectDynamic bool + } + + testCases := []testCase{ + {"disabled_disabled", 0, 0, 0, false}, + {"disabled_16", 0, 16, 0, false}, + {"16_disabled", 16, 0, 0, false}, + {"disabled_1024", 0, 1024, 0, false}, + {"1024_disabled", 1024, 0, 0, false}, + {"disabled_2048", 0, 2048, 0, false}, + {"2048_disabled", 2048, 0, 0, false}, + {"16_16", 16, 16, 16, true}, + {"32_32", 32, 32, 32, true}, + {"64_64", 64, 64, 64, true}, + {"128_128", 128, 128, 128, true}, + {"256_256", 256, 256, 256, true}, + {"512_512", 512, 512, 512, true}, + 
{"1024_1024", 1024, 1024, 1024, true}, + {"2048_2048", 2048, 2048, 2048, true}, + {"16_32", 16, 32, 16, true}, + {"32_16", 32, 16, 16, true}, + {"16_1024", 16, 1024, 16, true}, + {"1024_16", 1024, 16, 16, true}, + {"16_2048", 16, 2048, 16, true}, + {"2048_16", 2048, 16, 16, true}, + {"64_256", 64, 256, 64, true}, + {"256_64", 256, 64, 64, true}, + {"128_512", 128, 512, 128, true}, + {"512_128", 512, 128, 128, true}, + {"256_1024", 256, 1024, 256, true}, + {"1024_256", 1024, 256, 256, true}, + {"256_2048", 256, 2048, 256, true}, + {"2048_256", 2048, 256, 256, true}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cfgA := defaultConfig + cfgA.GossipFanout = 1 + cfgA.EnableVoteCompression = true + cfgA.StatefulVoteCompressionTableSize = tc.netATableSize + + cfgB := defaultConfig + cfgB.GossipFanout = 1 + cfgB.EnableVoteCompression = true + cfgB.StatefulVoteCompressionTableSize = tc.netBTableSize + + netA, netB := factory(t, cfgA, cfgB) + defer netA.stop() + defer netB.stop() + + peerAtoB := waitForSinglePeer(t, netA) + peerBtoA := waitForSinglePeer(t, netB) + + if tc.expectDynamic { + require.True(t, peerAtoB.msgCodec.statefulVoteEnabled.Load(), "A->B peer should have stateful compression enabled") + require.True(t, peerBtoA.msgCodec.statefulVoteEnabled.Load(), "B->A peer should have stateful compression enabled") + require.Equal(t, uint(tc.expectedSize), peerAtoB.getBestVpackTableSize(), "A->B peer should have expected table size") + require.Equal(t, uint(tc.expectedSize), peerBtoA.getBestVpackTableSize(), "B->A peer should have expected table size") + } else { + require.False(t, peerAtoB.msgCodec.statefulVoteEnabled.Load(), "A->B peer should not have stateful compression enabled") + require.False(t, peerBtoA.msgCodec.statefulVoteEnabled.Load(), "B->A peer should not have stateful compression enabled") + } + + matcher := newMessageMatcher(t, msgs) + counterDone := matcher.done + netB.network.RegisterHandlers([]TaggedMessageHandler{{Tag: 
protocol.AgreementVoteTag, MessageHandler: matcher}}) + + for _, msg := range msgs { + require.NoError(t, netA.network.Broadcast(context.Background(), protocol.AgreementVoteTag, msg, true, nil)) + } + + select { + case <-counterDone: + case <-time.After(2 * time.Second): + t.Errorf("timeout waiting for vote messages, count=%d, wanted %d", len(matcher.received), len(msgs)) + } + + require.True(t, matcher.Match(), "Received messages don't match sent messages") + + if tc.expectDynamic { + if expectCompressionAfter { + require.True(t, peerAtoB.msgCodec.statefulVoteEnabled.Load(), "Stateful compression should still be enabled after sending valid votes") + require.True(t, peerBtoA.msgCodec.statefulVoteEnabled.Load(), "Stateful compression should still be enabled after receiving valid votes") + } else { + require.False(t, peerAtoB.msgCodec.statefulVoteEnabled.Load(), "Stateful compression should be disabled after sending invalid messages") + require.False(t, peerBtoA.msgCodec.statefulVoteEnabled.Load(), "Stateful compression should be disabled after receiving abort from peer") + } + } + }) + } +} diff --git a/network/msgOfInterest.go b/network/msgOfInterest.go index 2830659d2d..fd07e60c0d 100644 --- a/network/msgOfInterest.go +++ b/network/msgOfInterest.go @@ -46,7 +46,7 @@ func unmarshallMessageOfInterest(data []byte) (map[protocol.Tag]bool, error) { } // convert the tags into a tags map. 
msgTagsMap := make(map[protocol.Tag]bool, len(tags)) - for _, tag := range strings.Split(string(tags), topicsEncodingSeparator) { + for tag := range strings.SplitSeq(string(tags), topicsEncodingSeparator) { if len(tag) != protocol.TagLength { return nil, errInvalidMessageOfInterestInvalidTag } diff --git a/network/msgp_gen.go b/network/msgp_gen.go index 5451e5e53a..3d94f7f478 100644 --- a/network/msgp_gen.go +++ b/network/msgp_gen.go @@ -169,7 +169,6 @@ func (z disconnectReason) MsgIsZero() bool { // MaxSize returns a maximum valid message size for this message type func DisconnectReasonMaxSize() (s int) { panic("Unable to determine max size: String type string(z) is unbounded") - return } // MarshalMsg implements msgp.Marshaler @@ -1390,12 +1389,6 @@ func PeerMetaHeadersMaxSize() (s int) { // Adding size of map keys for z s += maxHeaderKeys panic("Unable to determine max size: String type za0006 is unbounded") - // Adding size of map values for z - s += maxHeaderKeys - // Calculating size of slice: za0007 - s += msgp.ArrayHeaderSize - panic("Unable to determine max size: String type is unbounded for za0007[za0008]") - return } // MarshalMsg implements msgp.Marshaler @@ -1484,5 +1477,4 @@ func PeerMetaValuesMaxSize() (s int) { // Calculating size of slice: z s += msgp.ArrayHeaderSize panic("Unable to determine max size: String type is unbounded for z[za0001]") - return } diff --git a/network/multiplexer.go b/network/multiplexer.go index 4d498afb17..abe5d209f4 100644 --- a/network/multiplexer.go +++ b/network/multiplexer.go @@ -18,6 +18,7 @@ package network import ( "fmt" + "maps" "sync/atomic" ) @@ -83,11 +84,8 @@ func (m *Multiplexer) ValidateHandle(msg IncomingMessage) OutgoingMessage { func registerMultiplexer[T any](target *atomic.Value, dispatch []taggedMessageDispatcher[T]) { mp := make(map[Tag]T) - if existingMap := getMap[T](target); existingMap != nil { - for k, v := range existingMap { - mp[k] = v - } - } + existingMap := getMap[T](target) + maps.Copy(mp, 
existingMap) for _, v := range dispatch { if _, has := mp[v.Tag]; has { panic(fmt.Sprintf("Already registered a handler for tag %v", v.Tag)) diff --git a/network/netidentity_test.go b/network/netidentity_test.go index ce3240698f..03c345fe79 100644 --- a/network/netidentity_test.go +++ b/network/netidentity_test.go @@ -323,7 +323,7 @@ func TestIdentityTrackerRemoveIdentity(t *testing.T) { require.True(t, exists) // check that removing a peer who does not exist in the map (but whos identity does) - // not not result in the wrong peer being removed + // not result in the wrong peer being removed tracker.removeIdentity(&p2) _, exists = tracker.peersByID[p.identity] require.True(t, exists) diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index 2c297f8479..9186074f01 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -64,7 +64,7 @@ type Service interface { NetworkNotify(network.Notifiee) NetworkStopNotify(network.Notifiee) - DialPeersUntilTargetCount(targetConnCount int) + DialPeersUntilTargetCount(targetConnCount int) bool ClosePeer(peer.ID) error Conns() []network.Conn @@ -76,9 +76,15 @@ type Service interface { GetHTTPClient(addrInfo *peer.AddrInfo, connTimeStore limitcaller.ConnectionTimeStore, queueingTimeout time.Duration) (*http.Client, error) } +// subset of config.Local needed here +type nodeSubConfig interface { + IsHybridServer() bool +} + // serviceImpl manages integration with libp2p and implements the Service interface type serviceImpl struct { log logging.Logger + subcfg nodeSubConfig listenAddr string host host.Host streams *streamManager @@ -99,6 +105,8 @@ const AlgorandWsProtocolV22 = "/algorand-ws/2.2.0" const dialTimeout = 30 * time.Second +const psmdkDialed = "dialed" + // MakeHost creates a libp2p host but does not start listening. // Use host.Network().Listen() on the returned address to start listening. 
func MakeHost(cfg config.Local, datadir string, pstore *pstore.PeerStore) (host.Host, string, error) { @@ -220,7 +228,7 @@ func SetPubSubPeerFilter(filter func(checker pstore.RoleChecker, pid peer.ID) bo // MakeService creates a P2P service instance func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, h host.Host, listenAddr string, wsStreamHandlers StreamHandlers, pubsubOptions ...PubSubOption) (*serviceImpl, error) { - sm := makeStreamManager(ctx, log, h, wsStreamHandlers, cfg.EnableGossipService) + sm := makeStreamManager(ctx, log, cfg, h, wsStreamHandlers, cfg.EnableGossipService) h.Network().Notify(sm) for _, pair := range wsStreamHandlers { @@ -232,12 +240,13 @@ func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, h ho opt(&pubsubOpts) } - ps, err := makePubSub(ctx, cfg, h, pubsubOpts...) + ps, err := makePubSub(ctx, h, cfg.GossipFanout, pubsubOpts...) if err != nil { return nil, err } return &serviceImpl{ log: log, + subcfg: cfg, listenAddr: listenAddr, host: h, streams: sm, @@ -280,20 +289,29 @@ func (s *serviceImpl) IDSigner() *PeerIDChallengeSigner { } // DialPeersUntilTargetCount attempts to establish connections to the provided phonebook addresses -func (s *serviceImpl) DialPeersUntilTargetCount(targetConnCount int) { +func (s *serviceImpl) DialPeersUntilTargetCount(targetConnCount int) bool { ps := s.host.Peerstore().(*pstore.PeerStore) addrInfos := ps.GetAddresses(targetConnCount, phonebook.RelayRole) conns := s.host.Network().Conns() var numOutgoingConns int for _, conn := range conns { if conn.Stat().Direction == network.DirOutbound { - numOutgoingConns++ + if s.subcfg.IsHybridServer() { + remotePeer := conn.RemotePeer() + val, err := s.host.Peerstore().Get(remotePeer, psmdkDialed) + if err == nil && val != nil && val.(bool) { + numOutgoingConns++ + } + } else { + numOutgoingConns++ + } } } + preExistingConns := numOutgoingConns for _, peerInfo := range addrInfos { // if we are at our target count stop 
trying to connect if numOutgoingConns >= targetConnCount { - return + return numOutgoingConns > preExistingConns } // if we are already connected to this peer, skip it if len(s.host.Network().ConnsToPeer(peerInfo.ID)) > 0 { @@ -302,8 +320,11 @@ func (s *serviceImpl) DialPeersUntilTargetCount(targetConnCount int) { err := s.dialNode(context.Background(), peerInfo) // leaving the calls as blocking for now, to not over-connect beyond fanout if err != nil { s.log.Warnf("failed to connect to peer %s: %v", peerInfo.ID, err) + } else { + numOutgoingConns++ } } + return numOutgoingConns > preExistingConns } // dialNode attempts to establish a connection to the provided peer @@ -314,6 +335,11 @@ func (s *serviceImpl) dialNode(ctx context.Context, peer *peer.AddrInfo) error { } ctx, cancel := context.WithTimeout(ctx, dialTimeout) defer cancel() + if s.subcfg.IsHybridServer() { + if err := s.host.Peerstore().Put(peer.ID, psmdkDialed, true); err != nil { // mark this peer as explicitly dialed + return err + } + } return s.host.Connect(ctx, *peer) } diff --git a/network/p2p/peerstore/peerstore.go b/network/p2p/peerstore/peerstore.go index 5d0b2b24ca..83ee62c09e 100644 --- a/network/p2p/peerstore/peerstore.go +++ b/network/p2p/peerstore/peerstore.go @@ -35,7 +35,7 @@ import ( // of how many addresses the phonebook actually has. ( with the retry-after logic applied ) const getAllAddresses = math.MaxInt32 -const addressDataKey string = "addressData" +const psmdkAddressData string = "addressData" // PeerStore implements Peerstore and CertifiedAddrBook. 
type PeerStore struct { @@ -104,14 +104,14 @@ func (ps *PeerStore) UpdateRetryAfter(addr string, retryAfter time.Time) { if err != nil { return } - metadata, _ := ps.Get(info.ID, addressDataKey) + metadata, _ := ps.Get(info.ID, psmdkAddressData) if metadata != nil { ad, ok := metadata.(addressData) if !ok { return } ad.retryAfter = retryAfter - _ = ps.Put(info.ID, addressDataKey, ad) + _ = ps.Put(info.ID, psmdkAddressData, ad) } } @@ -128,7 +128,7 @@ func (ps *PeerStore) GetConnectionWaitTime(addrOrPeerID string) (bool, time.Dura defer ps.lock.Unlock() peerID := peer.ID(addrOrPeerID) - metadata, err := ps.Get(peerID, addressDataKey) + metadata, err := ps.Get(peerID, psmdkAddressData) if err != nil { return false, 0 /* not used */, curTime /* not used */ } @@ -153,7 +153,7 @@ func (ps *PeerStore) GetConnectionWaitTime(addrOrPeerID string) (bool, time.Dura ps.popNElements(numElmtsToRemove, peerID) // If there are max number of connections within the time window, wait - metadata, _ = ps.Get(peerID, addressDataKey) + metadata, _ = ps.Get(peerID, psmdkAddressData) ad, ok = metadata.(addressData) if !ok { return false, 0 /* not used */, curTime /* not used */ @@ -178,7 +178,7 @@ func (ps *PeerStore) UpdateConnectionTime(addrOrPeerID string, provisionalTime t defer ps.lock.Unlock() peerID := peer.ID(addrOrPeerID) - metadata, err := ps.Get(peerID, addressDataKey) + metadata, err := ps.Get(peerID, psmdkAddressData) if err != nil { return false } @@ -187,14 +187,14 @@ func (ps *PeerStore) UpdateConnectionTime(addrOrPeerID string, provisionalTime t return false } defer func() { - _ = ps.Put(peerID, addressDataKey, ad) + _ = ps.Put(peerID, psmdkAddressData, ad) }() // Find the provisionalTime and update it entry := ad.recentConnectionTimes for indx, val := range entry { - if provisionalTime == val { + if provisionalTime.Equal(val) { entry[indx] = time.Now() return true } @@ -221,7 +221,7 @@ func (ps *PeerStore) ReplacePeerList(addressesThey []*peer.AddrInfo, networkName 
removeItems := make(map[peer.ID]bool, 0) peerIDs := ps.Peers() for _, pid := range peerIDs { - data, _ := ps.Get(pid, addressDataKey) + data, _ := ps.Get(pid, psmdkAddressData) if data != nil { ad := data.(addressData) updated := false @@ -234,20 +234,20 @@ func (ps *PeerStore) ReplacePeerList(addressesThey []*peer.AddrInfo, networkName } } if updated { - _ = ps.Put(pid, addressDataKey, ad) + _ = ps.Put(pid, psmdkAddressData, ad) } } } for _, info := range addressesThey { - data, _ := ps.Get(info.ID, addressDataKey) + data, _ := ps.Get(info.ID, psmdkAddressData) if data != nil { // we already have this // update the networkName and role ad := data.(addressData) ad.networkNames[networkName] = true ad.roles.Add(role) - _ = ps.Put(info.ID, addressDataKey, ad) + _ = ps.Put(info.ID, psmdkAddressData, ad) // do not remove this entry delete(removeItems, info.ID) @@ -255,7 +255,7 @@ func (ps *PeerStore) ReplacePeerList(addressesThey []*peer.AddrInfo, networkName // we don't have this item. add it. ps.AddAddrs(info.ID, info.Addrs, libp2p.AddressTTL) entry := makePhonebookEntryData(networkName, role, false) - _ = ps.Put(info.ID, addressDataKey, entry) + _ = ps.Put(info.ID, psmdkAddressData, entry) } } @@ -273,25 +273,25 @@ func (ps *PeerStore) AddPersistentPeers(addrInfo []*peer.AddrInfo, networkName s defer ps.lock.Unlock() for _, info := range addrInfo { - data, _ := ps.Get(info.ID, addressDataKey) + data, _ := ps.Get(info.ID, psmdkAddressData) if data != nil { // we already have this. // Make sure the persistence field is set to true and overwrite the role ad := data.(addressData) ad.roles.AddPersistent(role) - _ = ps.Put(info.ID, addressDataKey, ad) + _ = ps.Put(info.ID, psmdkAddressData, ad) } else { // we don't have this item. add it. 
ps.AddAddrs(info.ID, info.Addrs, libp2p.PermanentAddrTTL) entry := makePhonebookEntryData(networkName, role, true) - _ = ps.Put(info.ID, addressDataKey, entry) + _ = ps.Put(info.ID, psmdkAddressData, entry) } } } // HasRole checks if the peer has the given role. func (ps *PeerStore) HasRole(peerID peer.ID, role phonebook.Role) bool { - data, err := ps.Get(peerID, addressDataKey) + data, err := ps.Get(peerID, psmdkAddressData) if err != nil || data == nil { return false } @@ -316,7 +316,7 @@ func makePhonebookEntryData(networkName string, role phonebook.Role, persistent } func (ps *PeerStore) deletePhonebookEntry(peerID peer.ID, networkName string) { - data, err := ps.Get(peerID, addressDataKey) + data, err := ps.Get(peerID, psmdkAddressData) if err != nil { return } @@ -325,33 +325,33 @@ func (ps *PeerStore) deletePhonebookEntry(peerID peer.ID, networkName string) { isEmpty := len(ad.networkNames) == 0 if isEmpty { ps.ClearAddrs(peerID) - _ = ps.Put(peerID, addressDataKey, nil) + _ = ps.Put(peerID, psmdkAddressData, nil) } } // appendTime adds the current time to recentConnectionTimes in // addressData of addr func (ps *PeerStore) appendTime(peerID peer.ID, t time.Time) { - data, _ := ps.Get(peerID, addressDataKey) + data, _ := ps.Get(peerID, psmdkAddressData) ad := data.(addressData) ad.recentConnectionTimes = append(ad.recentConnectionTimes, t) - _ = ps.Put(peerID, addressDataKey, ad) + _ = ps.Put(peerID, psmdkAddressData, ad) } // popNElements removes the earliest time from recentConnectionTimes in // addressData for addr // It is expected to be later than ConnectionsRateLimitingWindow func (ps *PeerStore) popNElements(n int, peerID peer.ID) { - data, _ := ps.Get(peerID, addressDataKey) + data, _ := ps.Get(peerID, psmdkAddressData) ad := data.(addressData) ad.recentConnectionTimes = ad.recentConnectionTimes[n:] - _ = ps.Put(peerID, addressDataKey, ad) + _ = ps.Put(peerID, psmdkAddressData, ad) } func (ps *PeerStore) filterRetryTime(t time.Time, role 
phonebook.Role) []*peer.AddrInfo { o := make([]*peer.AddrInfo, 0, len(ps.Peers())) for _, peerID := range ps.Peers() { - data, _ := ps.Get(peerID, addressDataKey) + data, _ := ps.Get(peerID, psmdkAddressData) if data != nil { ad := data.(addressData) if t.After(ad.retryAfter) && ad.roles.Has(role) { diff --git a/network/p2p/peerstore/peerstore_test.go b/network/p2p/peerstore/peerstore_test.go index ddf5e70fe9..fcff51d0c6 100644 --- a/network/p2p/peerstore/peerstore_test.go +++ b/network/p2p/peerstore/peerstore_test.go @@ -157,7 +157,7 @@ func TestArrayPhonebookAll(t *testing.T) { entry := makePhonebookEntryData("", phonebook.RelayRole, false) info, _ := peerInfoFromDomainPort(addr) ph.AddAddrs(info.ID, info.Addrs, libp2p.AddressTTL) - ph.Put(info.ID, addressDataKey, entry) + ph.Put(info.ID, psmdkAddressData, entry) } testPhonebookAll(t, infoSet, ph) } @@ -179,7 +179,7 @@ func TestArrayPhonebookUniform1(t *testing.T) { entry := makePhonebookEntryData("", phonebook.RelayRole, false) info, _ := peerInfoFromDomainPort(addr) ph.AddAddrs(info.ID, info.Addrs, libp2p.AddressTTL) - ph.Put(info.ID, addressDataKey, entry) + ph.Put(info.ID, psmdkAddressData, entry) } testPhonebookUniform(t, infoSet, ph, 1) } @@ -201,7 +201,7 @@ func TestArrayPhonebookUniform3(t *testing.T) { entry := makePhonebookEntryData("", phonebook.RelayRole, false) info, _ := peerInfoFromDomainPort(addr) ph.AddAddrs(info.ID, info.Addrs, libp2p.AddressTTL) - ph.Put(info.ID, addressDataKey, entry) + ph.Put(info.ID, psmdkAddressData, entry) } testPhonebookUniform(t, infoSet, ph, 3) } @@ -340,7 +340,7 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) { require.Equal(t, true, addrInPhonebook) require.Equal(t, time.Duration(0), waitTime) require.Equal(t, true, entries.UpdateConnectionTime(string(info1.ID), provisionalTime)) - data, _ := entries.Get(info1.ID, addressDataKey) + data, _ := entries.Get(info1.ID, psmdkAddressData) require.NotNil(t, data) ad := data.(addressData) phBookData := 
ad.recentConnectionTimes @@ -355,7 +355,7 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) { _, waitTime, provisionalTime = entries.GetConnectionWaitTime(string(info1.ID)) require.Equal(t, time.Duration(0), waitTime) require.Equal(t, true, entries.UpdateConnectionTime(string(info1.ID), provisionalTime)) - data, _ = entries.Get(info1.ID, addressDataKey) + data, _ = entries.Get(info1.ID, psmdkAddressData) ad = data.(addressData) phBookData = ad.recentConnectionTimes require.Equal(t, 2, len(phBookData)) @@ -370,7 +370,7 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) { _, waitTime, provisionalTime = entries.GetConnectionWaitTime(string(info1.ID)) require.Equal(t, time.Duration(0), waitTime) require.Equal(t, true, entries.UpdateConnectionTime(string(info1.ID), provisionalTime)) - data, _ = entries.Get(info1.ID, addressDataKey) + data, _ = entries.Get(info1.ID, psmdkAddressData) ad = data.(addressData) phBookData2 := ad.recentConnectionTimes require.Equal(t, 2, len(phBookData2)) @@ -390,7 +390,7 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) { // introduce a gap between the two requests so that only the first will be removed later when waited // simulate passing a unit of time - data2, _ := entries.Get(info2.ID, addressDataKey) + data2, _ := entries.Get(info2.ID, psmdkAddressData) require.NotNil(t, data2) ad2 := data2.(addressData) for rct := range ad2.recentConnectionTimes { @@ -406,7 +406,7 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) { require.Equal(t, time.Duration(0), waitTime) require.Equal(t, true, entries.UpdateConnectionTime(string(info2.ID), provisionalTime)) - data2, _ = entries.Get(info2.ID, addressDataKey) + data2, _ = entries.Get(info2.ID, psmdkAddressData) ad2 = data2.(addressData) phBookData = ad2.recentConnectionTimes // all three times should be queued @@ -416,7 +416,7 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) { _, waitTime, _ = 
entries.GetConnectionWaitTime(string(info2.ID)) require.Greater(t, int64(waitTime), int64(0)) // no element should be removed - data2, _ = entries.Get(info2.ID, addressDataKey) + data2, _ = entries.Get(info2.ID, psmdkAddressData) ad2 = data2.(addressData) phBookData2 = ad2.recentConnectionTimes require.Equal(t, phBookData[0], phBookData2[0]) @@ -432,7 +432,7 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) { require.Equal(t, time.Duration(0), waitTime) require.Equal(t, true, entries.UpdateConnectionTime(string(info2.ID), provisionalTime)) // only one element should be removed, and one added - data2, _ = entries.Get(info2.ID, addressDataKey) + data2, _ = entries.Get(info2.ID, psmdkAddressData) ad2 = data2.(addressData) phBookData2 = ad2.recentConnectionTimes require.Equal(t, 3, len(phBookData2)) diff --git a/network/p2p/pubsub.go b/network/p2p/pubsub.go index 568dc9b031..2e5906aeb3 100644 --- a/network/p2p/pubsub.go +++ b/network/p2p/pubsub.go @@ -20,7 +20,6 @@ import ( "context" "time" - "github.com/algorand/go-algorand/config" pubsub "github.com/libp2p/go-libp2p-pubsub" pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/libp2p/go-libp2p/core/host" @@ -58,10 +57,24 @@ const TXTopicName = "algotx01" const incomingThreads = 20 // matches to number wsNetwork workers -func makePubSub(ctx context.Context, cfg config.Local, host host.Host, opts ...pubsub.Option) (*pubsub.PubSub, error) { - //defaultParams := pubsub.DefaultGossipSubParams() +// deriveGossipSubParams derives the gossip sub parameters from the cfg.GossipFanout value +// by using the same proportions as pubsub defaults - see GossipSubD, GossipSubDlo, etc. 
+func deriveGossipSubParams(numOutgoingConns int) pubsub.GossipSubParams { + params := pubsub.DefaultGossipSubParams() + params.D = numOutgoingConns + params.Dlo = params.D - 1 + if params.Dlo <= 0 { + params.Dlo = params.D + } + params.Dscore = params.D * 2 / 3 + params.Dout = params.D * 1 / 3 + return params +} +func makePubSub(ctx context.Context, host host.Host, numOutgoingConns int, opts ...pubsub.Option) (*pubsub.PubSub, error) { + gossipSubParams := deriveGossipSubParams(numOutgoingConns) options := []pubsub.Option{ + pubsub.WithGossipSubParams(gossipSubParams), pubsub.WithPeerScore(&pubsub.PeerScoreParams{ DecayInterval: pubsub.DefaultDecayInterval, DecayToZero: pubsub.DefaultDecayToZero, diff --git a/network/p2p/pubsub_test.go b/network/p2p/pubsub_test.go new file mode 100644 index 0000000000..70cb957148 --- /dev/null +++ b/network/p2p/pubsub_test.go @@ -0,0 +1,73 @@ +// Copyright (C) 2019-2025 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package p2p + +import ( + "testing" + + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/test/partitiontest" +) + +func TestPubsub_GossipSubParamsBasic(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + cfg := config.GetDefaultLocal() + + for _, fanout := range []int{4, 8} { + cfg.GossipFanout = fanout + + params := deriveGossipSubParams(cfg.GossipFanout) + + require.Equal(t, fanout, params.D) + require.Equal(t, fanout-1, params.Dlo) + require.Equal(t, fanout*2/3, params.Dscore) + require.Equal(t, fanout*1/3, params.Dout) + + // Sanity: other defaults are preserved (not zeroed). Avoid asserting exact values to reduce brittleness. + def := pubsub.DefaultGossipSubParams() + require.Equal(t, def.HeartbeatInitialDelay, params.HeartbeatInitialDelay) + require.Equal(t, def.HistoryLength, params.HistoryLength) + } +} + +func TestPubsub_GossipSubParamsEdgeCases(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + // D = 1 => Dlo must not drop below 1 + cfg := config.GetDefaultLocal() + cfg.GossipFanout = 1 + p := deriveGossipSubParams(cfg.GossipFanout) + require.Equal(t, 1, p.D) + require.Equal(t, 1, p.Dlo) + require.Equal(t, 0, p.Dscore) + require.Equal(t, 0, p.Dout) + + // D = 0 => keep Dlo = D (0) instead of negative + cfg = config.GetDefaultLocal() + cfg.GossipFanout = 0 + p = deriveGossipSubParams(cfg.GossipFanout) + require.Equal(t, 0, p.D) + require.Equal(t, 0, p.Dlo) + require.Equal(t, 0, p.Dscore) + require.Equal(t, 0, p.Dout) +} diff --git a/network/p2p/streams.go b/network/p2p/streams.go index b44c48c6e3..f880a4f833 100644 --- a/network/p2p/streams.go +++ b/network/p2p/streams.go @@ -34,6 +34,7 @@ import ( type streamManager struct { ctx context.Context log logging.Logger + cfg nodeSubConfig host host.Host handlers StreamHandlers allowIncomingGossip bool @@ -45,10 +46,11 @@ type streamManager struct { // 
StreamHandler is called when a new bidirectional stream for a given protocol and peer is opened. type StreamHandler func(ctx context.Context, pid peer.ID, s network.Stream, incoming bool) -func makeStreamManager(ctx context.Context, log logging.Logger, h host.Host, handlers StreamHandlers, allowIncomingGossip bool) *streamManager { +func makeStreamManager(ctx context.Context, log logging.Logger, cfg nodeSubConfig, h host.Host, handlers StreamHandlers, allowIncomingGossip bool) *streamManager { return &streamManager{ ctx: ctx, log: log, + cfg: cfg, host: h, handlers: handlers, allowIncomingGossip: allowIncomingGossip, @@ -134,6 +136,20 @@ func (n *streamManager) Connected(net network.Network, conn network.Conn) { return } + // check if this is outgoing connection but made not by us (serviceImpl.dialNode) + // then it was made by some sub component like pubsub, ignore + if n.cfg.IsHybridServer() && conn.Stat().Direction == network.DirOutbound { + val, err := n.host.Peerstore().Get(remotePeer, psmdkDialed) + if err != nil || val != nil && !val.(bool) { + // not found or false value + n.log.Debugf("%s: ignoring non-dialed outgoing peer ID %s", localPeer.String(), remotePeer.String()) + return + } + if val == nil { + n.log.Warnf("%s: failed to get dialed status for %s", localPeer.String(), remotePeer.String()) + } + } + n.streamsLock.Lock() _, ok := n.streams[remotePeer] if ok { diff --git a/network/p2p/streams_test.go b/network/p2p/streams_test.go new file mode 100644 index 0000000000..93e26aecbf --- /dev/null +++ b/network/p2p/streams_test.go @@ -0,0 +1,118 @@ +// Copyright (C) 2019-2025 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. 
+// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package p2p + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/network/p2p/peerstore" + "github.com/algorand/go-algorand/test/partitiontest" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/require" +) + +// TestConnectedLogsNonDialedOutgoingConnection tests that the Connected function +// exits early for non-dialed outgoing connections by checking the log output +func TestStreamNonDialedOutgoingConnection(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + logBuffer := &strings.Builder{} + logger := logging.NewLogger() + logger.SetOutput(logBuffer) + logger.SetLevel(logging.Debug) + + cfg := config.GetDefaultLocal() + cfg.NetAddress = ":1" + cfg.EnableP2PHybridMode = true + cfg.P2PHybridNetAddress = ":2" + + pstore1, err := peerstore.NewPeerStore(nil, "test1") + require.NoError(t, err) + pstore2, err := peerstore.NewPeerStore(nil, "test2") + require.NoError(t, err) + + var dialerHost, listenerHost host.Host + var dialerSM, listenerSM *streamManager + + host1, _, err := MakeHost(cfg, t.TempDir(), pstore1) + require.NoError(t, err) + defer host1.Close() + + host2, _, err := MakeHost(cfg, t.TempDir(), pstore2) + require.NoError(t, err) + defer host2.Close() + + if host1.ID() < host2.ID() { + dialerHost = host1 + listenerHost = host2 + } else { + dialerHost = host2 + listenerHost = host1 + } + + 
// Make listenerHost listen on a port so we can connect to it + listenAddr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/0") + require.NoError(t, err) + err = listenerHost.Network().Listen(listenAddr) + require.NoError(t, err) + + ctx := context.Background() + handlers := StreamHandlers{} + dialerSM = makeStreamManager(ctx, logger, cfg, dialerHost, handlers, false) + listenerSM = makeStreamManager(ctx, logger, cfg, listenerHost, handlers, false) + + // Setup Connected notification + dialerHost.Network().Notify(dialerSM) + listenerHost.Network().Notify(listenerSM) + + logBuffer.Reset() + + listenerAddrs := listenerHost.Network().ListenAddresses() + require.NotEmpty(t, listenerAddrs, "listenerHost should have listening addresses") + dialerHost.Peerstore().AddAddrs(listenerHost.ID(), listenerAddrs, 1) + + // Connect dialerHost to listenerHost directly, not through dialNode + err = dialerHost.Connect(ctx, peer.AddrInfo{ + ID: listenerHost.ID(), + Addrs: listenerAddrs, + }) + require.NoError(t, err) + + require.Eventually(t, func() bool { + return len(dialerHost.Network().ConnsToPeer(listenerHost.ID())) > 0 + }, 5*time.Second, 50*time.Millisecond) + + conns := dialerHost.Network().ConnsToPeer(listenerHost.ID()) + require.Len(t, conns, 1) + require.Equal(t, network.DirOutbound, conns[0].Stat().Direction) + + // Check that the log contains the expected message for non-dialed outgoing connection + logOutput := logBuffer.String() + expectedMsg := "ignoring non-dialed outgoing peer ID" + require.Contains(t, logOutput, expectedMsg) + require.Contains(t, logOutput, listenerHost.ID().String()) +} diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go index 09db008e65..959d960ec7 100644 --- a/network/p2pNetwork.go +++ b/network/p2pNetwork.go @@ -56,10 +56,13 @@ type P2PNetwork struct { log logging.Logger config config.Local genesisInfo GenesisInfo - ctx context.Context - ctxCancel context.CancelFunc - peerStats map[peer.ID]*p2pPeerStats - peerStatsMu deadlock.Mutex + // 
voteCompressionTableSize is the validated/normalized table size for VP compression. + // It is set during setup() by validating config.StatefulVoteCompressionTableSize. + voteCompressionTableSize uint + ctx context.Context + ctxCancel context.CancelFunc + peerStats map[peer.ID]*p2pPeerStats + peerStatsMu deadlock.Mutex wg sync.WaitGroup @@ -76,6 +79,16 @@ type P2PNetwork struct { wsPeersConnectivityCheckTicker *time.Ticker peerStater peerConnectionStater + // connPerfMonitor is used on outgoing connections to measure their relative message timing + connPerfMonitor *connectionPerformanceMonitor + + // outgoingConnsCloser used to check number of outgoing connections and disconnect as needed. + // it is also used as a watchdog to help us detect connectivity issues ( such as cliques ) so that it monitors agreement protocol progress. + outgoingConnsCloser *outgoingConnsCloser + + // number of throttled outgoing connections "slots" needed to be populated. + throttledOutgoingConnections atomic.Int32 + meshUpdateRequests chan meshRequest mesher mesher meshCreator MeshCreator // save parameter to use in setup() @@ -337,8 +350,8 @@ func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebo if cfg.EnableDHTProviders { disc, err0 := p2p.MakeCapabilitiesDiscovery(net.ctx, cfg, h, net.genesisInfo.NetworkID, net.log, bootstrapper.BootstrapFunc) if err0 != nil { - log.Errorf("Failed to create dht node capabilities discovery: %v", err) - return nil, err + log.Errorf("Failed to create dht node capabilities discovery: %v", err0) + return nil, err0 } net.capabilitiesDiscovery = disc } @@ -353,6 +366,9 @@ func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebo } func (n *P2PNetwork) setup() error { + // Validate and normalize vote compression table size + n.voteCompressionTableSize = n.config.NormalizedVoteCompressionTableSize(n.log) + if n.broadcaster.slowWritingPeerMonitorInterval == 0 { n.broadcaster.slowWritingPeerMonitorInterval = 
slowWritingPeerMonitorInterval } @@ -364,6 +380,7 @@ func (n *P2PNetwork) setup() error { var err error n.mesher, err = meshCreator.create( withContext(n.ctx), + withTargetConnCount(n.config.GossipFanout), withMeshExpJitterBackoff(), withMeshNetMeshFn(n.meshThreadInner), withMeshPeerStatReporter(func() { @@ -376,11 +393,25 @@ func (n *P2PNetwork) setup() error { return fmt.Errorf("failed to create mesh: %w", err) } + n.connPerfMonitor = makeConnectionPerformanceMonitor([]Tag{protocol.AgreementVoteTag, protocol.TxnTag}) + n.outgoingConnsCloser = makeOutgoingConnsCloser(n.log, n, n.connPerfMonitor, cliqueResolveInterval) + return nil } -func (n *P2PNetwork) p2pRelayPeerFilter(checker peerstore.RoleChecker, pid peer.ID) bool { - return !checker.HasRole(pid, phonebook.RelayRole) +func (n *P2PNetwork) outgoingPeers() (peers []Peer) { + n.wsPeersLock.RLock() + defer n.wsPeersLock.RUnlock() + for _, peer := range n.wsPeers { + if peer.outgoing { + peers = append(peers, Peer(peer)) + } + } + return peers +} + +func (n *P2PNetwork) numOutgoingPending() int { + return 0 } // PeerID returns this node's peer ID. @@ -401,6 +432,16 @@ func (n *P2PNetwork) Start() error { return err } + if n.relayMessages { + n.throttledOutgoingConnections.Store(int32(n.config.GossipFanout / 2)) + } else { + // on non-relay, all the outgoing connections are throttled. 
+ n.throttledOutgoingConnections.Store(int32(n.config.GossipFanout)) + } + if n.config.DisableOutgoingConnectionThrottling { + n.throttledOutgoingConnections.Store(0) + } + wantTXGossip := n.relayMessages || n.config.ForceFetchTransactions || n.nodeInfo.IsParticipating() if wantTXGossip { n.wantTXGossip.Store(true) @@ -489,10 +530,7 @@ func (n *P2PNetwork) innerStop() { closeGroup.Wait() } -// meshThreadInner fetches nodes from DHT and attempts to connect to them -func (n *P2PNetwork) meshThreadInner() bool { - defer n.service.DialPeersUntilTargetCount(n.config.GossipFanout) - +func (n *P2PNetwork) refreshPeerStoreAddresses() { // fetch peers from DNS var dnsPeers, dhtPeers []peer.AddrInfo dnsPeers = dnsLookupBootstrapPeers(n.log, n.config, n.genesisInfo.NetworkID, dnsaddr.NewMultiaddrDNSResolveController(n.config.DNSSecurityTXTEnforced(), "")) @@ -531,7 +569,23 @@ func (n *P2PNetwork) meshThreadInner() bool { if len(peers) > 0 { n.pstore.ReplacePeerList(replace, string(n.genesisInfo.NetworkID), phonebook.RelayRole) } - return len(peers) > 0 +} + +// meshThreadInner fetches nodes from DHT and attempts to connect to them. +// It returns the number of peers connected. +func (n *P2PNetwork) meshThreadInner(targetConnCount int) int { + n.refreshPeerStoreAddresses() + for { //nolint:staticcheck // easier to read + if n.service.DialPeersUntilTargetCount(targetConnCount) { + break + } + if !n.outgoingConnsCloser.checkExistingConnectionsNeedDisconnecting(targetConnCount) { + // no connection were removed. + break + } + } + + return len(n.outgoingPeers()) } func (n *P2PNetwork) httpdThread() { @@ -596,22 +650,25 @@ func (n *P2PNetwork) Relay(ctx context.Context, tag protocol.Tag, data []byte, w // Disconnect from a peer, probably due to protocol errors. 
func (n *P2PNetwork) Disconnect(badpeer DisconnectablePeer) { + n.disconnect(badpeer, disconnectReasonNone) +} + +func (n *P2PNetwork) disconnect(badpeer Peer, reason disconnectReason) { var peerID peer.ID var wsp *wsPeer - n.wsPeersLock.Lock() - defer n.wsPeersLock.Unlock() switch p := badpeer.(type) { case *wsPeer: // Disconnect came from a message received via wsPeer + n.wsPeersLock.RLock() peerID, wsp = n.wsPeersToIDs[p], p + n.wsPeersLock.RUnlock() default: n.log.Warnf("Unknown peer type %T", badpeer) return } if wsp != nil { wsp.CloseAndWait(time.Now().Add(peerDisconnectionAckDuration)) - delete(n.wsPeers, peerID) - delete(n.wsPeersToIDs, wsp) + n.removePeer(wsp, peerID, reason) } else { n.log.Warnf("Could not find wsPeer reference for peer %s", peerID) } @@ -722,7 +779,7 @@ func (n *P2PNetwork) GetPeers(options ...PeerOption) []Peer { n.log.Debugf("Relay node(s) from peerstore: %v", addrs) } case PeersPhonebookArchivalNodes: - // query known archival nodes that came from from DHT if enabled (or DNS if configured) + // query known archival nodes that came from DHT if enabled (or DNS if configured) addrInfos := n.pstore.GetAddresses(numArchivalPeersToFind, phonebook.ArchivalRole) for _, peerInfo := range addrInfos { if peerInfo.ID == n.service.ID() { @@ -787,6 +844,7 @@ func (n *P2PNetwork) GetHTTPClient(address string) (*http.Client, error) { // arrive very quickly, but might be missing some votes. The usage of this call is expected to have similar // characteristics as with a watchdog timer. func (n *P2PNetwork) OnNetworkAdvance() { + n.outgoingConnsCloser.updateLastAdvance() if n.nodeInfo != nil { old := n.wantTXGossip.Load() new := n.relayMessages || n.config.ForceFetchTransactions || n.nodeInfo.IsParticipating() @@ -830,6 +888,16 @@ func (n *P2PNetwork) Config() config.Local { return n.config } +// StatefulVoteCompressionTableSize returns the validated/normalized vote compression table size. 
+func (n *P2PNetwork) StatefulVoteCompressionTableSize() uint { + return n.voteCompressionTableSize +} + +// VoteCompressionEnabled returns whether vote compression is enabled for this node. +func (n *P2PNetwork) VoteCompressionEnabled() bool { + return n.config.EnableVoteCompression +} + // wsStreamHandler is a callback that the p2p package calls when a new peer connects and establishes a // stream for the websocket protocol. // TODO: remove after consensus v41 takes effect. @@ -923,15 +991,27 @@ func (n *P2PNetwork) baseWsStreamHandler(ctx context.Context, p2pPeer peer.ID, s } peerCore := makePeerCore(ctx, n, n.log, n.handler.readBuffer, addr, client, addr) wsp := &wsPeer{ - wsPeerCore: peerCore, - conn: &wsPeerConnP2P{stream: stream}, - outgoing: !incoming, - identity: netIdentPeerID, - peerType: peerTypeP2P, - TelemetryGUID: pmi.telemetryID, - InstanceName: pmi.instanceName, - features: decodePeerFeatures(pmi.version, pmi.features), - enableVoteCompression: n.config.EnableVoteCompression, + wsPeerCore: peerCore, + conn: &wsPeerConnP2P{stream: stream}, + outgoing: !incoming, + identity: netIdentPeerID, + peerType: peerTypeP2P, + TelemetryGUID: pmi.telemetryID, + InstanceName: pmi.instanceName, + features: decodePeerFeatures(pmi.version, pmi.features), + enableVoteCompression: n.config.EnableVoteCompression, + voteCompressionTableSize: n.voteCompressionTableSize, + } + if !incoming { + throttledConnection := false + if n.throttledOutgoingConnections.Add(int32(-1)) >= 0 { + throttledConnection = true + } else { + n.throttledOutgoingConnections.Add(int32(1)) + } + + wsp.connMonitor = n.connPerfMonitor + wsp.throttledOutgoingConnection = throttledConnection } localAddr, has := n.Address() @@ -987,6 +1067,10 @@ func (n *P2PNetwork) baseWsStreamHandler(ctx context.Context, p2pPeer peer.ID, s // peerRemoteClose called from wsPeer to report that it has closed func (n *P2PNetwork) peerRemoteClose(peer *wsPeer, reason disconnectReason) { remotePeerID := 
peer.conn.(*wsPeerConnP2P).stream.Conn().RemotePeer() + n.removePeer(peer, remotePeerID, reason) +} + +func (n *P2PNetwork) removePeer(peer *wsPeer, remotePeerID peer.ID, reason disconnectReason) { n.wsPeersLock.Lock() n.identityTracker.removeIdentity(peer) delete(n.wsPeers, remotePeerID) @@ -1014,6 +1098,9 @@ func (n *P2PNetwork) peerRemoteClose(peer *wsPeer, reason disconnectReason) { AVCount: peer.avMessageCount.Load(), PPCount: peer.ppMessageCount.Load(), }) + if peer.throttledOutgoingConnection { + n.throttledOutgoingConnections.Add(int32(1)) + } } func (n *P2PNetwork) peerSnapshot(dest []*wsPeer) ([]*wsPeer, int32) { diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index 50c907f091..9c44f1f74c 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -72,6 +72,7 @@ func TestP2PSubmitTX(t *testing.T) { cfg := config.GetDefaultLocal() cfg.ForceFetchTransactions = true cfg.NetAddress = "127.0.0.1:0" + cfg.DNSBootstrapID = "" log := logging.TestingLog(t) genesisInfo := GenesisInfo{genesisID, config.Devtestnet} netA, err := NewP2PNetwork(log, cfg, "", nil, genesisInfo, &nopeNodeInfo{}, nil, nil) @@ -162,6 +163,7 @@ func TestP2PSubmitTXNoGossip(t *testing.T) { cfg := config.GetDefaultLocal() cfg.ForceFetchTransactions = true cfg.NetAddress = "127.0.0.1:0" + cfg.DNSBootstrapID = "" log := logging.TestingLog(t) genesisInfo := GenesisInfo{genesisID, config.Devtestnet} netA, err := NewP2PNetwork(log, cfg, "", nil, genesisInfo, &nopeNodeInfo{}, nil, nil) @@ -256,6 +258,7 @@ func TestP2PSubmitWS(t *testing.T) { cfg := config.GetDefaultLocal() cfg.NetAddress = "127.0.0.1:0" + cfg.DNSBootstrapID = "" log := logging.TestingLog(t) genesisInfo := GenesisInfo{genesisID, config.Devtestnet} netA, err := NewP2PNetwork(log, cfg, "", nil, genesisInfo, &nopeNodeInfo{}, nil, nil) @@ -352,7 +355,8 @@ func (s *mockService) AddrInfo() peer.AddrInfo { } } -func (s *mockService) DialPeersUntilTargetCount(targetConnCount int) { +func (s *mockService) 
DialPeersUntilTargetCount(targetConnCount int) bool { + return false } func (s *mockService) ClosePeer(peer peer.ID) error { @@ -398,8 +402,8 @@ func TestP2PNetworkAddress(t *testing.T) { cfg := config.GetDefaultLocal() log := logging.TestingLog(t) netA, err := NewP2PNetwork(log, cfg, "", nil, GenesisInfo{genesisID, config.Devtestnet}, &nopeNodeInfo{}, nil, nil) - defer netA.Stop() require.NoError(t, err) + defer netA.Stop() addrInfo := netA.service.AddrInfo() // close the real service since we will substitute a mock one netA.service.Close() @@ -595,6 +599,7 @@ func TestP2PNetworkDHTCapabilities(t *testing.T) { cfg := config.GetDefaultLocal() cfg.NetAddress = "127.0.0.1:0" cfg.EnableDHTProviders = true + cfg.DNSBootstrapID = "" log := logging.TestingLog(t) genesisInfo := GenesisInfo{genesisID, config.Devtestnet} @@ -695,7 +700,7 @@ func TestP2PNetworkDHTCapabilities(t *testing.T) { // it appears there are artificial peers because of listening on localhost and on a real network interface // so filter out and save only unique peers by their IDs net := nets[idx] - net.meshThreadInner() // update peerstore with DHT peers + net.meshThreadInner(cfg.GossipFanout) // update peerstore with DHT peers peers := net.GetPeers(PeersPhonebookArchivalNodes) uniquePeerIDs := make(map[peer.ID]struct{}) for _, p := range peers { @@ -801,6 +806,8 @@ func TestP2PHTTPHandler(t *testing.T) { // zero clients allowed, rate limiting window (10s) is greater than queue deadline (1s) netB, err := NewP2PNetwork(log, cfg, "", nil, GenesisInfo{genesisID, config.Devtestnet}, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) + defer netB.Stop() // even though netB.Start is not called, NewP2PNetwork creates goroutines to stop + pstore, err := peerstore.MakePhonebook(0, 10*time.Second) require.NoError(t, err) pstore.AddPersistentPeers([]*peer.AddrInfo{&peerInfoA}, "net", phonebook.RelayRole) @@ -1059,11 +1066,13 @@ func TestP2PWantTXGossip(t *testing.T) { peerID := peer.ID("myPeerID") mockService := 
&mockSubPService{mockService: mockService{id: peerID}, shouldNextFail: true} net := &P2PNetwork{ - service: mockService, - log: logging.TestingLog(t), - ctx: ctx, - nodeInfo: &nopeNodeInfo{}, + service: mockService, + log: logging.TestingLog(t), + ctx: ctx, + nodeInfo: &nopeNodeInfo{}, + connPerfMonitor: makeConnectionPerformanceMonitor([]Tag{protocol.AgreementVoteTag, protocol.TxnTag}), } + net.outgoingConnsCloser = makeOutgoingConnsCloser(logging.TestingLog(t), net, net.connPerfMonitor, cliqueResolveInterval) // ensure wantTXGossip from false to false is noop net.wantTXGossip.Store(false) @@ -1426,6 +1435,7 @@ func TestP2PTxTopicValidator_NoWsPeer(t *testing.T) { net, err := NewP2PNetwork(log, cfg, "", nil, GenesisInfo{genesisID, config.Devtestnet}, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) + defer net.Stop() peerID := peer.ID("12345678") // must be 8+ in size msg := pubsub.Message{Message: &pb.Message{}, ID: string(peerID)} @@ -1455,6 +1465,7 @@ func TestGetPeersFiltersSelf(t *testing.T) { net, err := NewP2PNetwork(log, cfg, t.TempDir(), []string{}, GenesisInfo{"test-genesis", "test-network"}, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) + defer net.Stop() selfID := net.service.ID() // Create and add self diff --git a/network/vpack/README.md b/network/vpack/README.md index 1bb6dcd2c5..23cbfbfafd 100644 --- a/network/vpack/README.md +++ b/network/vpack/README.md @@ -1,11 +1,21 @@ -# Stateless *vpack* wire format +# *vpack* wire format -This document specifies the byte‑level (on‑wire) layout produced by `StatelessEncoder.CompressVote` and accepted by `StatelessDecoder.DecompressVote`. +This document specifies the byte‑level (on‑wire) layout for vote compression. The goal is to minimize vote size while retaining a 1‑to‑1, loss‑free mapping to the canonical msgpack representation of `agreement.UnauthenticatedVote`. 
The canonical msgpack representation we rely on is provided by agreement/msgp_gen.go, generated by our [custom msgpack code generator](https://github.com/algorand/msgp) which ensures fields are generated in lexicographic order, omit empty key-value pairs, and use specific formats for certain types as defined in [our specification](https://github.com/algorandfoundation/specs/blob/c0331123148971e4705f25b9c937cb23e5ee28d1/dev/crypto.md#L22-L40). +## Compression Layers + +Vote compression uses two layers: + +1. **Stateless compression** (`StatelessEncoder`/`StatelessDecoder`): Removes msgpack formatting and field names, replacing them with a bitmask. This is always applied and has no memory overhead. + +2. **Stateful compression (optional)** (`StatefulEncoder`/`StatefulDecoder`): Further compresses by replacing frequently repeated values with references to LRU tables and a sliding window. This layer requires per-connection state (configurable size, e.g., ~224KB per direction for 1024-entry tables) and is optional. When used, it operates on the output of the stateless layer. + +Both layers use the same 2-byte header format, with byte 0 used by the stateless layer, and byte 1 used by the stateful layer. + --- ## 1. High‑level structure @@ -26,10 +36,10 @@ No field names appear, only values. | Offset | Description | | ------ | -------------------------------------------------------------- | -| `0` | Presence flags for optional values (LSB first, see table). | -| `1` | Reserved, currently zero. | +| `0` | Presence flags for optional values (LSB first, see §2.1). | +| `1` | Stateful compression flags (zero if not using stateful layer, see §2.2). 
| -### 2.1 Bit‑mask layout (byte 0) +### 2.1 Stateless bit‑mask layout (byte 0) | Bit | Flag | Field enabled | Encoded size | | --- | ----------- | -------------------------------- | ------------ | @@ -48,25 +58,45 @@ Integers use msgpack's variable-length unsigned integer encoding: - `uint32` 5 bytes in length (marker byte 0xce + 4-byte value) - `uint64` 9 bytes in length (marker byte 0xcf + 8-byte value) +### 2.2 Stateful compression flags (byte 1) + +When stateful compression is used, byte 1 encodes which values have been replaced by references. When stateful compression is not used, byte 1 must be zero. + +| Bits | Flag | Field(s) | Meaning | +| ---- | ---------- | ---------------------------------- | ----------------------------------------------------------- | +| 0-1 | `rndDelta` | `r.rnd` delta encoding | `00`=literal, `01`=+1, `10`=-1, `11`=same as last round | +| 2-4 | `propRef` | `r.prop` window reference | `000`=literal, `001`-`111`=sliding window index (1-7) | +| 5 | `sndRef` | `r.snd` reference | `0`=literal (32 bytes), `1`=LRU table reference (2 bytes) | +| 6 | `pkRef` | `sig.p` + `sig.p1s` reference | `0`=literal (96 bytes), `1`=LRU table reference (2 bytes) | +| 7 | `pk2Ref` | `sig.p2` + `sig.p2s` reference | `0`=literal (96 bytes), `1`=LRU table reference (2 bytes) | + +The stateful layer uses: +- **Round delta encoding**: Most votes reference the same round as before, with some interleaving votes between the current and next round, as voters gradually observe consensus and move on to the next round. A 2-bit encoding specifies whether `r.rnd` is the same as the previous vote, or has increased or decreased by one, and can be omitted from the message for these cases. If `rndDelta` is `00`, the literal value appears in the message. +- **Proposal sliding window**: A 7-entry HPACK-style window tracks recent proposal values. 
In a typical round, all votes should be for the same proposal value, compressing previously-seen proposal values from ~96 bytes down to a 3-bit reference. +- **LRU tables**: Three 2-way set-associative hash tables cache recently seen values for sender addresses, and two tiers of (public key, signature) pairs. Since some consensus participants vote in rounds more often than others, these tables record the most common field values re-used across votes by the same participant, and replace them with references: + - The `snd` table tracks participating addresses, which are re-used across all votes by a participant; + - the `pk` table tracks `sig.pk` and `sig.p1s` values, which are re-used across votes in the same round by a participant; + - and the `pk2` table tracks `sig.p2` and `sig.p2s` values, which are re-used across all votes in a batch by a participant (typically thousands of rounds), under Algorand's hierarchical consensus signature scheme. + --- ## 3. Field serialization order -After the 2-byte header, the encoder emits values in the following order: - -| Field | Type | Encoded size | Presence flag | -| -------------- | ------------------------------ | ------------ | ------------- | -| `pf` | VRF credential | 80 bytes | Required | -| `r.per` | Period | varuint | `bitPer` | -| `r.prop.dig` | Proposal digest | 32 bytes | `bitDig` | -| `r.prop.encdig`| Digest of encoded proposal | 32 bytes | `bitEncDig` | -| `r.prop.oper` | Proposal's original period | varuint | `bitOper` | -| `r.prop.oprop` | Proposal's original proposer | 32 bytes | `bitOprop` | -| `r.rnd` | Round number | varuint | Required | -| `r.snd` | Voter's (sender) address | 32 bytes | Required | -| `r.step` | Step | varuint | `bitStep` | -| `sig.p` | Ed25519 public key | 32 bytes | Required | -| `sig.p1s` | Signature over offset ID | 64 bytes | Required | -| `sig.p2` | Second-tier Ed25519 public key | 32 bytes | Required | -| `sig.p2s` | Signature over batch ID | 64 bytes | Required | -| `sig.s` | 
Signature over vote using `p` | 64 bytes | Required | +After the 2-byte header, the encoder emits values in the following order. If stateful compression is enabled, the "stateful encoding" column specifies how each field is additionally transformed or omitted. + +| Field | Type | Encoded size | Presence flag | Stateful encoding | +| -------------- | ------------------------------ | ------------ | ------------- | -------------------------------- | +| `pf` | VRF credential | 80 bytes | Required | Unchanged | +| `r.per` | Period | varuint | `bitPer` | Unchanged | +| `r.prop.dig` | Proposal digest | 32 bytes | `bitDig` | Omitted if `propRef` set | +| `r.prop.encdig`| Digest of encoded proposal | 32 bytes | `bitEncDig` | Omitted if `propRef` set | +| `r.prop.oper` | Proposal's original period | varuint | `bitOper` | Omitted if `propRef` set | +| `r.prop.oprop` | Proposal's original proposer | 32 bytes | `bitOprop` | Omitted if `propRef` set | +| `r.rnd` | Round number | varuint | Required | Omitted if `rndDelta` set | +| `r.snd` | Voter's (sender) address | 32 bytes | Required | 2-byte reference if `sndRef` set | +| `r.step` | Step | varuint | `bitStep` | literal (if present) | +| `sig.p` | Ed25519 public key | 32 bytes | Required | 2-byte reference if `pkRef` set | +| `sig.p1s` | Signature over offset ID | 64 bytes | Required | Omitted if `pkRef` set | +| `sig.p2` | Second-tier Ed25519 public key | 32 bytes | Required | 2-byte reference if `pk2Ref` set | +| `sig.p2s` | Signature over batch ID | 64 bytes | Required | Omitted if `pk2Ref` set | +| `sig.s` | Signature over vote using `p` | 64 bytes | Required | Unchanged | diff --git a/network/vpack/dynamic_vpack.go b/network/vpack/dynamic_vpack.go new file mode 100644 index 0000000000..ea324cd2ae --- /dev/null +++ b/network/vpack/dynamic_vpack.go @@ -0,0 +1,617 @@ +// Copyright (C) 2019-2025 Algorand, Inc. 
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package vpack
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+
+	"github.com/algorand/msgp/msgp"
+)
+
+// The second byte in the header is used by StatefulEncoder and
+// StatefulDecoder to signal which values have been replaced
+// by references.
+// For r.prop, 3 bits are used to encode the reference directly.
+// For r.rnd, a 2-bit delta encoding is used.
+// +// 7 6 5 4 3 2 1 0 +// | | | \___/ \_/-- rnd encoding (00=literal, 01=+1, 10=-1, 11=same as last rnd) +// | | | `-------- prop window reference (000=literal, 001...111=window index) +// | | +------------ snd table reference (0=literal, 1=table) +// | +-------------- (sig.p, sig.p1s) table reference (0=literal, 1=table) +// +---------------- (sig.p2, sig.p2s) table reference (0=literal, 1=table) +const ( + // bits 0-1: rnd delta encoding + hdr1RndMask = 0b00000011 + hdr1RndDeltaSame = 0b11 + hdr1RndDeltaPlus1 = 0b01 + hdr1RndDeltaMinus1 = 0b10 + hdr1RndLiteral = 0b00 + + // bits 2-4: proposal-bundle reference (value<<2) + hdr1PropShift = 2 + hdr1PropMask = 0b00011100 + + // bits 5-7: whether snd, pk, pk2 are dynamic table references + hdr1SndRef = 1 << 5 + hdr1PkRef = 1 << 6 + hdr1Pk2Ref = 1 << 7 + + // sizes used below + pfSize = 80 // committee.VrfProof + digestSize = 32 // crypto.Digest (and basics.Address) + sigSize = 64 // crypto.Signature + pkSize = 32 // crypto.PublicKey +) + +// StatefulEncoder compresses votes by using references to previously seen values +// from earlier votes. +type StatefulEncoder struct{ dynamicTableState } + +// StatefulDecoder decompresses votes by using references to previously seen values +// from earlier votes. +type StatefulDecoder struct{ dynamicTableState } + +// NewStatefulEncoder creates a new StatefulEncoder with initialized LRU tables of the specified size +func NewStatefulEncoder(tableSize uint) (*StatefulEncoder, error) { + e := &StatefulEncoder{} + if err := e.initTables(tableSize); err != nil { + return nil, err + } + return e, nil +} + +// NewStatefulDecoder creates a new StatefulDecoder with initialized LRU tables of the specified size +func NewStatefulDecoder(tableSize uint) (*StatefulDecoder, error) { + d := &StatefulDecoder{} + if err := d.initTables(tableSize); err != nil { + return nil, err + } + return d, nil +} + +// dynamicTableState is shared by StatefulEncoder and StatefulDecoder. 
It contains +// the necessary state for tracking references to previously seen values. +type dynamicTableState struct { + // LRU hash tables for snd, p+p1s, and p2+p2s + sndTable *lruTable[addressValue] // 512 * 2 * 32 = 32KB + pkTable *lruTable[pkSigPair] // 512 * 2 * 96 = 96KB + pk2Table *lruTable[pkSigPair] // 512 * 2 * 96 = 96KB + + // 8-slot window of recent proposal values + proposalWindow propWindow + + // last round number seen in previous vote + lastRnd uint64 +} + +// pkSigPair is a 32-byte public key + 64-byte signature +// used for the LRU tables for p+p1s and p2+p2s. +type pkSigPair struct { + pk [pkSize]byte + sig [sigSize]byte +} + +func (p *pkSigPair) hash() uint64 { + // Since pk and sig should already be uniformly distributed, we can use a + // simple XOR of the first 8 bytes of each to get a good hash. + // Any invalid votes intentionally designed to cause collisions will only + // affect the sending peer's own per-peer compression state, and cause + // agreement to disconnect the peer. + return binary.LittleEndian.Uint64(p.pk[:8]) ^ binary.LittleEndian.Uint64(p.sig[:8]) +} + +// addressValue is a 32-byte address used for the LRU table for snd. 
+type addressValue [digestSize]byte + +func (v *addressValue) hash() uint64 { + // addresses are fairly uniformly distributed, so we can use a simple XOR + return binary.LittleEndian.Uint64(v[:8]) ^ binary.LittleEndian.Uint64(v[8:16]) ^ + binary.LittleEndian.Uint64(v[16:24]) ^ binary.LittleEndian.Uint64(v[24:]) +} + +// initTables initializes the LRU tables with the specified size for all tables +func (s *dynamicTableState) initTables(tableSize uint) error { + var err error + if s.sndTable, err = newLRUTable[addressValue](tableSize); err != nil { + return err + } + if s.pkTable, err = newLRUTable[pkSigPair](tableSize); err != nil { + return err + } + if s.pk2Table, err = newLRUTable[pkSigPair](tableSize); err != nil { + return err + } + return nil +} + +// statefulReader helps StatefulEncoder and StatefulDecoder to read from a +// source buffer with bounds checking. +type statefulReader struct { + src []byte + pos int +} + +func (r *statefulReader) readFixed(n int, field string) ([]byte, error) { + if r.pos+n > len(r.src) { + return nil, fmt.Errorf("truncated %s", field) + } + data := r.src[r.pos : r.pos+n] + r.pos += n + return data, nil +} + +func (r *statefulReader) readVaruintBytes(field string) ([]byte, error) { + if r.pos+1 > len(r.src) { + return nil, fmt.Errorf("truncated %s marker", field) + } + more, err := msgpVaruintRemaining(r.src[r.pos]) + if err != nil { + return nil, fmt.Errorf("invalid %s marker: %w", field, err) + } + total := 1 + more + if r.pos+total > len(r.src) { + return nil, fmt.Errorf("truncated %s", field) + } + data := r.src[r.pos : r.pos+total] + r.pos += total + return data, nil +} + +func (r *statefulReader) readVaruint(field string) ([]byte, uint64, error) { + data, err := r.readVaruintBytes(field) + if err != nil { + return nil, 0, err + } + // decode: readVaruintBytes has already validated the marker + var value uint64 + switch len(data) { + case 1: // fixint (values 0-127) + value = uint64(data[0]) + case 2: // uint8 (marker + 
uint8) + value = uint64(data[1]) + case 3: // uint16 (marker + uint16) + value = uint64(binary.BigEndian.Uint16(data[1:])) + case 5: // uint32 (marker + uint32) + value = uint64(binary.BigEndian.Uint32(data[1:])) + case 9: // uint64 (marker + uint64) + value = binary.BigEndian.Uint64(data[1:]) + default: + return nil, 0, fmt.Errorf("readVaruint: %s unexpected length %d", field, len(data)) + } + + return data, value, nil +} + +// readDynamicRef reads an LRU table reference ID from the statefulReader. +func (r *statefulReader) readDynamicRef(field string) (lruTableReferenceID, error) { + if r.pos+2 > len(r.src) { + return 0, fmt.Errorf("truncated %s", field) + } + id := binary.BigEndian.Uint16(r.src[r.pos : r.pos+2]) + r.pos += 2 + return lruTableReferenceID(id), nil +} + +// appendDynamicRef encodes an LRU table reference ID and appends it to dst. +func appendDynamicRef(dst []byte, id lruTableReferenceID) []byte { + return binary.BigEndian.AppendUint16(dst, uint16(id)) +} + +// Compress takes a vote compressed by StatelessEncoder, and additionally +// compresses it using dynamic references to previously seen values. +func (e *StatefulEncoder) Compress(dst, src []byte) ([]byte, error) { + r := statefulReader{src: src, pos: 0} + + // Read header + header, err := r.readFixed(2, "header") + if err != nil { + return nil, errors.New("src too short") + } + hdr0 := header[0] // from StatelessEncoder + var hdr1 byte // StatefulEncoder header + + // prepare output, leave room for 2-byte header + out := dst[:0] + out = append(out, hdr0, 0) // will fill in with hdr1 later + + // cred.pf: pass through + pf, err := r.readFixed(pfSize, "pf") + if err != nil { + return nil, err + } + out = append(out, pf...) + + // r.per: pass through, if present + if (hdr0 & bitPer) != 0 { + perData, err1 := r.readVaruintBytes("r.per") + if err1 != nil { + return nil, err1 + } + out = append(out, perData...) 
+ } + + // r.prop: check LRU window + // copy proposal fields for table lookup + var prop proposalEntry + if (hdr0 & bitDig) != 0 { + dig, err1 := r.readFixed(digestSize, "dig") + if err1 != nil { + return nil, err1 + } + copy(prop.dig[:], dig) + } + if (hdr0 & bitEncDig) != 0 { + encdig, err1 := r.readFixed(digestSize, "encdig") + if err1 != nil { + return nil, err1 + } + copy(prop.encdig[:], encdig) + } + if (hdr0 & bitOper) != 0 { + operData, err1 := r.readVaruintBytes("oper") + if err1 != nil { + return nil, err1 + } + copy(prop.operEnc[:], operData) + prop.operLen = uint8(len(operData)) + } + if (hdr0 & bitOprop) != 0 { + oprop, err1 := r.readFixed(digestSize, "oprop") + if err1 != nil { + return nil, err1 + } + copy(prop.oprop[:], oprop) + } + prop.mask = hdr0 & propFieldsMask + + if idx := e.proposalWindow.lookup(prop); idx != 0 { + hdr1 |= byte(idx) << hdr1PropShift // set 001..111 + } else { + // not found: send literal and add to window (don't touch hdr1) + e.proposalWindow.insertNew(prop) + // write proposal bytes as StatelessEncoder would + if (hdr0 & bitDig) != 0 { + out = append(out, prop.dig[:]...) + } + if (hdr0 & bitEncDig) != 0 { + out = append(out, prop.encdig[:]...) + } + if (hdr0 & bitOper) != 0 { + out = append(out, prop.operEnc[:prop.operLen]...) + } + if (hdr0 & bitOprop) != 0 { + out = append(out, prop.oprop[:]...) + } + } + + // r.rnd: perform delta encoding + rndData, rnd, err := r.readVaruint("rnd") + if err != nil { + return nil, err + } + + switch { // delta encoding + case rnd == e.lastRnd: + hdr1 |= hdr1RndDeltaSame + case rnd == e.lastRnd+1 && e.lastRnd < math.MaxUint64: // avoid overflow + hdr1 |= hdr1RndDeltaPlus1 + case rnd == e.lastRnd-1 && e.lastRnd > 0: // avoid underflow + hdr1 |= hdr1RndDeltaMinus1 + default: + // pass through literal bytes (don't touch hdr1) + out = append(out, rndData...) 
+ } + e.lastRnd = rnd + + // r.snd: check LRU table + sndData, err := r.readFixed(digestSize, "sender") + if err != nil { + return nil, err + } + var snd addressValue + copy(snd[:], sndData) + sndH := snd.hash() + if id, ok := e.sndTable.lookup(snd, sndH); ok { + // found in table, use reference + hdr1 |= hdr1SndRef + out = appendDynamicRef(out, id) + } else { // not found, add to table and use literal + out = append(out, snd[:]...) + e.sndTable.insert(snd, sndH) + } + + // r.step: pass through, if present + if (hdr0 & bitStep) != 0 { + stepData, err1 := r.readVaruintBytes("step") + if err1 != nil { + return nil, err1 + } + out = append(out, stepData...) + } + + // sig.p + sig.p1s: check LRU table + pkBundle, err := r.readFixed(pkSize+sigSize, "pk bundle") + if err != nil { + return nil, err + } + var pk pkSigPair + copy(pk.pk[:], pkBundle[:pkSize]) + copy(pk.sig[:], pkBundle[pkSize:]) + + pkH := pk.hash() + if id, ok := e.pkTable.lookup(pk, pkH); ok { + // found in table, use reference + hdr1 |= hdr1PkRef + out = appendDynamicRef(out, id) + } else { // not found, add to table and use literal + out = append(out, pk.pk[:]...) + out = append(out, pk.sig[:]...) + e.pkTable.insert(pk, pkH) + } + + // sig.p2 + sig.p2s: check LRU table + pk2Bundle, err := r.readFixed(pkSize+sigSize, "pk2 bundle") + if err != nil { + return nil, err + } + var pk2 pkSigPair + copy(pk2.pk[:], pk2Bundle[:pkSize]) + copy(pk2.sig[:], pk2Bundle[pkSize:]) + + pk2H := pk2.hash() + if id, ok := e.pk2Table.lookup(pk2, pk2H); ok { + // found in table, use reference + hdr1 |= hdr1Pk2Ref + out = appendDynamicRef(out, id) + } else { // not found, add to table and use literal + out = append(out, pk2.pk[:]...) + out = append(out, pk2.sig[:]...) + e.pk2Table.insert(pk2, pk2H) + } + + // sig.s: pass through + sigs, err := r.readFixed(sigSize, "sig.s") + if err != nil { + return nil, err + } + out = append(out, sigs...) 
+ + if r.pos != len(src) { + return nil, fmt.Errorf("length mismatch: expected %d, got %d", len(src), r.pos) + } + + // fill in stateful header (hdr0 is unchanged) + out[1] = hdr1 + return out, nil +} + +// Decompress reverses StatefulEncoder, and writes a valid stateless vpack +// format buffer into dst. Caller must then pass it to StatelessDecoder. +func (d *StatefulDecoder) Decompress(dst, src []byte) ([]byte, error) { + r := statefulReader{src: src, pos: 0} + + // Read header + header, err := r.readFixed(2, "header") + if err != nil { + return nil, errors.New("input shorter than header") + } + hdr0 := header[0] // from StatelessEncoder + hdr1 := header[1] // from StatefulEncoder + + // prepare out; stateless size <= original + out := dst[:0] + out = append(out, hdr0, 0) // StatelessDecoder-compatible header + + // cred.pf: pass through + pf, err := r.readFixed(pfSize, "pf") + if err != nil { + return nil, err + } + out = append(out, pf...) + + // r.per: pass through, if present + if (hdr0 & bitPer) != 0 { + perData, err1 := r.readVaruintBytes("per") + if err1 != nil { + return nil, err1 + } + out = append(out, perData...) 
+ } + + // r.prop: check for reference to LRU window + var prop proposalEntry + propRef := (hdr1 & hdr1PropMask) >> hdr1PropShift // index in range [0, 7] + if propRef == 0 { // literal follows + if (hdr0 & bitDig) != 0 { + dig, err1 := r.readFixed(digestSize, "digest") + if err1 != nil { + return nil, err1 + } + copy(prop.dig[:], dig) + } + if (hdr0 & bitEncDig) != 0 { + encdig, err1 := r.readFixed(digestSize, "encdig") + if err1 != nil { + return nil, err1 + } + copy(prop.encdig[:], encdig) + } + if (hdr0 & bitOper) != 0 { + operData, err1 := r.readVaruintBytes("oper") + if err1 != nil { + return nil, err1 + } + copy(prop.operEnc[:], operData) + prop.operLen = uint8(len(operData)) + } + if (hdr0 & bitOprop) != 0 { + oprop, err1 := r.readFixed(digestSize, "oprop") + if err1 != nil { + return nil, err1 + } + copy(prop.oprop[:], oprop) + } + prop.mask = hdr0 & propFieldsMask + // add literal to the proposal window + d.proposalWindow.insertNew(prop) + } else { // reference index 1-7 + var ok bool + prop, ok = d.proposalWindow.byRef(int(propRef)) + if !ok { + return nil, fmt.Errorf("bad proposal ref: %v", propRef) + } + } + + // write proposal bytes (from either literal or reference) + if (prop.mask & bitDig) != 0 { + out = append(out, prop.dig[:]...) + } + if (prop.mask & bitEncDig) != 0 { + out = append(out, prop.encdig[:]...) + } + if (prop.mask & bitOper) != 0 { + out = append(out, prop.operEnc[:prop.operLen]...) + } + if (prop.mask & bitOprop) != 0 { + out = append(out, prop.oprop[:]...) 
+ } + + // r.rnd: perform delta decoding + var rnd uint64 + switch hdr1 & hdr1RndMask { + case hdr1RndDeltaSame: + rnd = d.lastRnd + out = msgp.AppendUint64(out, rnd) + case hdr1RndDeltaPlus1: + if d.lastRnd == math.MaxUint64 { + return nil, fmt.Errorf("round overflow: lastRnd %d", d.lastRnd) + } + rnd = d.lastRnd + 1 + out = msgp.AppendUint64(out, rnd) + case hdr1RndDeltaMinus1: + if d.lastRnd == 0 { + return nil, fmt.Errorf("round underflow: lastRnd %d", d.lastRnd) + } + rnd = d.lastRnd - 1 + out = msgp.AppendUint64(out, rnd) + case hdr1RndLiteral: + rndData, rndVal, err1 := r.readVaruint("rnd") + if err1 != nil { + return nil, err1 + } + rnd = rndVal + out = append(out, rndData...) + } + d.lastRnd = rnd + + // r.snd: check for reference to LRU table + if (hdr1 & hdr1SndRef) != 0 { // reference + id, err1 := r.readDynamicRef("snd ref") + if err1 != nil { + return nil, err1 + } + addr, ok := d.sndTable.fetch(id) + if !ok { + return nil, fmt.Errorf("bad sender ref: %v", id) + } + out = append(out, addr[:]...) + } else { // literal + sndData, err1 := r.readFixed(digestSize, "sender") + if err1 != nil { + return nil, err1 + } + var addr addressValue + copy(addr[:], sndData) + out = append(out, addr[:]...) + d.sndTable.insert(addr, addr.hash()) + } + + // r.step: pass through, if present + if (hdr0 & bitStep) != 0 { + stepData, err1 := r.readVaruintBytes("step") + if err1 != nil { + return nil, err1 + } + out = append(out, stepData...) + } + + // sig.p + p1s: check for reference to LRU table + if (hdr1 & hdr1PkRef) != 0 { // reference + id, err1 := r.readDynamicRef("pk ref") + if err1 != nil { + return nil, err1 + } + pkb, ok := d.pkTable.fetch(id) + if !ok { + return nil, fmt.Errorf("bad pk ref: %v", id) + } + out = append(out, pkb.pk[:]...) + out = append(out, pkb.sig[:]...) 
+ } else { // literal + pkBundle, err1 := r.readFixed(pkSize+sigSize, "pk bundle") + if err1 != nil { + return nil, err1 + } + var pkb pkSigPair + copy(pkb.pk[:], pkBundle[:pkSize]) + copy(pkb.sig[:], pkBundle[pkSize:]) + out = append(out, pkb.pk[:]...) + out = append(out, pkb.sig[:]...) + d.pkTable.insert(pkb, pkb.hash()) + } + + // sig.p2 + p2s: check for reference to LRU table + if (hdr1 & hdr1Pk2Ref) != 0 { // reference + id, err1 := r.readDynamicRef("pk2 ref") + if err1 != nil { + return nil, err1 + } + pk2b, ok := d.pk2Table.fetch(id) + if !ok { + return nil, fmt.Errorf("bad pk2 ref: %v", id) + } + out = append(out, pk2b.pk[:]...) + out = append(out, pk2b.sig[:]...) + } else { // literal + pk2Bundle, err1 := r.readFixed(pkSize+sigSize, "pk2 bundle") + if err1 != nil { + return nil, err1 + } + var pk2b pkSigPair + copy(pk2b.pk[:], pk2Bundle[:pkSize]) + copy(pk2b.sig[:], pk2Bundle[pkSize:]) + out = append(out, pk2b.pk[:]...) + out = append(out, pk2b.sig[:]...) + d.pk2Table.insert(pk2b, pk2b.hash()) + } + + // sig.s: pass through + sigs, err := r.readFixed(sigSize, "sig.s") + if err != nil { + return nil, err + } + out = append(out, sigs...) + + if r.pos != len(src) { + return nil, fmt.Errorf("length mismatch: expected %d, got %d", len(src), r.pos) + } + return out, nil +} diff --git a/network/vpack/dynamic_vpack_test.go b/network/vpack/dynamic_vpack_test.go new file mode 100644 index 0000000000..af8ce5f747 --- /dev/null +++ b/network/vpack/dynamic_vpack_test.go @@ -0,0 +1,469 @@ +// Copyright (C) 2019-2025 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. 
+// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package vpack + +import ( + "math" + "reflect" + "slices" + "testing" + "unsafe" + + "github.com/algorand/go-algorand/agreement" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/stretchr/testify/require" +) + +// TestStatefulEncoderDecoderSequence verifies that a StatefulEncoder/StatefulDecoder +// pair can be reused across multiple votes while preserving correctness. +func TestStatefulEncoderDecoderSequence(t *testing.T) { + partitiontest.PartitionTest(t) + + const numVotes = 30 + + // Stateless encoder/decoder used as front/back before Stateful layer + stEnc := NewStatelessEncoder() + stDec := NewStatelessDecoder() + + enc, err := NewStatefulEncoder(1024) + require.NoError(t, err) + dec, err := NewStatefulDecoder(1024) + require.NoError(t, err) + + voteGen := generateRandomVote() + + for i := 0; i < numVotes; i++ { + v0 := voteGen.Example(i) + + // Ensure PKSigOld is zero to satisfy encoder expectations + v0.Sig.PKSigOld = [64]byte{} + + // Encode to msgpack and bounds-check size + msgpackBuf := protocol.EncodeMsgp(v0) + require.LessOrEqual(t, len(msgpackBuf), MaxMsgpackVoteSize) + + // First layer: stateless compression + statelessBuf, err := stEnc.CompressVote(nil, msgpackBuf) + require.NoError(t, err) + + // Second layer: stateful compression + encBuf, err := enc.Compress(nil, statelessBuf) + require.NoError(t, err, "Vote %d failed to compress", i) + // size sanity: compressed should not exceed stateless size + require.LessOrEqual(t, len(encBuf), len(statelessBuf)) + + // 
Reverse: stateful decompress → stateless + statelessOut, err := dec.Decompress(nil, encBuf) + require.NoError(t, err, "Vote %d failed to decompress", i) + + // Reverse: stateless decompress → msgpack + msgpackOut, err := stDec.DecompressVote(nil, statelessOut) + require.NoError(t, err) + + // Decode and compare objects for round-trip integrity + var v1 agreement.UnauthenticatedVote + err = protocol.Decode(msgpackOut, &v1) + require.NoError(t, err) + require.Equal(t, *v0, v1, "Vote %d round-trip mismatch", i) + } +} + +// TestStatefulEncoderReuse mirrors TestEncoderReuse in vpack_test.go but targets +// StatefulEncoder to guarantee that buffer reuse does not corrupt internal state. +func TestStatefulEncoderReuse(t *testing.T) { + partitiontest.PartitionTest(t) + + const numVotes = 10 + voteGen := generateRandomVote() + msgpackBufs := make([][]byte, 0, numVotes) + + // Generate and encode votes + for i := 0; i < numVotes; i++ { + buf := protocol.EncodeMsgp(voteGen.Example(i)) + require.LessOrEqual(t, len(buf), MaxMsgpackVoteSize) + msgpackBufs = append(msgpackBufs, buf) + } + + stEnc := NewStatelessEncoder() + stDec := NewStatelessDecoder() + enc, err := NewStatefulEncoder(1024) + require.NoError(t, err) + dec, err := NewStatefulDecoder(1024) + require.NoError(t, err) + + // 1) Compress into new buffers each time + var compressed [][]byte + for i, msgp := range msgpackBufs { + stateless, err := stEnc.CompressVote(nil, msgp) + require.NoError(t, err) + c, err := enc.Compress(nil, stateless) + require.NoError(t, err, "vote %d compress failed", i) + compressed = append(compressed, append([]byte(nil), c...)) + } + + for i, c := range compressed { + statelessOut, err := dec.Decompress(nil, c) + require.NoError(t, err, "vote %d decompress failed", i) + msgpackOut, err := stDec.DecompressVote(nil, statelessOut) + require.NoError(t, err) + var v agreement.UnauthenticatedVote + require.NoError(t, protocol.Decode(msgpackOut, &v)) + var orig agreement.UnauthenticatedVote + 
require.NoError(t, protocol.Decode(msgpackBufs[i], &orig)) + require.Equal(t, orig, v) + } + + // 2) Reuse a single destination slice + compressed = compressed[:0] + reused := make([]byte, 0, 4096) + for i, msgp := range msgpackBufs { + st, err := stEnc.CompressVote(nil, msgp) + require.NoError(t, err) + c, err := enc.Compress(reused[:0], st) + require.NoError(t, err, "vote %d compress failed (reuse)", i) + compressed = append(compressed, append([]byte(nil), c...)) + } + for i, c := range compressed { + stOut, err := dec.Decompress(nil, c) + require.NoError(t, err, "vote %d decompress failed (reuse)", i) + mpOut, err := stDec.DecompressVote(nil, stOut) + require.NoError(t, err) + var v agreement.UnauthenticatedVote + require.NoError(t, protocol.Decode(mpOut, &v)) + var orig agreement.UnauthenticatedVote + require.NoError(t, protocol.Decode(msgpackBufs[i], &orig)) + require.Equal(t, orig, v) + } + + // 3) Reuse a slice that grows over iterations + compressed = compressed[:0] + growing := make([]byte, 0, 8) + for i, msgp := range msgpackBufs { + st, err := stEnc.CompressVote(nil, msgp) + require.NoError(t, err) + c, err := enc.Compress(growing[:0], st) + require.NoError(t, err, "vote %d compress failed (growing)", i) + compressed = append(compressed, append([]byte(nil), c...)) + growing = c + } + for i, c := range compressed { + stOut, err := dec.Decompress(nil, c) + require.NoError(t, err, "vote %d decompress failed (growing)", i) + mpOut, err := stDec.DecompressVote(nil, stOut) + require.NoError(t, err) + var v agreement.UnauthenticatedVote + require.NoError(t, protocol.Decode(mpOut, &v)) + var orig agreement.UnauthenticatedVote + require.NoError(t, protocol.Decode(msgpackBufs[i], &orig)) + require.Equal(t, orig, v) + } +} + +func TestStatefulRndDelta(t *testing.T) { + partitiontest.PartitionTest(t) + + rounds := []uint64{10, 10, 11, 10, 11, 11, 20} + expected := []byte{hdr1RndLiteral, hdr1RndDeltaSame, hdr1RndDeltaPlus1, hdr1RndDeltaMinus1, hdr1RndDeltaPlus1, 
hdr1RndDeltaSame, hdr1RndLiteral} + + enc, err := NewStatefulEncoder(1024) + require.NoError(t, err) + dec, err := NewStatefulDecoder(1024) + require.NoError(t, err) + stEnc := NewStatelessEncoder() + stDec := NewStatelessDecoder() + voteGen := generateRandomVote() + + // Test both encoding and decoding in the same loop + for i, rnd := range rounds { + v := voteGen.Example(i) + v.R.Round = basics.Round(rnd) + + msgp := protocol.EncodeMsgp(v) + statelessBuf, err := stEnc.CompressVote(nil, msgp) + require.NoError(t, err) + + // Compress with stateful encoder + compressedBuf, err := enc.Compress(nil, statelessBuf) + require.NoError(t, err) + require.GreaterOrEqual(t, len(compressedBuf), 2) + + // Verify the round delta encoding in the header matches expectations + got := compressedBuf[1] & hdr1RndMask + require.Equal(t, expected[i], got) + + // Decompress with the stateful decoder + decompressedBuf, err := dec.Decompress(nil, compressedBuf) + require.NoError(t, err) + require.Equal(t, statelessBuf, decompressedBuf) + + // Decompress with the stateless decoder + decompressedStatelessBuf, err := stDec.DecompressVote(nil, statelessBuf) + require.NoError(t, err) + require.Equal(t, msgp, decompressedStatelessBuf) + + } +} + +func TestStatefulEncodeRef(t *testing.T) { + // ensure lruTableReferenceID can fit in uint16 encoding used in appendDynamicRef + partitiontest.PartitionTest(t) + var id lruTableReferenceID + require.Equal(t, uintptr(2), unsafe.Sizeof(id), "lruTableReferenceID should occupy 2 bytes (uint16)") + require.Equal(t, reflect.Uint16, reflect.TypeFor[lruTableReferenceID]().Kind(), "lruTableReferenceID underlying kind should be uint16") + // Maximum table size we support is 2048 (1024 buckets, 2 slots each) + // Last bucket would be 1023, last slot would be 1, so maxID = (1023<<1)|1 = 2047 + maxTableSize := uint32(2048) + maxBucketIndex := (maxTableSize / 2) - 1 + maxID := lruTableReferenceID((maxBucketIndex << 1) | 1) // last bucket, last slot + 
require.LessOrEqual(t, uint32(maxID), uint32(math.MaxUint16)) +} + +func TestStatefulDecoderErrors(t *testing.T) { + partitiontest.PartitionTest(t) + + fullVote := slices.Concat( + // Header with all hdr0 optional bits set, but no hdr1 bits + []byte{byte(bitPer | bitDig | bitStep | bitEncDig | bitOper | bitOprop), 0x00}, + make([]byte, pfSize), // Credential prefix (80 bytes) + []byte{msgpUint32}, // Per field marker + []byte{0x01, 0x02, 0x03, 0x04}, // Per value (4 bytes) + make([]byte, digestSize), // Digest (32 bytes) + make([]byte, digestSize), // EncDig (32 bytes) + []byte{msgpUint32}, // Oper field marker + []byte{0x01, 0x02, 0x03, 0x04}, // Oper value (4 bytes) + make([]byte, digestSize), // Oprop (32 bytes) + []byte{msgpUint32}, // Round marker (msgpack marker) + []byte{0x01, 0x02, 0x03, 0x04}, // Round value (4 bytes) + make([]byte, digestSize), // Sender (32 bytes) + []byte{msgpUint32}, // Step field marker + []byte{0x01, 0x02, 0x03, 0x04}, // Step value (4 bytes) + make([]byte, pkSize+sigSize), // pk + p1s (96 bytes: 32 for pk, 64 for p1s) + make([]byte, pkSize+sigSize), // pk2 + p2s (96 bytes: 32 for pk2, 64 for p2s) + make([]byte, sigSize), // sig.s (64 bytes) + ) + + refVote := slices.Concat( + // Header with all hdr1 reference bits set, but no hdr0 bits + []byte{0x00, byte(hdr1SndRef | hdr1PkRef | hdr1Pk2Ref | hdr1RndLiteral)}, + make([]byte, pfSize), // Credential prefix + []byte{0x07}, // Round literal (fixint 7) + []byte{0x01, 0x02}, // Sender ref ID + []byte{0x03, 0x04}, // pk ref ID + []byte{0x05, 0x06}, // pk2 ref ID + make([]byte, sigSize), // sig.s + ) + + for _, tc := range []struct { + want string + buf []byte + }{ + // Truncation errors + {"input shorter than header", fullVote[:1]}, + {"truncated pf", fullVote[:2]}, + {"truncated per marker", fullVote[:82]}, + {"truncated per", fullVote[:83]}, + {"truncated digest", fullVote[:87]}, + {"truncated encdig", fullVote[:119]}, + {"truncated oper marker", fullVote[:151]}, + {"truncated oper", 
fullVote[:152]}, + {"truncated oprop", fullVote[:160]}, + {"truncated rnd marker", fullVote[:188]}, + {"truncated rnd", fullVote[:189]}, + {"truncated sender", fullVote[:193]}, + {"truncated step marker", fullVote[:225]}, + {"truncated step", fullVote[:226]}, + {"truncated pk bundle", fullVote[:234]}, + {"truncated pk2 bundle", fullVote[:334]}, + {"truncated sig.s", fullVote[:422]}, + // Reference ID decoding errors + {"truncated snd ref", refVote[:84]}, + {"truncated pk ref", refVote[:86]}, + {"truncated pk2 ref", refVote[:88]}, + {"bad sender ref", slices.Concat(refVote[:83], []byte{0xFF, 0xFF})}, + {"bad pk ref", slices.Concat(refVote[:85], []byte{0xFF, 0xFF})}, + {"bad pk2 ref", slices.Concat(refVote[:87], []byte{0xFF, 0xFF})}, + {"bad proposal ref", slices.Concat( + []byte{0x00, byte(3 << hdr1PropShift)}, // proposal reference ID 3 (invalid, StatefulDecoder is empty) + make([]byte, pfSize), // pf + []byte{0x01}, // round (fixint 1) + )}, + {"length mismatch: expected", slices.Concat(fullVote, []byte{0xFF, 0xFF})}, + } { + t.Run(tc.want, func(t *testing.T) { + dec, err := NewStatefulDecoder(1024) + require.NoError(t, err) + _, err = dec.Decompress(nil, tc.buf) + require.ErrorContains(t, err, tc.want) + }) + } +} + +func TestStatefulEncoderErrors(t *testing.T) { + partitiontest.PartitionTest(t) + + enc, err := NewStatefulEncoder(1024) + require.NoError(t, err) + + // Source too short error + _, err = enc.Compress(nil, []byte{0x00}) + require.ErrorContains(t, err, "src too short") + + // Length mismatch error + vote := generateRandomVote().Example(0) + stEnc := NewStatelessEncoder() + statelessBuf, err := stEnc.CompressVote(nil, protocol.EncodeMsgp(vote)) + require.NoError(t, err) + + badBuf := append(statelessBuf, 0xFF) // append spurious byte + _, err = enc.Compress(nil, badBuf) + require.ErrorContains(t, err, "length mismatch") + + // Test nil dst + compressedBuf, err := enc.Compress(nil, statelessBuf) + require.NoError(t, err) + require.Greater(t, 
len(compressedBuf), 0) + + // Test bounds checking errors + testCases := []struct { + name string + buf []byte + want string + }{ + { + name: "truncated pf", + buf: []byte{0x00, 0x00}, // header only, no pf + want: "truncated pf", + }, + { + name: "truncated r.per marker", + buf: append([]byte{byte(bitPer), 0x00}, make([]byte, pfSize)...), // header + pf, no per marker + want: "truncated r.per marker", + }, + { + name: "truncated r.per", + buf: append([]byte{byte(bitPer), 0x00}, append(make([]byte, pfSize), msgpUint32)...), // header + pf + per marker, no per data + want: "truncated r.per", + }, + { + name: "truncated dig", + buf: append([]byte{byte(bitDig), 0x00}, make([]byte, pfSize)...), // header + pf, no dig + want: "truncated dig", + }, + { + name: "truncated encdig", + // When bitDig is not set but bitEncDig is set, we expect encdig directly after pf + buf: append([]byte{byte(bitEncDig), 0x00}, make([]byte, pfSize)...), // header + pf, no encdig + want: "truncated encdig", + }, + { + name: "truncated oper marker", + buf: append([]byte{byte(bitOper), 0x00}, make([]byte, pfSize)...), // header + pf, no oper marker + want: "truncated oper marker", + }, + { + name: "truncated oper", + buf: append([]byte{byte(bitOper), 0x00}, append(make([]byte, pfSize), msgpUint32)...), // header + pf + oper marker, no oper data + want: "truncated oper", + }, + { + name: "truncated oprop", + buf: append([]byte{byte(bitOprop), 0x00}, make([]byte, pfSize)...), // header + pf, no oprop + want: "truncated oprop", + }, + { + name: "truncated rnd marker", + buf: append([]byte{0x00, 0x00}, make([]byte, pfSize)...), // header + pf, no rnd marker + want: "truncated rnd marker", + }, + { + name: "truncated rnd", + buf: append([]byte{0x00, 0x00}, append(make([]byte, pfSize), msgpUint32)...), // header + pf + rnd marker, no rnd data + want: "truncated rnd", + }, + { + name: "truncated sender", + buf: append([]byte{0x00, 0x00}, append(make([]byte, pfSize), 0x07)...), // header + pf + rnd 
(fixint), no sender + want: "truncated sender", + }, + { + name: "truncated step marker", + buf: append([]byte{byte(bitStep), 0x00}, append(make([]byte, pfSize), append([]byte{0x07}, make([]byte, digestSize)...)...)...), // header + pf + rnd + sender, no step marker + want: "truncated step marker", + }, + { + name: "truncated step", + buf: append([]byte{byte(bitStep), 0x00}, append(make([]byte, pfSize), append([]byte{0x07}, append(make([]byte, digestSize), msgpUint32)...)...)...), // header + pf + rnd + sender + step marker, no step data + want: "truncated step", + }, + { + name: "truncated pk bundle", + buf: append([]byte{0x00, 0x00}, append(make([]byte, pfSize), append([]byte{0x07}, make([]byte, digestSize)...)...)...), // header + pf + rnd + sender, no pk bundle + want: "truncated pk bundle", + }, + { + name: "truncated pk2 bundle", + buf: append([]byte{0x00, 0x00}, append(make([]byte, pfSize), append([]byte{0x07}, append(make([]byte, digestSize), make([]byte, pkSize+sigSize)...)...)...)...), // header + pf + rnd + sender + pk bundle, no pk2 bundle + want: "truncated pk2 bundle", + }, + { + name: "truncated sig.s", + buf: append([]byte{0x00, 0x00}, append(make([]byte, pfSize), append([]byte{0x07}, append(make([]byte, digestSize), append(make([]byte, pkSize+sigSize), make([]byte, pkSize+sigSize)...)...)...)...)...), // everything except sig.s + want: "truncated sig.s", + }, + { + name: "invalid r.per marker", + buf: append([]byte{byte(bitPer), 0x00}, append(make([]byte, pfSize), 0xFF)...), // header + pf + invalid per marker + want: "invalid r.per marker", + }, + { + name: "invalid oper marker", + buf: append([]byte{byte(bitOper), 0x00}, append(make([]byte, pfSize), 0xFF)...), // header + pf + invalid oper marker + want: "invalid oper marker", + }, + { + name: "invalid rnd marker", + buf: append([]byte{0x00, 0x00}, append(make([]byte, pfSize), 0xFF)...), // header + pf + invalid rnd marker + want: "invalid rnd marker", + }, + { + name: "invalid step marker", + 
buf: append([]byte{byte(bitStep), 0x00}, append(make([]byte, pfSize), append([]byte{0x07}, append(make([]byte, digestSize), 0xFF)...)...)...), // header + pf + rnd + sender + invalid step marker + want: "invalid step marker", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := enc.Compress(nil, tc.buf) + require.ErrorContains(t, err, tc.want) + }) + } +} + +func TestStatefulEncoderHeaderBits(t *testing.T) { + partitiontest.PartitionTest(t) + // Ensure that the three bits allocated in hdr1 for proposal references + // matches the size of the proposal window. + got := int(hdr1PropMask >> hdr1PropShift) + require.Equal(t, proposalWindowSize, got, + "hdr1PropMask (%d) and proposalWindowSize (%d) must stay in sync", got, proposalWindowSize) + + // Ensure that the header encoding of hdr1RndLiteral is zero + require.Equal(t, hdr1RndLiteral, 0) +} diff --git a/network/vpack/lru_table.go b/network/vpack/lru_table.go new file mode 100644 index 0000000000..6153d39c48 --- /dev/null +++ b/network/vpack/lru_table.go @@ -0,0 +1,138 @@ +// Copyright (C) 2019-2025 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package vpack + +import ( + "errors" +) + +// lruTable is a fixed-size, 2-way set-associative hash table with 512 buckets. 
// Each bucket holds exactly two slots; inserting into a full bucket evicts
// the least-recently-used slot. Lookups, inserts, and fetches are O(1) and
// allocation-free.
//
// Per bucket, a single MRU bit records which slot was touched last: 0 means
// slot 0 is MRU (so slot 1 is the eviction candidate), 1 means slot 1 is MRU.
//
// A reference ID addresses an entry as (bucket << 1) | slot, where bucket is
// the bucket index and slot is 0 or 1 within the bucket.
type lruTable[K comparable] struct {
	numBuckets uint               // number of two-slot buckets (power of two)
	buckets    []twoSlotBucket[K] // bucket storage, len == numBuckets
	mru        []byte             // 1 bit per bucket: which slot is MRU
}

// newLRUTable creates a table holding n entries spread over n/2 two-slot
// buckets. n must be a power of two, at least 16 and at most 65536; the
// upper bound guarantees that the largest reference ID,
// ((n/2 - 1) << 1) | 1 == n-1, fits in the uint16-backed lruTableReferenceID.
func newLRUTable[K comparable](n uint) (*lruTable[K], error) {
	if n < 16 || n&(n-1) != 0 {
		return nil, errors.New("lruTable size must be a power of 2 and at least 16")
	}
	// Without this check, reference IDs silently truncate when narrowed to
	// uint16, making distinct entries indistinguishable on the wire.
	if n > 1<<16 {
		return nil, errors.New("lruTable size must be at most 65536")
	}
	numBuckets := n / 2
	return &lruTable[K]{
		numBuckets: numBuckets,
		buckets:    make([]twoSlotBucket[K], numBuckets),
		mru:        make([]byte, numBuckets/8), // numBuckets >= 8, so len >= 1
	}, nil
}

// twoSlotBucket is a 2-way set-associative bucket containing two slots.
type twoSlotBucket[K comparable] struct{ slots [2]K }

// lruBucketIndex is the index of a bucket in the LRU table.
type lruBucketIndex uint32

// lruSlotIndex is the index of a slot within a bucket, either 0 or 1.
type lruSlotIndex uint8

// lruTableReferenceID identifies an entry as (bucket << 1) | slot; this is
// the value exchanged between the stateful encoder and decoder.
type lruTableReferenceID uint16

// mruBitmask returns the index into t.mru and the bit mask that together
// address the MRU bit of bucket b.
func (t *lruTable[K]) mruBitmask(b lruBucketIndex) (byteIdx uint32, mask byte) {
	byteIdx = uint32(b) >> 3
	mask = 1 << (b & 7)
	return byteIdx, mask
}

// getLRUSlot returns the slot of bucket b that should be evicted next,
// i.e. the slot that is NOT marked MRU.
func (t *lruTable[K]) getLRUSlot(b lruBucketIndex) lruSlotIndex {
	byteIdx, mask := t.mruBitmask(b)
	if t.mru[byteIdx]&mask == 0 {
		return 1 // bit is 0: slot 0 is MRU, so slot 1 is LRU
	}
	return 0 // bit is 1: slot 1 is MRU, so slot 0 is LRU
}

// setMRUSlot records slot as the most recently used slot of bucket b.
func (t *lruTable[K]) setMRUSlot(b lruBucketIndex, slot lruSlotIndex) {
	byteIdx, mask := t.mruBitmask(b)
	if slot == 0 {
		t.mru[byteIdx] &^= mask // clear bit: slot 0 becomes MRU
	} else {
		t.mru[byteIdx] |= mask // set bit: slot 1 becomes MRU
	}
}

// hashToBucketIndex maps a key hash to its bucket using the low bits of the
// hash; valid because numBuckets is always a power of two.
func (t *lruTable[K]) hashToBucketIndex(h uint64) lruBucketIndex {
	return lruBucketIndex(h & uint64(t.numBuckets-1))
}

// lookup returns the reference ID of key k (whose hash is h) if present in
// its bucket, and marks the found slot as MRU.
func (t *lruTable[K]) lookup(k K, h uint64) (id lruTableReferenceID, ok bool) {
	b := t.hashToBucketIndex(h)
	bk := &t.buckets[b]
	for slot := lruSlotIndex(0); slot < 2; slot++ {
		if bk.slots[slot] == k {
			t.setMRUSlot(b, slot)
			return lruTableReferenceID(b)<<1 | lruTableReferenceID(slot), true
		}
	}
	return 0, false
}

// insert stores k (whose hash is h) in the LRU slot of its bucket, evicting
// the previous occupant, marks the slot MRU, and returns its reference ID.
func (t *lruTable[K]) insert(k K, h uint64) lruTableReferenceID {
	b := t.hashToBucketIndex(h)
	evict := t.getLRUSlot(b) // LRU slot is the eviction victim
	t.buckets[b].slots[evict] = k
	t.setMRUSlot(b, evict) // freshly inserted key becomes MRU
	return lruTableReferenceID(b)<<1 | lruTableReferenceID(evict)
}

// fetch returns the key stored under id and marks it MRU. It returns false
// for an out-of-range id, which the decoder surfaces as a protocol error.
func (t *lruTable[K]) fetch(id lruTableReferenceID) (K, bool) {
	b := lruBucketIndex(id >> 1)
	if b >= lruBucketIndex(t.numBuckets) { // invalid reference id
		var zero K
		return zero, false
	}
	slot := lruSlotIndex(id & 1)
	t.setMRUSlot(b, slot) // a fetch via reference also refreshes recency
	return t.buckets[b].slots[slot], true
}
+ +package vpack + +import ( + "encoding/binary" + "hash/fnv" + "testing" + "testing/quick" + + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/stretchr/testify/require" +) + +func TestLRUTableSizeValidation(t *testing.T) { + partitiontest.PartitionTest(t) + + // Test invalid size (not power of 2) + _, err := NewStatefulEncoder(100) + require.Error(t, err) + require.Contains(t, err.Error(), "must be a power of 2") + + // Test invalid size (too small) + _, err = NewStatefulEncoder(8) + require.Error(t, err) + require.Contains(t, err.Error(), "at least 16") + + // Test valid sizes + for _, size := range []uint{16, 32, 64, 128, 256, 512, 1024, 2048} { + enc, err := NewStatefulEncoder(size) + require.NoError(t, err) + require.NotNil(t, enc) + + dec, err := NewStatefulDecoder(size) + require.NoError(t, err) + require.NotNil(t, dec) + } +} + +// TestLRUTableInvalidID tests the fetch function with an invalid ID +func TestLRUTableInvalidID(t *testing.T) { + partitiontest.PartitionTest(t) + + // Test fetch with invalid ID (greater than table size) + table, err := newLRUTable[pkSigPair](1024) + require.NoError(t, err) + var invalidID lruTableReferenceID = 1024 // greater than numBuckets (512) + result, ok := table.fetch(invalidID) + require.False(t, ok) + require.Equal(t, pkSigPair{}, result) +} + +func TestLRUTableInsertLookupFetch(t *testing.T) { + partitiontest.PartitionTest(t) + tab, err := newLRUTable[int](1024) + require.NoError(t, err) + + const bucketHash = 42 // deterministic hash for test + const baseID = bucketHash << 1 // slot-bit is OR-ed below + + // first insert on empty table sees MRU bit 0, so slot 1 is LRU + id1 := tab.insert(100, bucketHash) + // id1 is baseID | 1 (value was stored in slot 1) + require.EqualValues(t, baseID|1, id1) + // on insert, our slot 1 is now the MRU, so LRU is slot 0 + require.Equal(t, lruSlotIndex(0), tab.getLRUSlot(lruBucketIndex(bucketHash))) + + // lookup for same 
value and bucketHash returns the same ID + id, ok := tab.lookup(100, bucketHash) + require.True(t, ok) + require.EqualValues(t, id1, id) + // MRU/LRU is unchanged + require.Equal(t, lruSlotIndex(0), tab.getLRUSlot(lruBucketIndex(bucketHash))) + + // second insert with new value for same hash sees MRU bit 1, so slot 0 is LRU + id2 := tab.insert(200, bucketHash) + require.EqualValues(t, baseID, id2) + // MRU/LRU is flipped + require.Equal(t, lruSlotIndex(1), tab.getLRUSlot(lruBucketIndex(bucketHash))) + + // old key (100) is still in slot 1 + _, ok = tab.lookup(100, bucketHash) + require.True(t, ok) + // the act of lookup 100 flips the MRU bit to 1 + require.Equal(t, lruSlotIndex(0), tab.getLRUSlot(lruBucketIndex(bucketHash))) + + // lookup for 200 (slot 0) → MRU bit flips to 0 + _, ok = tab.lookup(200, bucketHash) + require.True(t, ok) + require.Equal(t, lruSlotIndex(1), tab.getLRUSlot(lruBucketIndex(bucketHash))) + + // third insert: evicts and replaces slot 1, and now MRU is slot 1 + id3 := tab.insert(300, bucketHash) + require.EqualValues(t, baseID|1, id3) + require.Equal(t, lruSlotIndex(0), tab.getLRUSlot(lruBucketIndex(bucketHash))) + + // fetch(id3) returns the value 300 and keeps the MRU bit at slot 1 + val, ok := tab.fetch(id3) + require.True(t, ok) + require.Equal(t, 300, val) + require.Equal(t, lruSlotIndex(0), tab.getLRUSlot(lruBucketIndex(bucketHash))) + + // after insert for a new value, slot 0 is evicted and assigned + id4 := tab.insert(400, bucketHash) + require.EqualValues(t, baseID, id4) + // now slot 1 is LRU + require.Equal(t, lruSlotIndex(1), tab.getLRUSlot(lruBucketIndex(bucketHash))) + + // fetch of 300 (slot 1) makes it the new MRU + val, ok = tab.fetch(id3) + require.True(t, ok) + require.Equal(t, 300, val) + require.Equal(t, lruSlotIndex(0), tab.getLRUSlot(lruBucketIndex(bucketHash))) + + // fetch of 400 (slot 0) makes it the new MRU + val, ok = tab.fetch(id4) + require.True(t, ok) + require.Equal(t, 400, val) + require.Equal(t, 
lruSlotIndex(1), tab.getLRUSlot(lruBucketIndex(bucketHash))) +} + +// TestLRUEvictionOrder verifies that the LRU table correctly evicts the least recently used item +// when inserting into a full bucket. This test will fail if the lruSlot implementation is incorrect. +func TestLRUEvictionOrder(t *testing.T) { + partitiontest.PartitionTest(t) + tab, err := newLRUTable[int](1024) + require.NoError(t, err) + bucketHash := uint64(42) // Use same hash to ensure both items go into the same bucket + + // Insert first value + id1 := tab.insert(100, bucketHash) + val1, ok := tab.fetch(id1) + require.True(t, ok) + require.Equal(t, 100, val1) + + // Insert second value to the same bucket + id2 := tab.insert(200, bucketHash) + val2, ok := tab.fetch(id2) + require.True(t, ok) + require.Equal(t, 200, val2) + + // Both values should still be accessible + refID, ok := tab.lookup(100, bucketHash) + require.True(t, ok, "First inserted value should still exist") + require.EqualValues(t, id1, refID, "Reference ID for first value should match") + + refID, ok = tab.lookup(200, bucketHash) + require.True(t, ok, "Second inserted value should exist") + require.EqualValues(t, id2, refID, "Reference ID for second value should match") + + // Access the first value to make it MRU + refID, ok = tab.lookup(100, bucketHash) + require.True(t, ok) + require.EqualValues(t, id1, refID) + + // Now the second value (200) should be LRU + // Insert a third value - it should evict the second value (200) + id3 := tab.insert(300, bucketHash) + val3, ok := tab.fetch(id3) + require.True(t, ok) + require.Equal(t, 300, val3) + + // First value should still be accessible + refID, ok = tab.lookup(100, bucketHash) + require.True(t, ok, "First value should still exist after third insert") + require.EqualValues(t, id1, refID) + + // Second value should have been evicted + refID, ok = tab.lookup(200, bucketHash) + require.False(t, ok, "Second value should be evicted as it was LRU") + require.EqualValues(t, 0, refID) 
+ + // But the third value should be accessible + refID, ok = tab.lookup(300, bucketHash) + require.True(t, ok, "Third value should exist") + require.EqualValues(t, id3, refID) + + // Now make the third value MRU + refID, ok = tab.lookup(300, bucketHash) + require.True(t, ok) + require.EqualValues(t, id3, refID) + + // Insert a fourth value - it should evict the first value (100) + id4 := tab.insert(400, bucketHash) + val4, ok := tab.fetch(id4) + require.True(t, ok) + require.Equal(t, 400, val4) + + // First value should now be evicted + refID, ok = tab.lookup(100, bucketHash) + require.False(t, ok, "First value should now be evicted as it became LRU") + require.EqualValues(t, 0, refID) + + // Third and fourth values should be accessible + refID, ok = tab.lookup(300, bucketHash) + require.True(t, ok, "Third value should still exist") + require.EqualValues(t, id3, refID) + refID, ok = tab.lookup(400, bucketHash) + require.True(t, ok, "Fourth value should exist") + require.EqualValues(t, id4, refID) +} + +// TestLRURefIDConsistency verifies that reference IDs remain consistent +// and that fetch/lookup operations correctly mark items as MRU +func TestLRURefIDConsistency(t *testing.T) { + partitiontest.PartitionTest(t) + tab, err := newLRUTable[int](1024) + require.NoError(t, err) + bucketHash := uint64(42) + + // Insert and get reference ID + id1 := tab.insert(100, bucketHash) + + // Lookup should return the same reference ID + ref, ok := tab.lookup(100, bucketHash) + require.True(t, ok) + require.Equal(t, id1, ref, "Reference ID from lookup should match insert") + + // Fetch using the ID should return the correct value + val, ok := tab.fetch(id1) + require.True(t, ok) + require.Equal(t, 100, val, "Fetch should return the correct value") + + // Insert another value with same hash (same bucket) + id2 := tab.insert(200, bucketHash) + require.NotEqual(t, id1, id2, "Different values should have different reference IDs") + + // Both values should be accessible via their 
reference IDs + val1, ok1 := tab.fetch(id1) + val2, ok2 := tab.fetch(id2) + require.True(t, ok1) + require.True(t, ok2) + require.Equal(t, 100, val1) + require.Equal(t, 200, val2) +} + +// TestLRUErrorPaths tests the error paths in fetch operations to ensure 100% coverage +func TestLRUErrorPaths(t *testing.T) { + partitiontest.PartitionTest(t) + + // The lruTableSize in lru_table.go is 512, so we need to create an ID + // where the bucket index (id >> 1) exceeds this value + // If bucket index >= 512, fetch should return false + invalidBucketID := lruTableReferenceID(1024 << 1) // (1024 is > 512) + + // Create a decoder with an empty LRU table + dec, err := NewStatefulDecoder(1024) + require.NoError(t, err) + + // Attempt to access references with invalid bucket IDs + _, ok := dec.sndTable.fetch(invalidBucketID) + require.False(t, ok) + _, ok = dec.pkTable.fetch(invalidBucketID) + require.False(t, ok) + _, ok = dec.pk2Table.fetch(invalidBucketID) + require.False(t, ok) + + // Attempt to access an invalid proposal reference by looking up a proposal that doesn't exist + prop := proposalEntry{dig: crypto.Digest{1}, encdig: crypto.Digest{2}} + index := dec.proposalWindow.lookup(prop) + require.Equal(t, 0, index) +} + +func TestLRUTableQuick(t *testing.T) { + partitiontest.PartitionTest(t) + cfg := &quick.Config{MaxCount: 50000} + + hashfn := func(v uint32) uint64 { + // use FNV-1 for hashing test values + h64 := fnv.New64() + h64.Write([]byte(binary.LittleEndian.AppendUint32(nil, v))) + return h64.Sum64() + } + + // Property: when a third distinct value is inserted into a bucket, the + // previously least-recently-used (LRU) value must be evicted, while the + // previously most-recently-used (MRU) value survives. + prop := func(seq []uint32) bool { + tab, err := newLRUTable[uint32](1024) + require.NoError(t, err) + + // Per-bucket ordered list of values, index 0 == MRU, len<=2. 
+ type order []uint32 + expectedState := make(map[lruBucketIndex]order) + + for _, v := range seq { + h := hashfn(v) + b := tab.hashToBucketIndex(h) + expectedBucket := expectedState[b] + + // First, try lookup. + if id, ok := tab.lookup(v, h); ok { + // Move found value to MRU position in state. + if len(expectedBucket) == 2 { + if expectedBucket[0] != v { + expectedBucket[0], expectedBucket[1] = v, expectedBucket[0] + } + } else if len(expectedBucket) == 1 { + expectedBucket[0] = v // already MRU + } + + // Round-trip fetch check. + fetched, okF := tab.fetch(id) + if !okF || fetched != v { + return false + } + expectedState[b] = expectedBucket + continue + } + + // Insert new distinct value. + _ = tab.insert(v, h) + // Update expected state. + switch len(expectedBucket) { + case 0: // Bucket was empty + expectedState[b] = order{v} + continue + case 1: // Bucket had one value + expectedState[b] = order{v, expectedBucket[0]} + continue + case 2: // Bucket was full, expect eviction of state[1] + lruVal := expectedBucket[1] + + // After insert: MRU is v, survivor should be previous MRU (state[0]) + expectedState[b] = order{v, expectedBucket[0]} + + // Check LRU really went away + if _, ok := tab.lookup(lruVal, h); ok { + return false + } + // The previous MRU MUST still be present + if _, ok := tab.lookup(expectedBucket[0], h); !ok { + return false + } + // The newly inserted value must be present + if _, ok := tab.lookup(v, h); !ok { + return false + } + default: // Should not happen + return false + } + } + return true + } + + if err := quick.Check(prop, cfg); err != nil { + t.Fatalf("quick-check failed: %v", err) + } +} diff --git a/network/vpack/msgp.go b/network/vpack/msgp.go index 0346eae866..b515d1fa3c 100644 --- a/network/vpack/msgp.go +++ b/network/vpack/msgp.go @@ -60,6 +60,26 @@ func isMsgpFixint(b byte) bool { return b>>7 == 0 } +// msgpVaruintRemaining looks at the first byte of a msgpack-encoded variable-length unsigned integer, +// and returns the 
number of bytes remaining in the encoded value (not including the first byte). +func msgpVaruintRemaining(first byte) (int, error) { + switch first { + case msgpUint8: + return 1, nil + case msgpUint16: + return 2, nil + case msgpUint32: + return 4, nil + case msgpUint64: + return 8, nil + default: + if !isMsgpFixint(first) { + return 0, fmt.Errorf("msgpVaruintRemaining: expected fixint or varuint tag, got 0x%02x", first) + } + return 0, nil + } +} + // msgpVoteParser provides a zero-allocation msgpVoteParser for vote messages. type msgpVoteParser struct { data []byte @@ -171,24 +191,14 @@ func (p *msgpVoteParser) readUintBytes() ([]byte, error) { if err != nil { return nil, err } + dataSize, err := msgpVaruintRemaining(b) + if err != nil { + return nil, err + } // fixint is a single byte containing marker and value - if isMsgpFixint(b) { + if dataSize == 0 { return p.data[startPos : startPos+1], nil } - // otherwise, we expect a tag byte followed by the value - var dataSize int - switch b { - case msgpUint8: - dataSize = 1 - case msgpUint16: - dataSize = 2 - case msgpUint32: - dataSize = 4 - case msgpUint64: - dataSize = 8 - default: - return nil, fmt.Errorf("expected uint tag, got 0x%02x", b) - } if err := p.ensureBytes(dataSize); err != nil { return nil, err } diff --git a/network/vpack/proposal_window.go b/network/vpack/proposal_window.go new file mode 100644 index 0000000000..b4004c4c24 --- /dev/null +++ b/network/vpack/proposal_window.go @@ -0,0 +1,96 @@ +// Copyright (C) 2019-2025 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. 
+// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package vpack + +// proposalEntry contains all the values inside the r.prop map in a vote. +// Some fields may be omitted, so a mask is used to indicate which fields +// are present (bitDig, bitEncDig, bitOper, bitOprop). +type proposalEntry struct { + dig, encdig, oprop [digestSize]byte + operEnc [maxMsgpVaruintSize]byte // msgp varuint encoding of oper + operLen uint8 // length of operEnc + mask uint8 // which fields were present +} + +// proposalWindowSize is fixed because hdr[1] holds only 3 bits for the reference code +// (0 = literal, 1-7 = index). +const proposalWindowSize = 7 + +// propWindow implements a small sliding window for vote proposal bundles. +// It behaves like the dynamic table defined in RFC 7541 (HPACK), but is limited +// to 7 entries, encoded using 3 bits in the header byte. This is enough to +// provide effective compression, since usually almost all the votes in a round +// are for the same proposal value. +type propWindow struct { + entries [proposalWindowSize]proposalEntry // circular buffer + head int // slot of the oldest entry + size int // number of live entries (0 ... windowSize) +} + +// lookup returns the 1-based HPACK index of pv. It walks from the oldest entry +// to the newest; worst-case is seven comparisons, which is fine for such a +// small table. Returns 0 if not found. +func (w *propWindow) lookup(pv proposalEntry) int { + for i := range w.size { + slot := (w.head + i) % proposalWindowSize // oldest first + if w.entries[slot] == pv { + // Convert position to HPACK index. 
+ // Example: size == 7 + // i == 0 (oldest) -> index 7 + // i == 1 -> index 6 + // i == 2 -> index 5 + // ... + // i == 6 (newest) -> index 1 + return w.size - i + } + } + return 0 +} + +// byRef returns the proposalEntry stored at HPACK index idx (1 ... w.size). +// ok == false if idx is out of range. +func (w *propWindow) byRef(idx int) (prop proposalEntry, ok bool) { + if idx < 1 || idx > w.size { + return proposalEntry{}, false + } + // convert HPACK index (1 == newest, w.size == oldest) to physical slot + // newest slot is (head + size - 1) % windowSize + // logical slot idx is (idx - 1) positions from newest + physical := (w.head + w.size - idx) % proposalWindowSize + // Example: size == 7, head == 2 + // logical idx == 1 (newest) -> slot (2 + 7 - 1) % 7 == slot 1 + // logical idx == 2 -> slot (2 + 7 - 2) % 7 == slot 0 + // logical idx == 3 -> slot (2 + 7 - 3) % 7 == slot 6 + // ... + // logical idx == 7 (oldest) -> slot (2 + 7 - 7) % 7 == slot 2 + return w.entries[physical], true +} + +// insertNew puts pv into the table as the newest entry (HPACK index 1). +// When the table is full, the oldest one is overwritten. +func (w *propWindow) insertNew(pv proposalEntry) { + if w.size == proposalWindowSize { + // Evict the oldest element at w.head, then advance head. + w.entries[w.head] = pv + w.head = (w.head + 1) % proposalWindowSize + } else { + // Store at the slot just after the current newest. + pos := (w.head + w.size) % proposalWindowSize + w.entries[pos] = pv + w.size++ + } +} diff --git a/network/vpack/proposal_window_test.go b/network/vpack/proposal_window_test.go new file mode 100644 index 0000000000..1ec9a9dab5 --- /dev/null +++ b/network/vpack/proposal_window_test.go @@ -0,0 +1,79 @@ +// Copyright (C) 2019-2025 Algorand, Inc. 
+// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package vpack + +import ( + "testing" + + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/stretchr/testify/require" +) + +func makeTestPropBundle(seed byte) proposalEntry { + var p proposalEntry + for i := range p.dig { + p.dig[i] = seed + } + p.operLen = 1 + p.operEnc[0] = seed + p.mask = bitDig | bitOper + return p +} + +func TestPropWindowHPACK(t *testing.T) { + partitiontest.PartitionTest(t) + var w propWindow + + // 1. Insert seven unique entries (fills the window). + for i := 0; i < proposalWindowSize; i++ { + pb := makeTestPropBundle(byte(i)) + w.insertNew(pb) + require.Equal(t, i+1, w.size, "size incorrect after insertNew") + // Newly inserted entry should always be HPACK index 1 (MRU). + require.Equal(t, 1, w.lookup(pb), "lookup did not return 1") + } + + // 2. Verify byRef/lookup mapping for current content. + for idx := 1; idx <= proposalWindowSize; idx++ { + prop, ok := w.byRef(idx) + require.True(t, ok) + expectedSeed := byte(proposalWindowSize - idx) // newest (idx==1) == seed 6, oldest (idx==7) == seed 0 + want := makeTestPropBundle(expectedSeed) + require.Equal(t, want, prop) + } + + // 3. Insert an eighth entry – should evict the oldest (seed 0). 
+ evicted := makeTestPropBundle(0) + newEntry := makeTestPropBundle(7) + w.insertNew(newEntry) + require.Equal(t, proposalWindowSize, w.size, "size after eviction incorrect") + + // Oldest should now be former seed 1, and evicted one should not be found. + require.Equal(t, 0, w.lookup(evicted), "evicted entry still found") + + // New entry must be at HPACK index 1. + require.Equal(t, 1, w.lookup(newEntry), "newest entry lookup not 1") + + // Verify byRef again: idx 1 == seed 7, idx 7 == seed 1 + prop, ok := w.byRef(1) + require.True(t, ok) + require.Equal(t, newEntry, prop) + + prop, ok = w.byRef(proposalWindowSize) + require.True(t, ok) + require.Equal(t, makeTestPropBundle(1), prop) +} diff --git a/network/vpack/vpack.go b/network/vpack/vpack.go index 05e2c5861d..18b552ed91 100644 --- a/network/vpack/vpack.go +++ b/network/vpack/vpack.go @@ -39,7 +39,7 @@ const ( ) const ( - headerSize = 2 // 1 byte for mask, 1 byte for future use + headerSize = 2 // 1 byte for StatelessEncoder, 1 byte for StatefulEncoder maxMsgpVaruintSize = 9 // max size of a varuint is 8 bytes + 1 byte for the marker msgpBin8Len32Size = len(msgpBin8Len32) + 32 @@ -376,21 +376,9 @@ func (d *StatelessDecoder) varuint(fieldName string) error { return fmt.Errorf("not enough data to read varuint marker for field %s", fieldName) } marker := d.src[d.pos] // read msgpack varuint marker - moreBytes := 0 - switch marker { - case msgpUint8: - moreBytes = 1 - case msgpUint16: - moreBytes = 2 - case msgpUint32: - moreBytes = 4 - case msgpUint64: - moreBytes = 8 - default: // fixint uses a single byte for marker+value - if !isMsgpFixint(marker) { - return fmt.Errorf("not a fixint for field %s, got %d", fieldName, marker) - } - moreBytes = 0 + moreBytes, err := msgpVaruintRemaining(marker) + if err != nil { + return fmt.Errorf("invalid varuint marker %d for field %s: %w", marker, fieldName, err) } if d.pos+1+moreBytes > len(d.src) { diff --git a/network/websocketProxy_test.go b/network/websocketProxy_test.go 
index fdb27bcaa8..753430a965 100644 --- a/network/websocketProxy_test.go +++ b/network/websocketProxy_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -// This is a simple reverse proxy for websocket connections. It is used to to test +// This is a simple reverse proxy for websocket connections. It is used to test // ws network behavior when UseXForwardedForAddressField is enabled. // Not suitable for production use. package network diff --git a/network/wsNetwork.go b/network/wsNetwork.go index 184616e998..e0086c0766 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -23,6 +23,7 @@ import ( "errors" "fmt" "io" + "maps" "net" "net/http" "net/textproto" @@ -166,6 +167,10 @@ type WebsocketNetwork struct { config config.Local + // voteCompressionTableSize is the validated/normalized table size for VP compression. + // It is set during setup() by validating config.StatefulVoteCompressionTableSize. + voteCompressionTableSize uint + log logging.Logger wg sync.WaitGroup @@ -228,12 +233,9 @@ type WebsocketNetwork struct { // connPerfMonitor is used on outgoing connections to measure their relative message timing connPerfMonitor *connectionPerformanceMonitor - // lastNetworkAdvanceMu synchronized the access to lastNetworkAdvance - lastNetworkAdvanceMu deadlock.Mutex - - // lastNetworkAdvance contains the last timestamp where the agreement protocol was able to make a notable progress. - // it used as a watchdog to help us detect connectivity issues ( such as cliques ) - lastNetworkAdvance time.Time + // outgoingConnsCloser used to check number of outgoing connections and disconnect as needed. + // it is also used as a watchdog to help us detect connectivity issues ( such as cliques ) so that it monitors agreement protocol progress. + outgoingConnsCloser *outgoingConnsCloser // number of throttled outgoing connections "slots" needed to be populated. 
throttledOutgoingConnections atomic.Int32 @@ -556,6 +558,9 @@ func (wn *WebsocketNetwork) setup() error { } wn.dialer = limitcaller.MakeRateLimitingDialer(wn.phonebook, preferredResolver) + // Validate and normalize vote compression table size + wn.voteCompressionTableSize = wn.config.NormalizedVoteCompressionTableSize(wn.log) + wn.upgrader.ReadBufferSize = 4096 wn.upgrader.WriteBufferSize = 4096 wn.upgrader.EnableCompression = false @@ -604,6 +609,7 @@ func (wn *WebsocketNetwork) setup() error { var err error wn.mesher, err = meshCreator.create( withContext(wn.ctx), + withTargetConnCount(wn.config.GossipFanout), withMeshNetMeshFn(wn.meshThreadInner), withMeshPeerStatReporter(func() { wn.peerStater.sendPeerConnectionsTelemetryStatus(wn) @@ -620,13 +626,7 @@ func (wn *WebsocketNetwork) setup() error { wn.eventualReadyDelay = time.Minute wn.prioTracker = newPrioTracker(wn) - readBufferLen := wn.config.IncomingConnectionsLimit + wn.config.GossipFanout - if readBufferLen < 100 { - readBufferLen = 100 - } - if readBufferLen > 10000 { - readBufferLen = 10000 - } + readBufferLen := min(max(wn.config.IncomingConnectionsLimit+wn.config.GossipFanout, 100), 10000) wn.handler = msgHandler{ ctx: wn.ctx, log: wn.log, @@ -642,7 +642,7 @@ func (wn *WebsocketNetwork) setup() error { wn.incomingMsgFilter = makeMessageFilter(wn.config.IncomingMessageFilterBucketCount, wn.config.IncomingMessageFilterBucketSize) } wn.connPerfMonitor = makeConnectionPerformanceMonitor([]Tag{protocol.AgreementVoteTag, protocol.TxnTag}) - wn.lastNetworkAdvance = time.Now().UTC() + wn.outgoingConnsCloser = makeOutgoingConnsCloser(wn.log, wn, wn.connPerfMonitor, cliqueResolveInterval) // set our supported versions if wn.config.NetworkProtocolVersion != "" { @@ -671,7 +671,7 @@ func (wn *WebsocketNetwork) Start() error { wn.messagesOfInterestEnc = marshallMessageOfInterestMap(wn.messagesOfInterest) } - if wn.config.IsGossipServer() || wn.config.ForceRelayMessages { + if wn.relayMessages { listener, err := 
net.Listen("tcp", wn.config.NetAddress) if err != nil { wn.log.Errorf("network could not listen %v: %s", wn.config.NetAddress, err) @@ -847,7 +847,8 @@ type peerMetadataProvider interface { PublicAddress() string RandomID() string SupportedProtoVersions() []string - Config() config.Local + VoteCompressionEnabled() bool + StatefulVoteCompressionTableSize() uint } // TelemetryGUID returns the telemetry GUID of this node. @@ -875,6 +876,16 @@ func (wn *WebsocketNetwork) Config() config.Local { return wn.config } +// StatefulVoteCompressionTableSize returns the validated/normalized vote compression table size. +func (wn *WebsocketNetwork) StatefulVoteCompressionTableSize() uint { + return wn.voteCompressionTableSize +} + +// VoteCompressionEnabled returns whether vote compression is enabled for this node. +func (wn *WebsocketNetwork) VoteCompressionEnabled() bool { + return wn.config.EnableVoteCompression +} + func setHeaders(header http.Header, netProtoVer string, meta peerMetadataProvider) { header.Set(TelemetryIDHeader, meta.TelemetryGUID()) header.Set(InstanceNameHeader, meta.InstanceName()) @@ -887,10 +898,32 @@ func setHeaders(header http.Header, netProtoVer string, meta peerMetadataProvide header.Set(GenesisHeader, meta.GetGenesisID()) // set the features header (comma-separated list) - header.Set(PeerFeaturesHeader, PeerFeatureProposalCompression) - features := []string{PeerFeatureProposalCompression} - if meta.Config().EnableVoteCompression { - features = append(features, PeerFeatureVoteVpackCompression) + features := []string{peerFeatureProposalCompression} + if meta.VoteCompressionEnabled() { + features = append(features, peerFeatureVoteVpackCompression) + + // Announce our maximum supported vote compression table size + // Both sides will independently calculate min(ourSize, theirSize) + // Only advertise stateful features if stateless compression is also enabled + // Supported values: 16, 32, 64, 128, 256, 512, 1024, 2048 (or higher, which advertises 2048) 
+ switch dtSize := uint32(meta.StatefulVoteCompressionTableSize()); { + case dtSize >= 2048: + features = append(features, peerFeatureVoteVpackStateful2048) + case dtSize >= 1024: + features = append(features, peerFeatureVoteVpackStateful1024) + case dtSize >= 512: + features = append(features, peerFeatureVoteVpackStateful512) + case dtSize >= 256: + features = append(features, peerFeatureVoteVpackStateful256) + case dtSize >= 128: + features = append(features, peerFeatureVoteVpackStateful128) + case dtSize >= 64: + features = append(features, peerFeatureVoteVpackStateful64) + case dtSize >= 32: + features = append(features, peerFeatureVoteVpackStateful32) + case dtSize >= 16: + features = append(features, peerFeatureVoteVpackStateful16) + } } header.Set(PeerFeaturesHeader, strings.Join(features, ",")) @@ -1115,21 +1148,23 @@ func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *htt client, _ := wn.GetHTTPClient(trackedRequest.remoteAddress()) peer := &wsPeer{ - wsPeerCore: makePeerCore(wn.ctx, wn, wn.log, wn.handler.readBuffer, trackedRequest.remoteAddress(), client, trackedRequest.remoteHost), - conn: wsPeerWebsocketConnImpl{conn}, - outgoing: false, - InstanceName: trackedRequest.otherInstanceName, - incomingMsgFilter: wn.incomingMsgFilter, - prioChallenge: challenge, - createTime: trackedRequest.created, - version: matchingVersion, - identity: peerID, - identityChallenge: peerIDChallenge, - identityVerified: atomic.Uint32{}, - features: decodePeerFeatures(matchingVersion, request.Header.Get(PeerFeaturesHeader)), - enableVoteCompression: wn.config.EnableVoteCompression, + wsPeerCore: makePeerCore(wn.ctx, wn, wn.log, wn.handler.readBuffer, trackedRequest.remoteAddress(), client, trackedRequest.remoteHost), + conn: wsPeerWebsocketConnImpl{conn}, + outgoing: false, + InstanceName: trackedRequest.otherInstanceName, + incomingMsgFilter: wn.incomingMsgFilter, + prioChallenge: challenge, + createTime: trackedRequest.created, + version: 
matchingVersion, + identity: peerID, + identityChallenge: peerIDChallenge, + identityVerified: atomic.Uint32{}, + features: decodePeerFeatures(matchingVersion, request.Header.Get(PeerFeaturesHeader)), + enableVoteCompression: wn.config.EnableVoteCompression, + voteCompressionTableSize: wn.voteCompressionTableSize, } peer.TelemetryGUID = trackedRequest.otherTelemetryGUID + wn.log.Debugf("Server: client features '%s', decoded %x, our response '%s'", request.Header.Get(PeerFeaturesHeader), peer.features, responseHeader.Get(PeerFeaturesHeader)) peer.init(wn.config, wn.outgoingMessagesBufferSize) wn.addPeer(peer) wn.log.With("event", "ConnectedIn").With("remote", trackedRequest.remoteAddress()).With("local", localAddr).Infof("Accepted incoming connection from peer %s", trackedRequest.remoteAddr) @@ -1433,10 +1468,14 @@ func (wn *msgBroadcaster) preparePeerData(request broadcastRequest, prio bool) ( } // Optionally compress votes: only supporting peers will receive it. if prio && request.tag == protocol.AgreementVoteTag && wn.enableVoteCompression { + networkVoteBroadcastUncompressedBytes.AddUint64(uint64(len(request.data)), nil) var logMsg string compressedData, logMsg = vpackCompressVote(tbytes, request.data) if len(logMsg) > 0 { wn.log.Warn(logMsg) + } else { + // Track compressed size only on success (compressedData includes tag) + networkVoteBroadcastCompressedBytes.AddUint64(uint64(len(compressedData)), nil) } } return mbytes, compressedData, digest @@ -1560,22 +1599,26 @@ type meshRequest struct { done chan struct{} } -func (wn *WebsocketNetwork) meshThreadInner() bool { +func (wn *WebsocketNetwork) meshThreadInner(targetConnCount int) int { wn.refreshRelayArchivePhonebookAddresses() // as long as the call to checkExistingConnectionsNeedDisconnecting is deleting existing connections, we want to // kick off the creation of new connections. 
+ + //nolint:staticcheck // easier to read for { - if wn.checkNewConnectionsNeeded() { + if wn.checkNewConnectionsNeeded(targetConnCount) { // new connections were created. break } - if !wn.checkExistingConnectionsNeedDisconnecting() { + if !wn.outgoingConnsCloser.checkExistingConnectionsNeedDisconnecting(targetConnCount) { // no connection were removed. break } } - return true + + currentConnections := wn.numOutgoingPeers() + wn.numOutgoingPending() + return currentConnections } func (wn *WebsocketNetwork) refreshRelayArchivePhonebookAddresses() { @@ -1617,15 +1660,14 @@ func (wn *WebsocketNetwork) updatePhonebookAddresses(relayAddrs []string, archiv // it returns false if no connections are needed, and true otherwise. // note that the determination of needed connection could be inaccurate, and it might return false while // more connection should be created. -func (wn *WebsocketNetwork) checkNewConnectionsNeeded() bool { - desired := wn.config.GossipFanout +func (wn *WebsocketNetwork) checkNewConnectionsNeeded(targetConnCount int) bool { numOutgoingTotal := wn.numOutgoingPeers() + wn.numOutgoingPending() - need := desired - numOutgoingTotal + need := targetConnCount - numOutgoingTotal if need <= 0 { return false } // get more than we need so that we can ignore duplicates - newAddrs := wn.phonebook.GetAddresses(desired+numOutgoingTotal, phonebook.RelayRole) + newAddrs := wn.phonebook.GetAddresses(targetConnCount+numOutgoingTotal, phonebook.RelayRole) for _, na := range newAddrs { if na == wn.config.PublicAddress { // filter out self-public address, so we won't try to connect to ourselves. @@ -1644,89 +1686,12 @@ func (wn *WebsocketNetwork) checkNewConnectionsNeeded() bool { return true } -// checkExistingConnectionsNeedDisconnecting check to see if existing connection need to be dropped due to -// performance issues and/or network being stalled. 
-func (wn *WebsocketNetwork) checkExistingConnectionsNeedDisconnecting() bool { - // we already connected ( or connecting.. ) to GossipFanout peers. - // get the actual peers. - outgoingPeers := wn.outgoingPeers() - if len(outgoingPeers) < wn.config.GossipFanout { - // reset the performance monitor. - wn.connPerfMonitor.Reset([]Peer{}) - return wn.checkNetworkAdvanceDisconnect() - } - - if !wn.connPerfMonitor.ComparePeers(outgoingPeers) { - // different set of peers. restart monitoring. - wn.connPerfMonitor.Reset(outgoingPeers) - } - - // same set of peers. - peerStat := wn.connPerfMonitor.GetPeersStatistics() - if peerStat == nil { - // performance metrics are not yet ready. - return wn.checkNetworkAdvanceDisconnect() - } - - // update peers with the performance metrics we've gathered. - var leastPerformingPeer *wsPeer = nil - for _, stat := range peerStat.peerStatistics { - wsPeer := stat.peer.(*wsPeer) - wsPeer.peerMessageDelay = stat.peerDelay - wn.log.Infof("network performance monitor - peer '%s' delay %d first message portion %d%%", wsPeer.GetAddress(), stat.peerDelay, int(stat.peerFirstMessage*100)) - if wsPeer.throttledOutgoingConnection && leastPerformingPeer == nil { - leastPerformingPeer = wsPeer - } - } - if leastPerformingPeer == nil { - return wn.checkNetworkAdvanceDisconnect() - } - wn.disconnect(leastPerformingPeer, disconnectLeastPerformingPeer) - wn.connPerfMonitor.Reset([]Peer{}) - - return true -} - -// checkNetworkAdvanceDisconnect is using the lastNetworkAdvance indicator to see if the network is currently "stuck". -// if it's seems to be "stuck", a randomally picked peer would be disconnected. 
-func (wn *WebsocketNetwork) checkNetworkAdvanceDisconnect() bool { - lastNetworkAdvance := wn.getLastNetworkAdvance() - if time.Now().UTC().Sub(lastNetworkAdvance) < cliqueResolveInterval { - return false - } - outgoingPeers := wn.outgoingPeers() - if len(outgoingPeers) == 0 { - return false - } - if wn.numOutgoingPending() > 0 { - // we're currently trying to extend the list of outgoing connections. no need to - // disconnect any existing connection to free up room for another connection. - return false - } - var peer *wsPeer - disconnectPeerIdx := crypto.RandUint63() % uint64(len(outgoingPeers)) - peer = outgoingPeers[disconnectPeerIdx].(*wsPeer) - - wn.disconnect(peer, disconnectCliqueResolve) - wn.connPerfMonitor.Reset([]Peer{}) - wn.OnNetworkAdvance() - return true -} - -func (wn *WebsocketNetwork) getLastNetworkAdvance() time.Time { - wn.lastNetworkAdvanceMu.Lock() - defer wn.lastNetworkAdvanceMu.Unlock() - return wn.lastNetworkAdvance -} - // OnNetworkAdvance notifies the network library that the agreement protocol was able to make a notable progress. // this is the only indication that we have that we haven't formed a clique, where all incoming messages // arrive very quickly, but might be missing some votes. The usage of this call is expected to have similar // characteristics as with a watchdog timer. 
func (wn *WebsocketNetwork) OnNetworkAdvance() { - wn.lastNetworkAdvanceMu.Lock() - defer wn.lastNetworkAdvanceMu.Unlock() - wn.lastNetworkAdvance = time.Now().UTC() + wn.outgoingConnsCloser.updateLastAdvance() if wn.nodeInfo != nil && !wn.relayMessages && !wn.config.ForceFetchTransactions { select { case wn.messagesOfInterestRefresh <- struct{}{}: @@ -1945,13 +1910,24 @@ const UserAgentHeader = "User-Agent" // PeerFeaturesHeader is the HTTP header listing features const PeerFeaturesHeader = "X-Algorand-Peer-Features" -// PeerFeatureProposalCompression is a value for PeerFeaturesHeader indicating peer +// peerFeatureProposalCompression is a value for PeerFeaturesHeader indicating peer // supports proposal payload compression with zstd -const PeerFeatureProposalCompression = "ppzstd" +const peerFeatureProposalCompression = "ppzstd" -// PeerFeatureVoteVpackCompression is a value for PeerFeaturesHeader indicating peer +// peerFeatureVoteVpackCompression is a value for PeerFeaturesHeader indicating peer // supports agreement vote message compression with vpack -const PeerFeatureVoteVpackCompression = "avvpack" +const peerFeatureVoteVpackCompression = "avvpack" + +// peerFeatureVoteVpackStateful* are values for PeerFeaturesHeader indicating peer +// supports specific table sizes for stateful vpack compression +const peerFeatureVoteVpackStateful2048 = "avvpack2048" +const peerFeatureVoteVpackStateful1024 = "avvpack1024" +const peerFeatureVoteVpackStateful512 = "avvpack512" +const peerFeatureVoteVpackStateful256 = "avvpack256" +const peerFeatureVoteVpackStateful128 = "avvpack128" +const peerFeatureVoteVpackStateful64 = "avvpack64" +const peerFeatureVoteVpackStateful32 = "avvpack32" +const peerFeatureVoteVpackStateful16 = "avvpack16" var websocketsScheme = map[string]string{"http": "ws", "https": "wss"} @@ -2040,7 +2016,7 @@ func (t *HTTPPAddressBoundTransport) RoundTrip(req *http.Request) (*http.Respons return t.InnerTransport.RoundTrip(req) } -// filterASCII filter out 
the non-ascii printable characters out of the given input string and +// filterASCII filter out the non-ascii printable characters out of the given input string // and replace these with unprintableCharacterGlyph. // It's used as a security qualifier before logging a network-provided data. // The function allows only characters in the range of [32..126], which excludes all the @@ -2180,8 +2156,10 @@ func (wn *WebsocketNetwork) tryConnect(netAddr, gossipAddr string) { identity: peerID, features: decodePeerFeatures(matchingVersion, response.Header.Get(PeerFeaturesHeader)), enableVoteCompression: wn.config.EnableVoteCompression, + voteCompressionTableSize: wn.voteCompressionTableSize, } peer.TelemetryGUID, peer.InstanceName, _ = getCommonHeaders(response.Header) + wn.log.Debugf("Client: server features '%s', decoded %x", response.Header.Get(PeerFeaturesHeader), peer.features) // if there is a final verification message to send, it means this peer has a verified identity, // attempt to set the peer and identityTracker @@ -2392,11 +2370,9 @@ func (wn *WebsocketNetwork) addPeer(peer *wsPeer) { } // simple duplicate *pointer* check. 
should never trigger given the callers to addPeer // TODO: remove this after making sure it is safe to do so - for _, p := range wn.peers { - if p == peer { - wn.log.Errorf("dup peer added %#v", peer) - return - } + if slices.Contains(wn.peers, peer) { + wn.log.Errorf("dup peer added %#v", peer) + return } heap.Push(peersHeap{wn}, peer) wn.prioTracker.setPriority(peer, peer.prioAddress, peer.prioWeight) @@ -2469,9 +2445,7 @@ func (wn *WebsocketNetwork) registerMessageInterest(t protocol.Tag) { if wn.messagesOfInterest == nil { wn.messagesOfInterest = make(map[protocol.Tag]bool) - for tag, flag := range defaultSendMessageTags { - wn.messagesOfInterest[tag] = flag - } + maps.Copy(wn.messagesOfInterest, defaultSendMessageTags) } wn.messagesOfInterest[t] = true @@ -2485,9 +2459,7 @@ func (wn *WebsocketNetwork) DeregisterMessageInterest(t protocol.Tag) { if wn.messagesOfInterest == nil { wn.messagesOfInterest = make(map[protocol.Tag]bool) - for tag, flag := range defaultSendMessageTags { - wn.messagesOfInterest[tag] = flag - } + maps.Copy(wn.messagesOfInterest, defaultSendMessageTags) } delete(wn.messagesOfInterest, t) diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go index 07da18f59e..00e5abb060 100644 --- a/network/wsNetwork_test.go +++ b/network/wsNetwork_test.go @@ -407,7 +407,7 @@ func TestWebsocketNetworkBasicInvalidTags(t *testing.T) { // nolint:paralleltest })}}) // send a message with an invalid tag which is in defaultSendMessageTags. 
// it should not go through because the defaultSendMessageTags should not be accepted - // and the connection should be dropped dropped + // and the connection should be dropped netA.Broadcast(context.Background(), "XX", []byte("foo"), false, nil) for p := 0; p < 100; p++ { if strings.Contains(logOutput.String(), "wsPeer handleMessageOfInterest: could not unmarshall message from") { @@ -512,6 +512,7 @@ func TestWebsocketVoteCompression(t *testing.T) { cfgA := defaultConfig cfgA.GossipFanout = 1 cfgA.EnableVoteCompression = test.netAEnableCompression + cfgA.StatefulVoteCompressionTableSize = 0 // Disable stateful compression netA := makeTestWebsocketNodeWithConfig(t, cfgA) netA.Start() defer netStop(t, netA, "A") @@ -519,6 +520,7 @@ func TestWebsocketVoteCompression(t *testing.T) { cfgB := defaultConfig cfgB.GossipFanout = 1 cfgB.EnableVoteCompression = test.netBEnableCompression + cfgB.StatefulVoteCompressionTableSize = 0 // Disable stateful compression netB := makeTestWebsocketNodeWithConfig(t, cfgB) addrA, postListen := netA.Address() @@ -588,29 +590,6 @@ func TestWebsocketVoteCompression(t *testing.T) { } } -// Repeat basic, but test a unicast -func TestWebsocketNetworkUnicast(t *testing.T) { - partitiontest.PartitionTest(t) - - netA, _, counter, closeFunc := setupWebsocketNetworkAB(t, 2) - defer closeFunc() - counterDone := counter.done - - require.Equal(t, 1, len(netA.peers)) - require.Equal(t, 1, len(netA.GetPeers(PeersConnectedIn))) - peerB := netA.peers[0] - err := peerB.Unicast(context.Background(), []byte("foo"), protocol.TxnTag) - assert.NoError(t, err) - err = peerB.Unicast(context.Background(), []byte("bar"), protocol.TxnTag) - assert.NoError(t, err) - - select { - case <-counterDone: - case <-time.After(2 * time.Second): - t.Errorf("timeout, count=%d, wanted 2", counter.count) - } -} - // Like a basic test, but really we just want to have SetPeerData()/GetPeerData() func TestWebsocketPeerData(t *testing.T) { partitiontest.PartitionTest(t) @@ -4091,7 
+4070,7 @@ func TestTryConnectEarlyWrite(t *testing.T) { time.Sleep(2 * time.Millisecond) } - // Confirm that we successfuly received a message of interest + // Confirm that we successfully received a message of interest assert.Len(t, netA.peers, 1) fmt.Printf("MI Message Count: %v\n", netA.peers[0].miMessageCount.Load()) assert.Equal(t, uint64(1), netA.peers[0].miMessageCount.Load()) diff --git a/network/wsPeer.go b/network/wsPeer.go index 9b7f768233..a40814f2ac 100644 --- a/network/wsPeer.go +++ b/network/wsPeer.go @@ -19,6 +19,7 @@ package network import ( "context" "encoding/binary" + "errors" "fmt" "io" "net" @@ -77,6 +78,7 @@ var defaultSendMessageTags = map[protocol.Tag]bool{ protocol.TxnTag: true, protocol.UniEnsBlockReqTag: true, protocol.VoteBundleTag: true, + protocol.VotePackedTag: true, // allow VP abort messages to get through the send loop } // interface allows substituting debug implementation for *websocket.Conn @@ -241,6 +243,9 @@ type wsPeer struct { // enableCompression specifies whether this node can compress or decompress votes (and whether it has advertised this) enableVoteCompression bool + // voteCompressionTableSize is this node's configured table size for stateful compression (0 means disabled) + voteCompressionTableSize uint + // responseChannels used by the client to wait on the response of the request responseChannels map[uint64]chan *Response @@ -281,6 +286,9 @@ type wsPeer struct { // peerType defines the peer's underlying connection type // used for separate p2p vs ws metrics peerType peerType + + // msgCodec handles message compression/decompression for this peer + msgCodec *wsPeerMsgCodec } // HTTPPeer is what the opaque Peer might be. @@ -294,10 +302,6 @@ type HTTPPeer interface { // It is possible that we can only initiate a connection to a peer over websockets. type UnicastPeer interface { GetAddress() string - // Unicast sends the given bytes to this specific peer. Does not wait for message to be sent. 
- Unicast(ctx context.Context, data []byte, tag protocol.Tag) error - // Version returns the matching version from network.SupportedProtocolVersions - Version() string Request(ctx context.Context, tag Tag, topics Topics) (resp *Response, e error) Respond(ctx context.Context, reqMsg IncomingMessage, outMsg OutgoingMessage) (e error) } @@ -337,11 +341,6 @@ func (wp *wsPeerCore) GetNetwork() GossipNode { return wp.net } -// Version returns the matching version from network.SupportedProtocolVersions -func (wp *wsPeer) Version() string { - return wp.version -} - func (wp *wsPeer) ipAddr() []byte { remote := wp.conn.RemoteAddr() if remote == nil { @@ -390,35 +389,12 @@ func (wp *wsPeer) RoutingAddr() []byte { return ip[0:8] } -// Unicast sends the given bytes to this specific peer. Does not wait for message to be sent. -// (Implements UnicastPeer) -func (wp *wsPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag) error { - var err error - - tbytes := []byte(tag) - mbytes := make([]byte, len(tbytes)+len(msg)) - copy(mbytes, tbytes) - copy(mbytes[len(tbytes):], msg) - var digest crypto.Digest - if tag != protocol.MsgDigestSkipTag && len(msg) >= messageFilterSize { - digest = crypto.Hash(mbytes) - } - - ok := wp.writeNonBlock(ctx, mbytes, false, digest, time.Now()) - if !ok { - networkBroadcastsDropped.Inc(nil) - err = fmt.Errorf("wsPeer failed to unicast: %v", wp.GetAddress()) - } - - return err -} - // GetUnderlyingConnTCPInfo unwraps the connection and returns statistics about it on supported underlying implementations // // (Implements TCPInfoUnicastPeer) func (wp *wsPeer) GetUnderlyingConnTCPInfo() (*util.TCPInfo, error) { // unwrap websocket.Conn, requestTrackedConnection, rejectingLimitListenerConn - var uconn net.Conn = wp.conn.UnderlyingConn() + var uconn = wp.conn.UnderlyingConn() for i := 0; i < 10; i++ { wconn, ok := uconn.(wrappedConn) if !ok { @@ -493,6 +469,9 @@ func (wp *wsPeer) init(config config.Local, sendBufferLength int) { 
wp.outgoingMsgFilter = makeMessageFilter(config.OutgoingMessageFilterBucketCount, config.OutgoingMessageFilterBucketSize) } + // Initialize message codec for compression/decompression + wp.msgCodec = makeWsPeerMsgCodec(wp) + wp.wg.Add(2) go wp.readLoop() go wp.writeLoop() @@ -525,7 +504,6 @@ func (wp *wsPeer) readLoop() { }() wp.conn.SetReadLimit(MaxMessageLength) slurper := MakeLimitedReaderSlurper(averageMessageLength, MaxMessageLength) - dataConverter := makeWsPeerMsgDataDecoder(wp) for { msg := IncomingMessage{} @@ -603,15 +581,33 @@ func (wp *wsPeer) readLoop() { networkP2PReceivedBytesByTag.Add(string(tag[:]), uint64(len(msg.Data)+2)) networkP2PMessageReceivedByTag.Add(string(tag[:]), 1) } - msg.Data, err = dataConverter.convert(msg.Tag, msg.Data) + msg.Data, err = wp.msgCodec.decompress(msg.Tag, msg.Data) if err != nil { + // Handle VP errors by sending abort message and continuing + var vcErr *voteCompressionError + if errors.As(err, &vcErr) { + if close, reason := wp.handleVPError(err); close { + cleanupCloseError = reason + return + } + // Drop this vote and continue reading + continue + } + // Non-VP errors tear down connection wp.reportReadErr(err) return } + // If decompress returned nil (e.g., for abort message), drop the message + if msg.Data == nil { + continue + } if wp.peerType == peerTypeWs { - networkReceivedUncompressedBytesByTag.Add(string(tag[:]), uint64(len(msg.Data)+2)) + networkReceivedUncompressedBytesByTag.Add(string(msg.Tag), uint64(len(msg.Data)+2)) } else { - networkP2PReceivedUncompressedBytesByTag.Add(string(tag[:]), uint64(len(msg.Data)+2)) + networkP2PReceivedUncompressedBytesByTag.Add(string(msg.Tag), uint64(len(msg.Data)+2)) + } + if msg.Tag == protocol.VotePackedTag { // re-tag decompressed VP as AV + msg.Tag = protocol.AgreementVoteTag } msg.Sender = wp @@ -707,8 +703,6 @@ func (wp *wsPeer) readLoop() { } func (wp *wsPeer) handleMessageOfInterest(msg IncomingMessage) (close bool, reason disconnectReason) { - close = false - 
reason = disconnectReasonNone // decode the message, and ensure it's a valid message. msgTagsMap, err := unmarshallMessageOfInterest(msg.Data) if err != nil { @@ -723,6 +717,13 @@ func (wp *wsPeer) handleMessageOfInterest(msg IncomingMessage) (close bool, reas ctx: context.Background(), } + return wp.sendControlMessage(sm) +} + +// sendControlMessage sends a control message (like message-of-interest or VP abort) to the peer. +// It tries to send on the high-priority channel first (non-blocking), then falls back to +// blocking send on either high-priority or bulk channel. +func (wp *wsPeer) sendControlMessage(sm sendMessage) (close bool, reason disconnectReason) { // try to send the message to the send loop. The send loop will store the message locally and would use it. // the rationale here is that this message is rarely sent, and we would benefit from having it being lock-free. select { @@ -744,6 +745,17 @@ func (wp *wsPeer) handleMessageOfInterest(msg IncomingMessage) (close bool, reas return } +// handleVPError handles VP (stateful vote compression) errors by sending an abort message +// to the peer, signaling that stateful compression should be disabled for this connection. +// The connection remains open and votes will continue to flow as AV messages. 
+func (wp *wsPeer) handleVPError(err error) (close bool, reason disconnectReason) { + networkVPAbortMessagesSent.Inc(nil) + abortMsg := append([]byte(protocol.VotePackedTag), voteCompressionAbortMessage) + sm := sendMessage{data: abortMsg, enqueued: time.Now(), peerEnqueued: time.Now(), ctx: context.Background()} + + return wp.sendControlMessage(sm) +} + func (wp *wsPeer) readLoopCleanup(reason disconnectReason) { wp.internalClose(reason) wp.wg.Done() @@ -808,6 +820,28 @@ func (wp *wsPeer) writeLoopSendMsg(msg sendMessage) disconnectReason { return disconnectReasonNone } + // Check if we should apply compression + dataToSend := msg.data + if wp.msgCodec != nil { + compressed, err := wp.msgCodec.compress(tag, msg.data) + if err != nil { + // VP compression error - send abort message then continue with original AV + var vcErr *voteCompressionError + if errors.As(err, &vcErr) { + networkVPAbortMessagesSent.Inc(nil) + wp.msgCodec.switchOffStatefulVoteCompression() + abortMsg := append([]byte(protocol.VotePackedTag), voteCompressionAbortMessage) + _ = wp.conn.WriteMessage(websocket.BinaryMessage, abortMsg) + // Fall through to send original AV message below + } + // Note: compressed is already nil, so dataToSend stays as msg.data + } else if compressed != nil { + // Successfully compressed, use the compressed data + dataToSend = compressed + tag = protocol.Tag(compressed[:2]) + } + } + // check if this message was waiting in the queue for too long. If this is the case, return "true" to indicate that we want to close the connection. 
now := time.Now() msgWaitDuration := now.Sub(msg.enqueued) @@ -819,7 +853,7 @@ func (wp *wsPeer) writeLoopSendMsg(msg sendMessage) disconnectReason { wp.intermittentOutgoingMessageEnqueueTime.Store(msg.enqueued.UnixNano()) defer wp.intermittentOutgoingMessageEnqueueTime.Store(0) - err := wp.conn.WriteMessage(websocket.BinaryMessage, msg.data) + err := wp.conn.WriteMessage(websocket.BinaryMessage, dataToSend) if err != nil { if wp.didInnerClose.Load() == 0 { wp.log.Warn("peer write error ", err) @@ -829,14 +863,14 @@ func (wp *wsPeer) writeLoopSendMsg(msg sendMessage) disconnectReason { } wp.lastPacketTime.Store(time.Now().UnixNano()) if wp.peerType == peerTypeWs { - networkSentBytesTotal.AddUint64(uint64(len(msg.data)), nil) - networkSentBytesByTag.Add(string(tag), uint64(len(msg.data))) + networkSentBytesTotal.AddUint64(uint64(len(dataToSend)), nil) + networkSentBytesByTag.Add(string(tag), uint64(len(dataToSend))) networkMessageSentTotal.AddUint64(1, nil) networkMessageSentByTag.Add(string(tag), 1) networkMessageQueueMicrosTotal.AddUint64(uint64(time.Since(msg.peerEnqueued).Nanoseconds()/1000), nil) } else { - networkP2PSentBytesTotal.AddUint64(uint64(len(msg.data)), nil) - networkP2PSentBytesByTag.Add(string(tag), uint64(len(msg.data))) + networkP2PSentBytesTotal.AddUint64(uint64(len(dataToSend)), nil) + networkP2PSentBytesByTag.Add(string(tag), uint64(len(dataToSend))) networkP2PMessageSentTotal.AddUint64(1, nil) networkP2PMessageSentByTag.Add(string(tag), 1) networkP2PMessageQueueMicrosTotal.AddUint64(uint64(time.Since(msg.peerEnqueued).Nanoseconds()/1000), nil) @@ -1076,9 +1110,10 @@ func (wp *wsPeer) setPeerData(key string, value interface{}) { } func (wp *wsPeer) sendMessagesOfInterest(messagesOfInterestGeneration uint32, messagesOfInterestEnc []byte) { - err := wp.Unicast(wp.netCtx, messagesOfInterestEnc, protocol.MsgOfInterestTag) - if err != nil { - wp.log.Errorf("ws send msgOfInterest: %v", err) + mbytes := append([]byte(protocol.MsgOfInterestTag), 
messagesOfInterestEnc...) + ok := wp.writeNonBlock(wp.netCtx, mbytes, true, crypto.Digest{}, time.Now()) + if !ok { + wp.log.Errorf("ws send msgOfInterest: failed to send to %v", wp.GetAddress()) } else { wp.messagesOfInterestGeneration.Store(messagesOfInterestGeneration) } @@ -1098,12 +1133,61 @@ func (wp *wsPeer) vpackVoteCompressionSupported() bool { return wp.features&pfCompressedVoteVpack != 0 } +func (wp *wsPeer) vpackStatefulCompressionSupported() bool { + return wp.features&(pfCompressedVoteVpackStateful2048| + pfCompressedVoteVpackStateful1024| + pfCompressedVoteVpackStateful512| + pfCompressedVoteVpackStateful256| + pfCompressedVoteVpackStateful128| + pfCompressedVoteVpackStateful64| + pfCompressedVoteVpackStateful32| + pfCompressedVoteVpackStateful16) != 0 +} + +// getBestVpackTableSize returns the negotiated table size. +// This calculates the minimum between our max size and the peer's advertised max size. +func (wp *wsPeer) getBestVpackTableSize() uint { + // Get peer's max size from their features + var peerMaxSize uint + switch { + case wp.features&pfCompressedVoteVpackStateful2048 != 0: + peerMaxSize = 2048 + case wp.features&pfCompressedVoteVpackStateful1024 != 0: + peerMaxSize = 1024 + case wp.features&pfCompressedVoteVpackStateful512 != 0: + peerMaxSize = 512 + case wp.features&pfCompressedVoteVpackStateful256 != 0: + peerMaxSize = 256 + case wp.features&pfCompressedVoteVpackStateful128 != 0: + peerMaxSize = 128 + case wp.features&pfCompressedVoteVpackStateful64 != 0: + peerMaxSize = 64 + case wp.features&pfCompressedVoteVpackStateful32 != 0: + peerMaxSize = 32 + case wp.features&pfCompressedVoteVpackStateful16 != 0: + peerMaxSize = 16 + default: + peerMaxSize = 0 // Peer doesn't support stateful vote compression + } + + // Return the minimum between our max size and peer's max size + return min(wp.voteCompressionTableSize, peerMaxSize) +} + //msgp:ignore peerFeatureFlag type peerFeatureFlag int const ( pfCompressedProposal peerFeatureFlag = 1 
<< iota pfCompressedVoteVpack + pfCompressedVoteVpackStateful2048 + pfCompressedVoteVpackStateful1024 + pfCompressedVoteVpackStateful512 + pfCompressedVoteVpackStateful256 + pfCompressedVoteVpackStateful128 + pfCompressedVoteVpackStateful64 + pfCompressedVoteVpackStateful32 + pfCompressedVoteVpackStateful16 ) // versionPeerFeatures defines protocol version when peer features were introduced @@ -1142,14 +1226,28 @@ func decodePeerFeatures(version string, announcedFeatures string) peerFeatureFla } var features peerFeatureFlag - parts := strings.Split(announcedFeatures, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == PeerFeatureProposalCompression { + for f := range strings.SplitSeq(announcedFeatures, ",") { + switch strings.TrimSpace(f) { + case peerFeatureProposalCompression: features |= pfCompressedProposal - } - if part == PeerFeatureVoteVpackCompression { + case peerFeatureVoteVpackCompression: features |= pfCompressedVoteVpack + case peerFeatureVoteVpackStateful2048: + features |= pfCompressedVoteVpackStateful2048 + case peerFeatureVoteVpackStateful1024: + features |= pfCompressedVoteVpackStateful1024 + case peerFeatureVoteVpackStateful512: + features |= pfCompressedVoteVpackStateful512 + case peerFeatureVoteVpackStateful256: + features |= pfCompressedVoteVpackStateful256 + case peerFeatureVoteVpackStateful128: + features |= pfCompressedVoteVpackStateful128 + case peerFeatureVoteVpackStateful64: + features |= pfCompressedVoteVpackStateful64 + case peerFeatureVoteVpackStateful32: + features |= pfCompressedVoteVpackStateful32 + case peerFeatureVoteVpackStateful16: + features |= pfCompressedVoteVpackStateful16 } } return features diff --git a/network/wsPeer_test.go b/network/wsPeer_test.go index 02fa324a08..3fc21e9d09 100644 --- a/network/wsPeer_test.go +++ b/network/wsPeer_test.go @@ -25,6 +25,7 @@ import ( "io" "net" "path/filepath" + "slices" "sort" "strings" "sync/atomic" @@ -159,16 +160,16 @@ func TestVersionToFeature(t 
*testing.T) { {"1.2.3", "", peerFeatureFlag(0)}, {"a.b", "", peerFeatureFlag(0)}, {"2.1", "", peerFeatureFlag(0)}, - {"2.1", PeerFeatureProposalCompression, peerFeatureFlag(0)}, + {"2.1", peerFeatureProposalCompression, peerFeatureFlag(0)}, {"2.2", "", peerFeatureFlag(0)}, {"2.2", "test", peerFeatureFlag(0)}, {"2.2", strings.Join([]string{"a", "b"}, ","), peerFeatureFlag(0)}, - {"2.2", PeerFeatureProposalCompression, pfCompressedProposal}, - {"2.2", strings.Join([]string{PeerFeatureProposalCompression, "test"}, ","), pfCompressedProposal}, - {"2.2", strings.Join([]string{PeerFeatureProposalCompression, "test"}, ", "), pfCompressedProposal}, - {"2.2", strings.Join([]string{PeerFeatureProposalCompression, PeerFeatureVoteVpackCompression}, ","), pfCompressedVoteVpack | pfCompressedProposal}, - {"2.2", PeerFeatureVoteVpackCompression, pfCompressedVoteVpack}, - {"2.3", PeerFeatureProposalCompression, pfCompressedProposal}, + {"2.2", peerFeatureProposalCompression, pfCompressedProposal}, + {"2.2", strings.Join([]string{peerFeatureProposalCompression, "test"}, ","), pfCompressedProposal}, + {"2.2", strings.Join([]string{peerFeatureProposalCompression, "test"}, ", "), pfCompressedProposal}, + {"2.2", strings.Join([]string{peerFeatureProposalCompression, peerFeatureVoteVpackCompression}, ","), pfCompressedVoteVpack | pfCompressedProposal}, + {"2.2", peerFeatureVoteVpackCompression, pfCompressedVoteVpack}, + {"2.3", peerFeatureProposalCompression, pfCompressedProposal}, } for i, test := range tests { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { @@ -233,6 +234,8 @@ func TestPeerReadLoopSwitchAllTags(t *testing.T) { }) require.True(t, readLoopFound) require.NotEmpty(t, foundTags) + // Filter out VP, it's normalized to AV before the switch statement + allTags = slices.DeleteFunc(allTags, func(tag string) bool { return tag == "VotePackedTag" }) sort.Strings(allTags) sort.Strings(foundTags) require.Equal(t, allTags, foundTags) diff --git a/node/follower_node_test.go 
b/node/follower_node_test.go index 6c4e357ea3..bd18dc3595 100644 --- a/node/follower_node_test.go +++ b/node/follower_node_test.go @@ -309,12 +309,13 @@ func TestSimulate(t *testing.T) { round := node.ledger.LastRound() + proto := config.Consensus[protocol.ConsensusFuture] stxn := txntest.Txn{ Type: protocol.PaymentTx, Sender: testAddr, Receiver: poolAddr, Amount: 1, - Fee: 1000, + Fee: proto.MinTxnFee, FirstValid: round, LastValid: round + 1000, GenesisHash: node.ledger.GenesisHash(), diff --git a/node/node_test.go b/node/node_test.go index 0125d628a6..42c1dbf9f4 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -404,9 +404,8 @@ func TestSimpleUpgrade(t *testing.T) { t.Skip("Test takes ~50 seconds.") } - if (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" && runtime.GOOS != "darwin") && - strings.ToUpper(os.Getenv("CIRCLECI")) == "TRUE" { - t.Skip("Test is too heavy for amd64 builder running in parallel with other packages") + if runtime.GOOS == "darwin" && strings.ToUpper(os.Getenv("GITHUB_ACTIONS")) == "TRUE" { + t.Skip("Test is too heavy for macOS builder running in parallel with other packages") } // ConsensusTest0 is a version of ConsensusV0 used for testing @@ -840,6 +839,8 @@ func TestMaxSizesCorrect(t *testing.T) { */ //////////////////////////////////////////////// avSize := uint64(agreement.UnauthenticatedVoteMaxSize()) require.Equal(t, avSize, protocol.AgreementVoteTag.MaxMessageSize()) + // VP tag should have the same max size as AV tag + require.Equal(t, avSize, protocol.VotePackedTag.MaxMessageSize()) miSize := uint64(network.MessageOfInterestMaxSize()) require.Equal(t, miSize, protocol.MsgOfInterestTag.MaxMessageSize()) npSize := uint64(NetPrioResponseSignedMaxSize()) @@ -868,7 +869,7 @@ func TestMaxSizesCorrect(t *testing.T) { // subtract out the two smaller signature sizes (logicsig is biggest, it can *contain* the others) maxCombinedTxnSize -= uint64(crypto.SignatureMaxSize() + crypto.MultisigSigMaxSize()) // the logicsig size is 
*also* an overestimate, because it thinks that the logicsig and - // the logicsig args can both be up to to MaxLogicSigMaxSize, but that's the max for + // the logicsig args can both be up to MaxLogicSigMaxSize, but that's the max for // them combined, so it double counts and we have to subtract one. maxCombinedTxnSize -= uint64(bounds.MaxLogicSigMaxSize) @@ -1020,6 +1021,10 @@ func TestNodeHybridTopology(t *testing.T) { return node0Conn && node1Conn && node2Conn }, 60*time.Second, 500*time.Millisecond) + // node 0 has GossipFanout=0 but we still want to run all the machinery to update phonebooks + // (it this particular case to update peerstore with DHT nodes) + nodes[0].net.RequestConnectOutgoing(false, nil) + initialRound := nodes[0].ledger.NextRound() targetRound := initialRound + 10 @@ -1293,7 +1298,7 @@ func TestNodeHybridP2PGossipSend(t *testing.T) { Sender: addr2, FirstValid: 1, LastValid: 100, - Fee: basics.MicroAlgos{Raw: 1000}, + Fee: basics.MicroAlgos{Raw: proto.MinTxnFee}, GenesisID: nodes[2].genesisID, GenesisHash: nodes[2].genesisHash, }, diff --git a/protocol/codec.go b/protocol/codec.go index 7aed14f09a..f85d67b9bc 100644 --- a/protocol/codec.go +++ b/protocol/codec.go @@ -258,7 +258,7 @@ type MsgpDecoderBytes struct { pos int } -// Decode an objptr from from a byte stream +// Decode an objptr from a byte stream func (d *MsgpDecoderBytes) Decode(objptr msgp.Unmarshaler) error { if !objptr.CanUnmarshalMsg(objptr) { return fmt.Errorf("object %T cannot be msgp-unmashalled", objptr) diff --git a/protocol/codec_tester.go b/protocol/codec_tester.go index 7e6cb02919..08ff7ee686 100644 --- a/protocol/codec_tester.go +++ b/protocol/codec_tester.go @@ -42,7 +42,7 @@ type msgpMarshalUnmarshal interface { msgp.Unmarshaler } -var rawMsgpType = reflect.TypeOf(msgp.Raw{}) +var rawMsgpType = reflect.TypeFor[msgp.Raw]() var errSkipRawMsgpTesting = fmt.Errorf("skipping msgp.Raw serializing, since it won't be the same across go-codec and msgp") func oneOf(n int) bool 
{ @@ -54,6 +54,10 @@ type randomizeObjectCfg struct { ZeroesEveryN int // AllUintSizes will be equally likely to generate 8-bit, 16-bit, 32-bit, or 64-bit uints. AllUintSizes bool + // MaxCollectionLen bounds randomized slice/map lengths when positive. + MaxCollectionLen int + // SilenceAllocWarnings suppresses allocbound warning prints. + SilenceAllocWarnings bool } // RandomizeObjectOption is an option for RandomizeObject @@ -69,6 +73,20 @@ func RandomizeObjectWithAllUintSizes() RandomizeObjectOption { return func(cfg *randomizeObjectCfg) { cfg.AllUintSizes = true } } +// RandomizeObjectSilenceAllocWarnings silences allocbound warning prints. +func RandomizeObjectSilenceAllocWarnings() RandomizeObjectOption { + return func(cfg *randomizeObjectCfg) { cfg.SilenceAllocWarnings = true } +} + +// RandomizeObjectWithMaxCollectionLen limits randomized slice/map lengths to n (when n>0). +func RandomizeObjectWithMaxCollectionLen(n int) RandomizeObjectOption { + return func(cfg *randomizeObjectCfg) { + if n > 0 { + cfg.MaxCollectionLen = n + } + } +} + // RandomizeObject returns a random object of the same type as template func RandomizeObject(template interface{}, opts ...RandomizeObjectOption) (interface{}, error) { cfg := randomizeObjectCfg{} @@ -104,7 +122,7 @@ func RandomizeObjectField(template interface{}, opts ...RandomizeObjectOption) ( func parseStructTags(structTag string) map[string]string { tagsMap := map[string]string{} - for _, tag := range strings.Split(reflect.StructTag(structTag).Get("codec"), ",") { + for tag := range strings.SplitSeq(reflect.StructTag(structTag).Get("codec"), ",") { elements := strings.Split(tag, "=") if len(elements) != 2 { continue @@ -185,7 +203,7 @@ func checkMsgpAllocBoundDirective(dataType reflect.Type) bool { return false } -func checkBoundsLimitingTag(val reflect.Value, datapath string, structTag string) (hasAllocBound bool) { +func checkBoundsLimitingTag(val reflect.Value, datapath string, structTag string, cfg 
randomizeObjectCfg) (hasAllocBound bool) { var objType string if val.Kind() == reflect.Slice { objType = "slice" @@ -199,7 +217,9 @@ func checkBoundsLimitingTag(val reflect.Value, datapath string, structTag string tagsMap := parseStructTags(structTag) if tagsMap["allocbound"] == "-" { - printWarning(fmt.Sprintf("%s %s have an unbounded allocbound defined", objType, datapath)) + if !cfg.SilenceAllocWarnings { + printWarning(fmt.Sprintf("%s %s have an unbounded allocbound defined", objType, datapath)) + } return } @@ -234,7 +254,9 @@ func checkBoundsLimitingTag(val reflect.Value, datapath string, structTag string } if val.Type().Kind() == reflect.Slice || val.Type().Kind() == reflect.Map || val.Type().Kind() == reflect.Array { - printWarning(fmt.Sprintf("%s %s does not have an allocbound defined for %s %s", objType, datapath, val.Type().String(), val.Type().PkgPath())) + if !cfg.SilenceAllocWarnings { + printWarning(fmt.Sprintf("%s %s does not have an allocbound defined for %s %s", objType, datapath, val.Type().String(), val.Type().PkgPath())) + } } return } @@ -285,7 +307,7 @@ func randomizeValue(v reflect.Value, depth int, datapath string, tag string, rem v.SetInt(int64(rand.Uint64())) *remainingChanges-- case reflect.String: - hasAllocBound := checkBoundsLimitingTag(v, datapath, tag) + hasAllocBound := checkBoundsLimitingTag(v, datapath, tag, cfg) var buf []byte var len int if strings.HasSuffix(v.Type().PkgPath(), "go-algorand/agreement") && v.Type().Name() == "serializableError" { @@ -359,9 +381,13 @@ func randomizeValue(v reflect.Value, depth int, datapath string, tag string, rem case reflect.Slice: // we don't want to allocate a slice with size of 0. 
This is because decoding and encoding this slice // will result in nil and not slice of size 0 - l := rand.Int()%31 + 1 + maxLen := 31 + if cfg.MaxCollectionLen > 0 { + maxLen = min(maxLen, cfg.MaxCollectionLen) + } + l := rand.Intn(maxLen) + 1 - hasAllocBound := checkBoundsLimitingTag(v, datapath, tag) + hasAllocBound := checkBoundsLimitingTag(v, datapath, tag, cfg) if hasAllocBound { l = 1 } @@ -382,10 +408,15 @@ func randomizeValue(v reflect.Value, depth int, datapath string, tag string, rem v.SetBool(rand.Uint32()%2 == 0) *remainingChanges-- case reflect.Map: - hasAllocBound := checkBoundsLimitingTag(v, datapath, tag) + hasAllocBound := checkBoundsLimitingTag(v, datapath, tag, cfg) mt := v.Type() v.Set(reflect.MakeMap(mt)) - l := rand.Int() % 32 + maxLen := 32 + if cfg.MaxCollectionLen > 0 { + // preserve possibility of zero entries while capping positive lengths + maxLen = min(maxLen, cfg.MaxCollectionLen+1) + } + l := rand.Intn(maxLen) if hasAllocBound { l = 1 } diff --git a/protocol/msgp_gen.go b/protocol/msgp_gen.go index 76c2639896..a6288eac0b 100644 --- a/protocol/msgp_gen.go +++ b/protocol/msgp_gen.go @@ -207,7 +207,6 @@ func (z Error) MsgIsZero() bool { // MaxSize returns a maximum valid message size for this message type func ErrorMaxSize() (s int) { panic("Unable to determine max size: String type string(z) is unbounded") - return } // MarshalMsg implements msgp.Marshaler @@ -267,7 +266,6 @@ func (z HashID) MsgIsZero() bool { // MaxSize returns a maximum valid message size for this message type func HashIDMaxSize() (s int) { panic("Unable to determine max size: String type string(z) is unbounded") - return } // MarshalMsg implements msgp.Marshaler @@ -327,7 +325,6 @@ func (z NetworkID) MsgIsZero() bool { // MaxSize returns a maximum valid message size for this message type func NetworkIDMaxSize() (s int) { panic("Unable to determine max size: String type string(z) is unbounded") - return } // MarshalMsg implements msgp.Marshaler @@ -447,7 +444,6 @@ 
func (z Tag) MsgIsZero() bool { // MaxSize returns a maximum valid message size for this message type func TagMaxSize() (s int) { panic("Unable to determine max size: String type string(z) is unbounded") - return } // MarshalMsg implements msgp.Marshaler diff --git a/protocol/tags.go b/protocol/tags.go index ab905d2acc..61ae80a7c4 100644 --- a/protocol/tags.go +++ b/protocol/tags.go @@ -44,6 +44,7 @@ const ( //UniEnsBlockResTag Tag = "US" was used for wsfetcherservice //UniCatchupResTag Tag = "UT" was used for wsfetcherservice VoteBundleTag Tag = "VB" + VotePackedTag Tag = "VP" // Statefully compressed votes ) // The following constants are overestimates in some cases but are reasonable upper bounds @@ -54,7 +55,7 @@ const ( const AgreementVoteTagMaxSize = 1228 // MsgOfInterestTagMaxSize is the maximum size of a MsgOfInterestTag message -const MsgOfInterestTagMaxSize = 45 +const MsgOfInterestTagMaxSize = 48 // MsgDigestSkipTagMaxSize is the maximum size of a MsgDigestSkipTag message const MsgDigestSkipTagMaxSize = 69 @@ -103,6 +104,10 @@ const UniEnsBlockReqTagMaxSize = 67 // Matches current network.MaxMessageLength const VoteBundleTagMaxSize = 6 * 1024 * 1024 +// VotePackedTagMaxSize is the maximum size of a VotePackedTag message (statefully compressed vote) +// This is smaller than AgreementVoteTagMaxSize due to compression +const VotePackedTagMaxSize = AgreementVoteTagMaxSize + // MaxMessageSize returns the maximum size of a message for a given tag func (tag Tag) MaxMessageSize() uint64 { switch tag { @@ -128,6 +133,8 @@ func (tag Tag) MaxMessageSize() uint64 { return UniEnsBlockReqTagMaxSize case VoteBundleTag: return VoteBundleTagMaxSize + case VotePackedTag: + return VotePackedTagMaxSize default: return 0 // Unknown tag } @@ -146,6 +153,7 @@ var TagList = []Tag{ TxnTag, UniEnsBlockReqTag, VoteBundleTag, + VotePackedTag, } // DeprecatedTagList contains tags that are no longer used, but may still show up in MsgOfInterest messages. 
diff --git a/protocol/tags_test.go b/protocol/tags_test.go index b00c99c2ac..66cff0d762 100644 --- a/protocol/tags_test.go +++ b/protocol/tags_test.go @@ -289,6 +289,7 @@ func TestLockdownTagList(t *testing.T) { TxnTag, UniEnsBlockReqTag, VoteBundleTag, + VotePackedTag, } require.Equal(t, len(tagList), len(TagList)) tagMap := make(map[Tag]bool) diff --git a/protocol/transcode/core.go b/protocol/transcode/core.go index 11ddb6a604..993131f932 100644 --- a/protocol/transcode/core.go +++ b/protocol/transcode/core.go @@ -91,6 +91,36 @@ func Transcode(mpToJSON bool, base32Encoding, strictJSON bool, in io.Reader, out } } +func isSliceOfBytes(a interface{}) bool { + switch v := a.(type) { + case []interface{}: + for _, e := range v { + _, ok := e.([]byte) + if !ok { + return false + } + } + return len(v) > 0 // No need to treat empty slice specially + default: + return false + } +} + +func isSliceOfString(a interface{}) bool { + switch v := a.(type) { + case []interface{}: + for _, e := range v { + _, ok := e.(string) + if !ok { + return false + } + } + return len(v) > 0 // No need to treat empty slice specially + default: + return false + } +} + func toJSON(a interface{}, base32Encoding, strictJSON bool) interface{} { switch v := a.(type) { case map[interface{}]interface{}: @@ -100,16 +130,23 @@ func toJSON(a interface{}, base32Encoding, strictJSON bool) interface{} { // a []byte, base64-encode the entry and append // ":b64" to the key (or, if the base32Encoding flag // is set, base32-encode and append ":b32"). 
- ks, ok1 := k.(string) - eb, ok2 := e.([]byte) + ks, keyIsString := k.(string) + eb, entryIsBytes := e.([]byte) - if ok1 && ok2 { + switch { + case keyIsString && entryIsBytes: if base32Encoding { r[fmt.Sprintf("%s:b32", ks)] = base32.StdEncoding.EncodeToString(eb) } else { r[fmt.Sprintf("%s:b64", ks)] = base64.StdEncoding.EncodeToString(eb) } - } else { + case keyIsString && isSliceOfBytes(e): + if base32Encoding { + r[fmt.Sprintf("%s:b32", ks)] = toJSON(e, base32Encoding, strictJSON) + } else { + r[fmt.Sprintf("%s:b64", ks)] = toJSON(e, base32Encoding, strictJSON) + } + default: if strictJSON { k = fmt.Sprintf("%v", k) } @@ -133,6 +170,28 @@ func toJSON(a interface{}, base32Encoding, strictJSON bool) interface{} { } } +func decodeSliceOfString(a interface{}, decodeFunc func(string) ([]byte, error)) ([][]byte, error) { + v, ok := a.([]interface{}) + if !ok { + return nil, fmt.Errorf("expected []interface{} for decodeSliceOfString") + } + + var all [][]byte + for _, e := range v { + es, entryIsString := e.(string) + if !entryIsString { + return nil, fmt.Errorf("expected string element in slice") + } + decoded, err := decodeFunc(es) + if err != nil { + return nil, err + } + all = append(all, decoded) + } + + return all, nil +} + func fromJSON(a interface{}) interface{} { switch v := a.(type) { case map[interface{}]interface{}: @@ -142,24 +201,35 @@ func fromJSON(a interface{}) interface{} { // ":b64", and entry is a string, then base64-decode // the entry and drop the ":b64" from the key. // Same for ":b32" and base32-decoding. 
- ks, ok1 := k.(string) - es, ok2 := e.(string) + ks, keyIsString := k.(string) + es, entryIsString := e.(string) - if ok1 && ok2 && strings.HasSuffix(ks, ":b64") { + switch { + case keyIsString && strings.HasSuffix(ks, ":b64") && entryIsString: eb, err := base64.StdEncoding.DecodeString(es) if err != nil { panic(err) } - r[ks[:len(ks)-4]] = eb - } else if ok1 && ok2 && strings.HasSuffix(ks, ":b32") { + case keyIsString && strings.HasSuffix(ks, ":b32") && entryIsString: eb, err := base32.StdEncoding.DecodeString(es) if err != nil { panic(err) } - r[ks[:len(ks)-4]] = eb - } else { + case keyIsString && strings.HasSuffix(ks, ":b64") && isSliceOfString(e): + eb, err := decodeSliceOfString(e, base64.StdEncoding.DecodeString) + if err != nil { + panic(err) + } + r[ks[:len(ks)-4]] = eb + case keyIsString && strings.HasSuffix(ks, ":b32") && isSliceOfString(e): + eb, err := decodeSliceOfString(e, base32.StdEncoding.DecodeString) + if err != nil { + panic(err) + } + r[ks[:len(ks)-4]] = eb + default: r[fromJSON(k)] = fromJSON(e) } } @@ -167,29 +237,40 @@ func fromJSON(a interface{}) interface{} { case map[string]interface{}: r := make(map[string]interface{}) - for k, e := range v { + for ks, e := range v { // Special case: if key ends in ":b64", and entry // is a string, then base64-decode the entry and // drop the ":b64" from the key. Same for ":b32" // and base32-decoding. 
- es, ok := e.(string) + es, entryIsString := e.(string) - if ok && strings.HasSuffix(k, ":b64") { + switch { + case strings.HasSuffix(ks, ":b64") && entryIsString: eb, err := base64.StdEncoding.DecodeString(es) if err != nil { panic(err) } - - r[k[:len(k)-4]] = eb - } else if ok && strings.HasSuffix(k, ":b32") { + r[ks[:len(ks)-4]] = eb + case strings.HasSuffix(ks, ":b32") && entryIsString: eb, err := base32.StdEncoding.DecodeString(es) if err != nil { panic(err) } - - r[k[:len(k)-4]] = eb - } else { - r[k] = fromJSON(e) + r[ks[:len(ks)-4]] = eb + case strings.HasSuffix(ks, ":b64") && isSliceOfString(e): + eb, err := decodeSliceOfString(e, base64.StdEncoding.DecodeString) + if err != nil { + panic(err) + } + r[ks[:len(ks)-4]] = eb + case strings.HasSuffix(ks, ":b32") && isSliceOfString(e): + eb, err := decodeSliceOfString(e, base32.StdEncoding.DecodeString) + if err != nil { + panic(err) + } + r[ks[:len(ks)-4]] = eb + default: + r[ks] = fromJSON(e) } } return r diff --git a/protocol/transcode/core_test.go b/protocol/transcode/core_test.go index 8dec7b1ba5..72ceee9c6f 100644 --- a/protocol/transcode/core_test.go +++ b/protocol/transcode/core_test.go @@ -18,6 +18,7 @@ package transcode import ( "encoding/base32" + "encoding/base64" "fmt" "io" "testing" @@ -58,7 +59,8 @@ func testIdempotentRoundtrip(t *testing.T, mpdata []byte) { res, err := io.ReadAll(p3out) require.NoError(t, err) - require.Equal(t, mpdata, res) + require.Equal(t, mpdata, res, + "%v != %v", base64.StdEncoding.EncodeToString(mpdata), base64.StdEncoding.EncodeToString(res)) } type objectType int @@ -117,27 +119,39 @@ func randomObjectOfType(randtype uint64, width int, depth int) interface{} { return base32.StdEncoding.EncodeToString(buf[:]) case objectArray: var arr [2]interface{} - for i := 0; i < len(arr); i++ { + if crypto.RandUint64()%2 == 0 { // half the time, make the slice a uniform type t := crypto.RandUint64() - if t%uint64(objectTypeMax) == uint64(objectBytes) { - // We cannot cleanly pass 
through an array of - // binary blobs. - t++ + for i := range arr { + arr[i] = randomObjectOfType(t, width, depth-1) + } + } else { + for i := range arr { + t := crypto.RandUint64() + if t%uint64(objectTypeMax) == uint64(objectBytes) { + // We cannot cleanly handle binary blobs unless the entire array is. + t++ + } + arr[i] = randomObjectOfType(t, width, depth-1) } - arr[i] = randomObjectOfType(t, width, depth-1) } return arr case objectSlice: slice := make([]interface{}, 0) sz := crypto.RandUint64() % uint64(width) - for i := 0; i < int(sz); i++ { + if crypto.RandUint64()%2 == 0 { // half the time, make the slice a uniform type t := crypto.RandUint64() - if t%uint64(objectTypeMax) == uint64(objectBytes) { - // We cannot cleanly pass through an array of - // binary blobs. - t++ + for range sz { + slice = append(slice, randomObjectOfType(t, width, depth-1)) + } + } else { + for range sz { + t := crypto.RandUint64() + if t%uint64(objectTypeMax) == uint64(objectBytes) { + // We cannot cleanly handle binary blobs unless the entire slice is. + t++ + } + slice = append(slice, randomObjectOfType(t, width, depth-1)) } - slice = append(slice, randomObjectOfType(t, width, depth-1)) } return slice case objectMap: @@ -172,7 +186,7 @@ func TestIdempotence(t *testing.T) { } for i := 0; i < niter; i++ { - o := randomMap(6, 3) + o := randomMap(i%7, i%3) testIdempotentRoundtrip(t, protocol.EncodeReflect(o)) } } @@ -189,7 +203,7 @@ func TestIdempotenceMultiobject(t *testing.T) { nobj := crypto.RandUint64() % 8 buf := []byte{} for j := 0; j < int(nobj); j++ { - buf = append(buf, protocol.EncodeReflect(randomMap(6, 3))...) + buf = append(buf, protocol.EncodeReflect(randomMap(i%7, i%3))...) 
} testIdempotentRoundtrip(t, buf) } diff --git a/rpcs/blockService.go b/rpcs/blockService.go index e1b296e12b..03a4c4ebb7 100644 --- a/rpcs/blockService.go +++ b/rpcs/blockService.go @@ -505,8 +505,8 @@ func makeFallbackEndpoints(log logging.Logger, customFallbackEndpoints string) ( if customFallbackEndpoints == "" { return } - endpoints := strings.Split(customFallbackEndpoints, ",") - for _, ep := range endpoints { + endpoints := strings.SplitSeq(customFallbackEndpoints, ",") + for ep := range endpoints { if addr.IsMultiaddr(ep) { fe.endpoints = append(fe.endpoints, ep) } else { diff --git a/rpcs/blockService_test.go b/rpcs/blockService_test.go index ad714c02aa..494b4772ae 100644 --- a/rpcs/blockService_test.go +++ b/rpcs/blockService_test.go @@ -52,12 +52,6 @@ type mockUnicastPeer struct { func (mup *mockUnicastPeer) GetAddress() string { return "" } -func (mup *mockUnicastPeer) Unicast(ctx context.Context, data []byte, tag protocol.Tag) error { - return nil -} -func (mup *mockUnicastPeer) Version() string { - return "2.1" -} // GetConnectionLatency returns the connection latency between the local node and this peer. 
func (mup *mockUnicastPeer) GetConnectionLatency() time.Duration { @@ -315,7 +309,7 @@ func TestRedirectOnFullCapacity(t *testing.T) { var blk bookkeeping.Block var l2Failed bool xDone := 1000 - // Keep on sending 4 simultanious requests to the first node, to force it to redirect to node 2 + // Keep on sending 4 simultaneous requests to the first node, to force it to redirect to node 2 // then check the timestamp from the block header to confirm the redirection took place var x int forloop: @@ -350,8 +344,7 @@ forloop: require.Equal(t, "3", responses[p].Header["Retry-After"][0]) continue } - // parse the block to get the header timestamp - // timestamp is needed to know which node served the block + // parse the block to get the header timestamp which is needed to know which node served the block require.Equal(t, http.StatusOK, responses[p].StatusCode) bodyData, err := io.ReadAll(responses[p].Body) require.NoError(t, err) diff --git a/rpcs/ledgerService.go b/rpcs/ledgerService.go index 60459d9f98..3265ba3781 100644 --- a/rpcs/ledgerService.go +++ b/rpcs/ledgerService.go @@ -241,7 +241,7 @@ func (ls *LedgerService) ServeHTTP(response http.ResponseWriter, request *http.R return } defer decompressedGzip.Close() - written, err := io.Copy(response, decompressedGzip) + written, err := io.Copy(response, decompressedGzip) //nolint:gosec // writing to the network from a local file, no "decompression bomb" if err != nil { logging.Base().Infof("LedgerService.ServeHTTP : unable to write decompressed catchpoint file for round %d, written bytes %d : %v", round, written, err) } else { diff --git a/scripts/buildtools/install_buildtools.sh b/scripts/buildtools/install_buildtools.sh index 75e74fd8f8..d4f5b0c848 100755 --- a/scripts/buildtools/install_buildtools.sh +++ b/scripts/buildtools/install_buildtools.sh @@ -88,7 +88,7 @@ if [[ "${BUILDTOOLS_INSTALL}" != "ALL" ]]; then fi install_go_module golang.org/x/tools golang.org/x/tools/cmd/stringer -install_go_module 
github.com/go-swagger/go-swagger github.com/go-swagger/go-swagger/cmd/swagger +install_go_module github.com/algorand/go-swagger github.com/algorand/go-swagger/cmd/swagger install_go_module github.com/algorand/msgp install_go_module gotest.tools/gotestsum -install_go_module github.com/golangci/golangci-lint/cmd/golangci-lint +install_go_module github.com/golangci/golangci-lint/v2/cmd/golangci-lint diff --git a/scripts/buildtools/versions b/scripts/buildtools/versions index 330cd8bf90..d22acec8cc 100644 --- a/scripts/buildtools/versions +++ b/scripts/buildtools/versions @@ -1,6 +1,6 @@ golang.org/x/lint v0.0.0-20241112194109-818c5a804067 golang.org/x/tools v0.27.0 -github.com/algorand/msgp v1.1.60 -github.com/go-swagger/go-swagger v0.31.0 -gotest.tools/gotestsum v1.12.0 -github.com/golangci/golangci-lint/cmd/golangci-lint v1.62.0 +github.com/algorand/msgp v1.1.61 +github.com/algorand/go-swagger v0.0.0-20251018003531-2ea7c750dcac +gotest.tools/gotestsum v1.13.0 +github.com/golangci/golangci-lint/v2/cmd/golangci-lint v2.6.0 diff --git a/scripts/get_golang_version.sh b/scripts/get_golang_version.sh index 9b2692454d..a0fa989018 100755 --- a/scripts/get_golang_version.sh +++ b/scripts/get_golang_version.sh @@ -11,7 +11,7 @@ # Our build task-runner `mule` will refer to this script and will automatically # build a new image whenever the version number has been changed. -BUILD=1.23.9 +BUILD=1.25.3 MIN=$(echo $BUILD | cut -d. 
-f1-2).0 if [ "$1" = all ] diff --git a/scripts/travis/codegen_verification.sh b/scripts/travis/codegen_verification.sh index 934379b5b8..393f8e6965 100755 --- a/scripts/travis/codegen_verification.sh +++ b/scripts/travis/codegen_verification.sh @@ -36,6 +36,9 @@ make generate echo "Running fixcheck" "$GOPATH"/bin/algofix -error */ +echo "Running modernize checks" +make modernize + echo "Running expect linter" make expectlint diff --git a/shared/algoh/config.go b/shared/algoh/config.go index a11c60c548..d079510ca5 100644 --- a/shared/algoh/config.go +++ b/shared/algoh/config.go @@ -83,13 +83,13 @@ func LoadConfigFromFile(file string) (cfg HostConfig, err error) { return cfg, err } -// Save pretty-prints the configuration into the the specified file. +// Save pretty-prints the configuration into the specified file. func (cfg HostConfig) Save(file string) error { prettyPrint := true return codecs.SaveObjectToFile(file, cfg, prettyPrint) } -// Dump pretty-prints the configuration into the the specified stream. +// Dump pretty-prints the configuration into the specified stream. 
func (cfg HostConfig) Dump(stream io.Writer) { enc := codecs.NewFormattedJSONEncoder(stream) enc.Encode(cfg) diff --git a/shared/pingpong/accounts.go b/shared/pingpong/accounts.go index 3b86e868a3..6ea255ff26 100644 --- a/shared/pingpong/accounts.go +++ b/shared/pingpong/accounts.go @@ -20,6 +20,7 @@ import ( "encoding/binary" "fmt" "log" + "maps" "math/rand" "os" "path/filepath" @@ -385,6 +386,7 @@ func (pps *WorkerState) makeNewAssets(client *libgoal.Client) (err error) { return } assetsNeeded := int(pps.cfg.NumAsset) - len(pps.cinfo.AssetParams) + assetsToCreate := assetsNeeded // Save original count for later use newAssetAddrs := make(map[string]*pingPongAccount, assetsNeeded) for addr, acct := range pps.accounts { if assetsNeeded <= 0 { @@ -418,9 +420,9 @@ func (pps *WorkerState) makeNewAssets(client *libgoal.Client) (err error) { newAssetAddrs[addr] = acct } // wait for new assets to be created, fetch account data for them - newAssets := make(map[basics.AssetIndex]model.AssetParams, assetsNeeded) + newAssets := make(map[basics.AssetIndex]model.AssetParams, assetsToCreate) timeout := time.Now().Add(10 * time.Second) - for len(newAssets) < assetsNeeded { + for len(newAssets) < assetsToCreate { for addr, acct := range newAssetAddrs { ai, err := client.AccountInformation(addr, true) if err != nil { @@ -456,9 +458,7 @@ func (pps *WorkerState) makeNewAssets(client *libgoal.Client) (err error) { break } } - for assetID, ap := range newAssets { - pps.cinfo.AssetParams[assetID] = ap - } + maps.Copy(pps.cinfo.AssetParams, newAssets) return nil } diff --git a/shared/pingpong/config.go b/shared/pingpong/config.go index 865c92eacc..ee0f6760d1 100644 --- a/shared/pingpong/config.go +++ b/shared/pingpong/config.go @@ -104,7 +104,7 @@ var DefaultConfig = PpConfig{ RandomizeDst: false, MaxRandomDst: 200000, MaxFee: 10000, - MinFee: 1000, + MinFee: 0, MaxAmt: 1000, TxnPerSec: 200, NumPartAccounts: 10, diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go index 
99942fd7eb..4bf6ba44af 100644 --- a/shared/pingpong/pingpong.go +++ b/shared/pingpong/pingpong.go @@ -16,7 +16,7 @@ // Package pingpong provides a transaction generating utility for performance testing. // -//nolint:unused,structcheck,deadcode,varcheck // ignore unused pingpong code +//nolint:unused // ignore unused pingpong code package pingpong import ( @@ -327,10 +327,8 @@ func (pps *WorkerState) schedule(n int) { if n > 1 { nextSendTime = nextSendTime.Add(timePerStep * time.Duration(n-1)) } - for { - if now.After(nextSendTime) { - break - } + for !now.After(nextSendTime) { + dur := nextSendTime.Sub(now) if dur < durationEpsilon { break @@ -1205,12 +1203,10 @@ func (pps *WorkerState) constructAppTxn(from string, fee uint64, client *libgoal } appOptIns := pps.cinfo.OptIns[aidx] + sender = from if len(appOptIns) > 0 { indices := rand.Perm(len(appOptIns)) - limit := 5 - if len(indices) < limit { - limit = len(indices) - } + limit := min(len(indices), 5) for i := 0; i < limit; i++ { idx := indices[i] accounts = append(accounts, appOptIns[idx]) @@ -1219,6 +1215,7 @@ func (pps *WorkerState) constructAppTxn(from string, fee uint64, client *libgoal if pps.cinfo.AppParams[aidx].Creator != from && !slices.Contains(appOptIns, from) { from = accounts[0] + sender = from } accounts = accounts[1:] } diff --git a/stateproof/db_test.go b/stateproof/db_test.go index ecf5750766..fde79d242d 100644 --- a/stateproof/db_test.go +++ b/stateproof/db_test.go @@ -144,10 +144,7 @@ func TestPendingSigDB(t *testing.T) { }) require.NoError(t, err) - expectedLen := 100 - int(deletedBefore) - if expectedLen < 0 { - expectedLen = 0 - } + expectedLen := max(100-int(deletedBefore), 0) require.Equal(t, len(psigs), expectedLen) require.Equal(t, len(psigsThis), expectedLen) diff --git a/test/e2e-go/cli/goal/clerk_test.go b/test/e2e-go/cli/goal/clerk_test.go index 427fc6f309..e791b3a368 100644 --- a/test/e2e-go/cli/goal/clerk_test.go +++ b/test/e2e-go/cli/goal/clerk_test.go @@ -48,19 +48,24 @@ func 
TestClerkSendNoteEncoding(t *testing.T) { a.NotEmpty(accounts) account := accounts[0].Address + // Get current MinFee from network + client := fixture.LibGoalClient + params, err := client.SuggestedParams() + a.NoError(err) + minFee := int64(params.MinFee) + const noteText = "Sample Text-based Note" - txID, err := fixture.ClerkSend(account, account, 100, 1000, noteText) + txID, err := fixture.ClerkSend(account, account, 100, minFee, noteText) a.NoError(err) a.NotEmpty(txID) // Send 2nd txn using the note encoded as base-64 (using --noteb64) originalNoteb64Text := "Noteb64-encoded text With Binary \u0001x1x0x3" noteb64 := base64.StdEncoding.EncodeToString([]byte(originalNoteb64Text)) - txID2, err := fixture.ClerkSendNoteb64(account, account, 100, 1000, noteb64) + txID2, err := fixture.ClerkSendNoteb64(account, account, 100, minFee, noteb64) a.NoError(err) a.NotEmpty(txID2) - client := fixture.LibGoalClient status, err := client.Status() a.NoError(err) diff --git a/test/e2e-go/cli/goal/expect/basicGoalTest.exp b/test/e2e-go/cli/goal/expect/basicGoalTest.exp index b9ca99e56d..5e78f38854 100644 --- a/test/e2e-go/cli/goal/expect/basicGoalTest.exp +++ b/test/e2e-go/cli/goal/expect/basicGoalTest.exp @@ -64,7 +64,9 @@ if { [catch { # Transfer Algos from primary account to account 1 set MIN_BALANCE 100000 set TRANSFER_AMOUNT [expr {3 * $MIN_BALANCE}] - set FEE_AMOUNT 1000 + # Get suggested fee from network + set FEE_AMOUNT [::AlgorandGoal::GetSuggestedFee $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR] + puts "Network suggested fee: $FEE_AMOUNT" set TRANSACTION_ID [::AlgorandGoal::AccountTransfer $PRIMARY_WALLET_NAME "" $PRIMARY_ACCOUNT_ADDRESS $TRANSFER_AMOUNT $ACCOUNT_1_ADDRESS $FEE_AMOUNT $TEST_PRIMARY_NODE_DIR ""] # Print the transaction id diff --git a/test/e2e-go/cli/goal/expect/createWalletTest.exp b/test/e2e-go/cli/goal/expect/createWalletTest.exp index af267f27b7..a21d6b4e4d 100644 --- a/test/e2e-go/cli/goal/expect/createWalletTest.exp +++ 
b/test/e2e-go/cli/goal/expect/createWalletTest.exp @@ -8,7 +8,6 @@ if { [catch { set TEST_ALGO_DIR [lindex $argv 0] set TEST_DATA_DIR [lindex $argv 1] set MIN_BALANCE 100000 - set MIN_TXN_FEE 1000 puts "TEST_ALGO_DIR: $TEST_ALGO_DIR" puts "TEST_DATA_DIR: $TEST_DATA_DIR" @@ -39,6 +38,10 @@ if { [catch { set PRIMARY_ACCOUNT_BALANCE [::AlgorandGoal::GetAccountBalance $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR] puts "Primary Account Balance: $PRIMARY_ACCOUNT_BALANCE" + # Get suggested fee from network + set MIN_TXN_FEE [::AlgorandGoal::GetSuggestedFee $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR] + puts "Network suggested fee: $MIN_TXN_FEE" + # Associate a new account with the primary wallet set NEW_PRIMARY_ACCOUNT_ADDRESS [::AlgorandGoal::CreateAccountForWallet $PRIMARY_WALLET_NAME $PRIMARY_WALLET_PASSWORD $TEST_PRIMARY_NODE_DIR] ::AlgorandGoal::VerifyAccount $PRIMARY_WALLET_NAME $PRIMARY_WALLET_PASSWORD $NEW_PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR diff --git a/test/e2e-go/cli/goal/expect/doubleSpendingTest.exp b/test/e2e-go/cli/goal/expect/doubleSpendingTest.exp index c4615e6364..c0230ad9e7 100644 --- a/test/e2e-go/cli/goal/expect/doubleSpendingTest.exp +++ b/test/e2e-go/cli/goal/expect/doubleSpendingTest.exp @@ -73,11 +73,14 @@ if { [catch { puts "Primary Account Address: $PRIMARY_ACCOUNT_ADDRESS Balance: $PRIMARY_ACCOUNT_BALANCE" + # Get suggested fee from network + set FEE_AMOUNT [::AlgorandGoal::GetSuggestedFee $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR] + puts "Network suggested fee: $FEE_AMOUNT" + # Transfer Algos from primary account to account set MINIMUM_BALANCE 100000 set TRANSFER_AMOUNT [expr {10 * $MINIMUM_BALANCE}] - set FEE_AMOUNT 1000 set TRANSACTION_ID [::AlgorandGoal::AccountTransfer $PRIMARY_WALLET_NAME "" $PRIMARY_ACCOUNT_ADDRESS $TRANSFER_AMOUNT $ACCOUNT_1_ADDRESS $FEE_AMOUNT $TEST_PRIMARY_NODE_DIR ""] # Print the transaction id @@ -97,13 +100,13 @@ if { [catch { # 
AMOUNT-MINIMUM alone (below) would not leave any overhead to pay fees. So, transfer AMOUNT-2*MINIMUM instead. set TRANSFER_1_AMOUNT [expr $TRANSFER_AMOUNT - $MINIMUM_BALANCE - $MINIMUM_BALANCE] - set TRANSACTION_ID_2 [::AlgorandGoal::AccountTransfer $WALLET_1_NAME $WALLET_1_PASSWORD $ACCOUNT_1_ADDRESS $TRANSFER_1_AMOUNT $ACCOUNT_2_ADDRESS $FEE_AMOUNT $TEST_PRIMARY_NODE_DIR ""] - + set TRANSACTION_ID_2 [::AlgorandGoal::AccountTransfer $WALLET_1_NAME $WALLET_1_PASSWORD $ACCOUNT_1_ADDRESS $TRANSFER_1_AMOUNT $ACCOUNT_2_ADDRESS 0 $TEST_PRIMARY_NODE_DIR ""] + # perform the following operation : - # set TRANSACTION_ID_3 [::AlgorandGoal::AccountTransfer $WALLET_1_NAME $WALLET_1_PASSWORD $ACCOUNT_1_ADDRESS $TRANSFER_1_AMOUNT $ACCOUNT_3_ADDRESS $FEE_AMOUNT $TEST_PRIMARY_NODE_DIR ""] + # set TRANSACTION_ID_3 [::AlgorandGoal::AccountTransfer $WALLET_1_NAME $WALLET_1_PASSWORD $ACCOUNT_1_ADDRESS $TRANSFER_1_AMOUNT $ACCOUNT_3_ADDRESS 0 $TEST_PRIMARY_NODE_DIR ""] # but expect to see that it's failing due to double spending. 
set timeout 60 - spawn goal clerk send --fee $FEE_AMOUNT --wallet $WALLET_1_NAME --amount $TRANSFER_1_AMOUNT --from $ACCOUNT_1_ADDRESS --to $ACCOUNT_3_ADDRESS -d $TEST_PRIMARY_NODE_DIR + spawn goal clerk send --wallet $WALLET_1_NAME --amount $TRANSFER_1_AMOUNT --from $ACCOUNT_1_ADDRESS --to $ACCOUNT_3_ADDRESS -d $TEST_PRIMARY_NODE_DIR expect { timeout { close; ::AlgorandGoal::Abort "Timed out transferring funds" } "Please enter the password for wallet '$WALLET_1_NAME':" { send "$WALLET_1_PASSWORD\r"; exp_continue } diff --git a/test/e2e-go/cli/goal/expect/goalAccountTest.exp b/test/e2e-go/cli/goal/expect/goalAccountTest.exp index 6f9563cbed..ce94b2354c 100755 --- a/test/e2e-go/cli/goal/expect/goalAccountTest.exp +++ b/test/e2e-go/cli/goal/expect/goalAccountTest.exp @@ -41,7 +41,7 @@ if { [catch { set LAST_COMMITTED_BLOCK [::AlgorandGoal::GetNodeLastCommittedBlock $TEST_PRIMARY_NODE_DIR] # test that sending a transaction where the last round is equal to the current round end up resulting in "Transaction %s expired before it could be included in a block" error. 
- spawn goal clerk send -a 10 --fee 1000 --firstvalid [expr {$LAST_COMMITTED_BLOCK + 1}] --lastvalid [expr {$LAST_COMMITTED_BLOCK + 1}] -f $PRIMARY_ACCOUNT_ADDRESS -t $PRIMARY_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR + spawn goal clerk send -a 10 --firstvalid [expr {$LAST_COMMITTED_BLOCK + 1}] --lastvalid [expr {$LAST_COMMITTED_BLOCK + 1}] -f $PRIMARY_ACCOUNT_ADDRESS -t $PRIMARY_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR expect { timeout { close; ::AlgorandGoal::Abort "goal clerk send timeout" } -re {Transaction ([A-Z0-9]+) expired before it could be included in a block} { @@ -73,7 +73,10 @@ if { [catch { set MIN_BALANCE 100000 set TRANSFER_AMOUNT [expr {3 * $MIN_BALANCE}] - set FEE_AMOUNT 1000 + # Get suggested fee via REST API + set FEE_AMOUNT [::AlgorandGoal::GetSuggestedFee $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR] + puts "Network suggested fee: $FEE_AMOUNT" + set TRANSACTION_ID [::AlgorandGoal::AccountTransfer $PRIMARY_WALLET_NAME "" $PRIMARY_ACCOUNT_ADDRESS $TRANSFER_AMOUNT $NEW_ACCOUNT_ADDRESS $FEE_AMOUNT $TEST_PRIMARY_NODE_DIR ""] set GLOBAL_BYTE_SLICES 1 @@ -81,47 +84,62 @@ if { [catch { set TEAL_PROGS_DIR "$TEST_DATA_DIR/../scripts/e2e_subs/tealprogs" set APP_ID [::AlgorandGoal::AppCreate0 $PRIMARY_WALLET_NAME "" $NEW_ACCOUNT_ADDRESS ${TEAL_PROGS_DIR}/clear_program_state.teal $GLOBAL_BYTE_SLICES $LOCAL_BYTE_SLICES ${TEAL_PROGS_DIR}/clear_program_state.teal $TEST_PRIMARY_NODE_DIR] - # expect app idx = 1002 since a pre-recorded response is checked down the road + # expect app idx = 1002 (txnCounter starts at 1000, AccountTransfer increments to 1001, AppCreate gets 1002) if { $APP_ID != 1002 } { - ::AlgorandGoal::Abort "Expected app id to be 1002 but got $APP_ID. Have you posted additional transactions? 
Only transfer txn is expected before app call txn" - } - - # check JSON output to stdout - set JSON_EXPECTED "{ - \"addr\": \"47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU\", - \"algo\": 299000, - \"appp\": { - \"1002\": { - \"approv\": \"AiABASI=\", - \"clearp\": \"AiABASI=\", - \"gsch\": { - \"nbs\": 1 - } - } - }, - \"tsch\": { - \"nbs\": 1 - } -}" + ::AlgorandGoal::Abort "Expected app id to be 1002 but got $APP_ID. This is the first app created after the transfer transaction." + } + # check JSON output to stdout - verify structure, app data, and exact balance set JSON_ACTUAL [exec goal account dump -a $NEW_ACCOUNT_ADDRESS --datadir $TEST_PRIMARY_NODE_DIR] - if { $JSON_ACTUAL != $JSON_EXPECTED } { - ::AlgorandGoal::Abort "json actual output '$JSON_ACTUAL' does not match expected '$JSON_EXPECTED'" + + # Verify critical fields are present (address, app params, schemas) + if { ! [string match {*"addr": "47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU"*} $JSON_ACTUAL] } { + ::AlgorandGoal::Abort "JSON missing correct address" + } + if { ! [string match {*"appp"*} $JSON_ACTUAL] } { + ::AlgorandGoal::Abort "JSON missing app params" + } + if { ! [string match {*"1002"*} $JSON_ACTUAL] } { + ::AlgorandGoal::Abort "JSON missing app ID 1002" + } + if { ! [string match {*"approv": "AiABASI="*} $JSON_ACTUAL] } { + ::AlgorandGoal::Abort "JSON missing or incorrect approval program" + } + if { ! [string match {*"clearp": "AiABASI="*} $JSON_ACTUAL] } { + ::AlgorandGoal::Abort "JSON missing or incorrect clear program" } + # Verify exact balance: transfer amount minus app creation fee + if { ! 
[regexp {"algo": ([0-9]+)} $JSON_ACTUAL match balance] } { + ::AlgorandGoal::Abort "Failed to extract balance from JSON" + } + set EXPECTED_BALANCE [expr {$TRANSFER_AMOUNT - $FEE_AMOUNT}] + if { $balance != $EXPECTED_BALANCE } { + ::AlgorandGoal::Abort "Balance $balance does not match expected $EXPECTED_BALANCE (transfer $TRANSFER_AMOUNT - app creation fee $FEE_AMOUNT)" + } + puts "Account balance: $balance (exact match)" + # check msgpack output to a file with zero exit code - set MSGP_EXPECTED_BASE64 "hKRhZGRyxCDn8PhNBoEd+fMcjYeLEVX0Zx1RoYXCAJCGZ/RJWHBooaRhbGdvzgAEj/ikYXBwcIHNA+qDpmFwcHJvdsQFAiABASKmY2xlYXJwxAUCIAEBIqRnc2NogaNuYnMBpHRzY2iBo25icwE=" - set MSGP_EXPECTED [ exec echo -n $MSGP_EXPECTED_BASE64 | base64 --decode ] set BALREC_FILE "$TEST_ROOT_DIR/brec.msgp" spawn goal account dump -a $NEW_ACCOUNT_ADDRESS -o $BALREC_FILE --datadir $TEST_PRIMARY_NODE_DIR expect { timeout { close; ::AlgorandGoal::Abort "goal account dump timeout" } eof { ::AlgorandGoal::CheckEOF "Failed to dump account" } } - set MSGP_ACTUAL [exec cat "$BALREC_FILE"] - if { $MSGP_ACTUAL != $MSGP_EXPECTED } { - ::AlgorandGoal::Abort "msgp actual output '$MSGP_ACTUAL' does not match expected '$MSGP_EXPECTED'" + + # Verify msgpack file balance matches expected balance + set BALREC_CONTENT [exec cat $BALREC_FILE | msgpacktool -d] + if { ! [regexp {\"algo\": ([0-9]+)} $BALREC_CONTENT match msgp_balance] } { + ::AlgorandGoal::Abort "Failed to extract balance from msgpack file" + } + if { $msgp_balance != $EXPECTED_BALANCE } { + ::AlgorandGoal::Abort "Msgpack balance $msgp_balance does not match expected $EXPECTED_BALANCE" + } + # Verify app ID is 1002 in msgpack (appears as integer key in JSON output) + if { ! 
[string match {*1002:*} $BALREC_CONTENT] } { + ::AlgorandGoal::Abort "Msgpack missing app ID 1002" } + puts "Msgpack dump verified (balance: $msgp_balance)" # check some empty response on non-existing address with zero exit code set PASSED 0 diff --git a/test/e2e-go/cli/goal/expect/goalAppAccountAddressTest.exp b/test/e2e-go/cli/goal/expect/goalAppAccountAddressTest.exp index 6146fa1720..7a85e6acfb 100755 --- a/test/e2e-go/cli/goal/expect/goalAppAccountAddressTest.exp +++ b/test/e2e-go/cli/goal/expect/goalAppAccountAddressTest.exp @@ -70,7 +70,9 @@ proc goalAppAccountAddress { TEST_ALGO_DIR TEST_DATA_DIR} { # Transfer Algos from primary account to accounts 1-4 set MIN_BALANCE 1000000 set TRANSFER_AMOUNT [expr {1000 * $MIN_BALANCE}] - set FEE_AMOUNT 1000 + # Get suggested fee from network + set FEE_AMOUNT [::AlgorandGoal::GetSuggestedFee $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR] + puts "Network suggested fee: $FEE_AMOUNT" set TRANSACTION_ID [::AlgorandGoal::AccountTransfer $PRIMARY_WALLET_NAME "" $PRIMARY_ACCOUNT_ADDRESS $TRANSFER_AMOUNT $ACCOUNT_1_ADDRESS $FEE_AMOUNT $TEST_PRIMARY_NODE_DIR ""] set TRANSACTION_ID [::AlgorandGoal::AccountTransfer $PRIMARY_WALLET_NAME "" $PRIMARY_ACCOUNT_ADDRESS $TRANSFER_AMOUNT $ACCOUNT_2_ADDRESS $FEE_AMOUNT $TEST_PRIMARY_NODE_DIR ""] set TRANSACTION_ID [::AlgorandGoal::AccountTransfer $PRIMARY_WALLET_NAME "" $PRIMARY_ACCOUNT_ADDRESS $TRANSFER_AMOUNT $ACCOUNT_3_ADDRESS $FEE_AMOUNT $TEST_PRIMARY_NODE_DIR ""] diff --git a/test/e2e-go/cli/goal/expect/goalClerkGroupTest.exp b/test/e2e-go/cli/goal/expect/goalClerkGroupTest.exp index c1f6f4d5ec..6d91f60ee2 100644 --- a/test/e2e-go/cli/goal/expect/goalClerkGroupTest.exp +++ b/test/e2e-go/cli/goal/expect/goalClerkGroupTest.exp @@ -29,7 +29,9 @@ if { [catch { set PRIMARY_ACCOUNT_ADDRESS [::AlgorandGoal::GetHighestFundedAccountForWallet $PRIMARY_WALLET_NAME $TEST_PRIMARY_NODE_DIR] set timeout 30 - set FEE_AMOUNT 1000 + # Get suggested fee from network + set 
FEE_AMOUNT [::AlgorandGoal::GetSuggestedFee $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR] + puts "Network suggested fee: $FEE_AMOUNT" set TXN_FILE1 "$TEST_PRIMARY_NODE_DIR/txns1" set TRANSFER_AMOUNT1 3800 diff --git a/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp b/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp index 3227d7fdd9..5b48aa7910 100644 --- a/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp +++ b/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp @@ -114,7 +114,7 @@ if { [catch { set AT_TX2_FILE "$TEST_ROOT_DIR/atomic-tran-tx2.mspg" set AT_COMBINED_FILE "$TEST_ROOT_DIR/atomic-tran-comb.mspg" set AT_GROUPPED_FILE "$TEST_ROOT_DIR/atomic-tran-group.mspg" - spawn goal clerk send --from $PRIMARY_ACCOUNT_ADDRESS --to $PRIMARY_ACCOUNT_ADDRESS -a 1 --fee 1000 -d $TEST_PRIMARY_NODE_DIR -o $AT_TX1_FILE + spawn goal clerk send --from $PRIMARY_ACCOUNT_ADDRESS --to $PRIMARY_ACCOUNT_ADDRESS -a 1 -d $TEST_PRIMARY_NODE_DIR -o $AT_TX1_FILE expect { timeout { ::AlgorandGoal::Abort "goal clerk send timeout" } } diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp index e452fbfecc..e2dd3880f4 100644 --- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp +++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp @@ -526,19 +526,47 @@ proc ::AlgorandGoal::AccountTransfer { FROM_WALLET_NAME FROM_WALLET_PASSWORD FRO return $TRANSACTION_ID } +# Get the network's suggested minimum fee by querying the algod REST endpoint /v2/transactions/params +# Returns the min-fee value; WALLET_NAME and ACCOUNT_ADDRESS are currently unused (kept for call-site symmetry), only TEST_PRIMARY_NODE_DIR is read +proc ::AlgorandGoal::GetSuggestedFee { WALLET_NAME ACCOUNT_ADDRESS TEST_PRIMARY_NODE_DIR } { + if { [catch { + set TOKEN_FILE [open $TEST_PRIMARY_NODE_DIR/algod.token] + set ALGOD_TOKEN [string trim [read $TOKEN_FILE]] + close $TOKEN_FILE + + set NET_FILE [open $TEST_PRIMARY_NODE_DIR/algod.net] + set ALGOD_NET [string trim [read $NET_FILE]] + close $NET_FILE + + set SUGGESTED_FEE [exec curl -q -s -H "Authorization: Bearer $ALGOD_TOKEN" 
"http://$ALGOD_NET/v2/transactions/params" | jq -r {.["min-fee"]}] + + if { $SUGGESTED_FEE == "" || $SUGGESTED_FEE == "null" } { + ::AlgorandGoal::Abort "Failed to get min fee from network REST API" + } + } EXCEPTION ] } { + ::AlgorandGoal::Abort "ERROR in GetSuggestedFee: $EXCEPTION" + } + return $SUGGESTED_FEE +} + # Account Transfer ( with optional wait flag) proc ::AlgorandGoal::AccountTransferWait { FROM_WALLET_NAME FROM_WALLET_PASSWORD FROM_ACCOUNT_ADDRESS TRANSFER_AMOUNT TO_ACCOUNT_ADDRESS FEE_AMOUNT TEST_PRIMARY_NODE_DIR OUT_FILE WAIT} { set timeout 60 if { [ catch { set TRANSACTION_ID "NOT SET" + # Build fee argument - if FEE_AMOUNT is 0, omit --fee flag to use suggested fee + set FEE_ARG "" + if { $FEE_AMOUNT != 0 } { + set FEE_ARG "--fee $FEE_AMOUNT" + } if { $OUT_FILE == "" } { if { $WAIT == "" } { - spawn goal clerk send --fee $FEE_AMOUNT --wallet $FROM_WALLET_NAME --amount $TRANSFER_AMOUNT --from $FROM_ACCOUNT_ADDRESS --to $TO_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR --no-wait + spawn goal clerk send {*}$FEE_ARG --wallet $FROM_WALLET_NAME --amount $TRANSFER_AMOUNT --from $FROM_ACCOUNT_ADDRESS --to $TO_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR --no-wait } else { - spawn goal clerk send --fee $FEE_AMOUNT --wallet $FROM_WALLET_NAME --amount $TRANSFER_AMOUNT --from $FROM_ACCOUNT_ADDRESS --to $TO_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR + spawn goal clerk send {*}$FEE_ARG --wallet $FROM_WALLET_NAME --amount $TRANSFER_AMOUNT --from $FROM_ACCOUNT_ADDRESS --to $TO_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR } } else { - spawn goal clerk send --fee $FEE_AMOUNT --wallet $FROM_WALLET_NAME --amount $TRANSFER_AMOUNT --from $FROM_ACCOUNT_ADDRESS --to $TO_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR --out $OUT_FILE + spawn goal clerk send {*}$FEE_ARG --wallet $FROM_WALLET_NAME --amount $TRANSFER_AMOUNT --from $FROM_ACCOUNT_ADDRESS --to $TO_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR --out $OUT_FILE } expect { timeout { close; ::AlgorandGoal::Abort "Timed out transferring 
funds" } @@ -1408,12 +1436,35 @@ proc ::AlgorandGoal::InspectTransactionFile { TRX_FILE } { proc ::AlgorandGoal::RunPingpong {DURATION PINGPONG_OPTIONS TEST_PRIMARY_NODE_DIR} { set timeout [expr $DURATION + 60] if { [ catch { - set pingpong_base "pingpong run --duration $DURATION -d $TEST_PRIMARY_NODE_DIR --quiet " + set pingpong_base "pingpong run --duration $DURATION -d $TEST_PRIMARY_NODE_DIR " set pingpong_command [concat $pingpong_base $PINGPONG_OPTIONS] puts "starting pingpong test with command: $pingpong_command" + + # Track whether we're expecting app calls + set expecting_apps 0 + if {[string match "*--numapp*" $PINGPONG_OPTIONS]} { + set expecting_apps 1 + set saw_app_calls 0 + puts "Test expects app calls since --numapp is specified" + } + eval spawn $pingpong_command expect { - timeout { puts "pingpong test interrupted by timeout, terminating after $timeout seconds" } + timeout { ::AlgorandGoal::Abort "pingpong test interrupted by timeout after $timeout seconds" } + "no acct" {::AlgorandGoal::Abort "pingpong failed with 'no acct' error - likely sender not set in constructAppTxn"} + -re {(?i)error[ :]} { + ::AlgorandGoal::Abort "pingpong encountered error: $expect_out(0,string)" + } + -re {(?i)panic} { + ::AlgorandGoal::Abort "pingpong panicked: $expect_out(0,string)" + } + "Calling app" { + if {$expecting_apps} { + set saw_app_calls 1 + puts "Saw app call" + } + exp_continue + } -re {Sent (\d+) transactions \((\d+) attempted\).} { set actual $expect_out(1,string) ; set attempted $expect_out(2,string) ; @@ -1421,7 +1472,12 @@ proc ::AlgorandGoal::RunPingpong {DURATION PINGPONG_OPTIONS TEST_PRIMARY_NODE_DI if { $actual != $attempted } then { ::AlgorandGoal::Abort "Pingpong attempted to send $attempted transactions, but actual was $actual"; break;} exp_continue } - "Terminating after max run time of" {puts "end of ping pong test"} + "Terminating after max run time of" { + puts "end of ping pong test" + if {$expecting_apps && !$saw_app_calls} { + 
::AlgorandGoal::Abort "Test with --numapp expected app calls but saw none - possible constructAppTxn bug" + } + } eof {::AlgorandGoal::Abort "pingpong terminated unexpectedly: $expect_out(buffer)"} "Error" {::AlgorandGoal::Abort "error running pingpong: $expect_out(buffer)"} } diff --git a/test/e2e-go/cli/goal/expect/goalLogicSigTest.exp b/test/e2e-go/cli/goal/expect/goalLogicSigTest.exp index 9d7a3eb393..b668db5492 100644 --- a/test/e2e-go/cli/goal/expect/goalLogicSigTest.exp +++ b/test/e2e-go/cli/goal/expect/goalLogicSigTest.exp @@ -36,7 +36,7 @@ if { [catch { set TEAL_SOURCE "$TEST_ROOT_DIR/int1.teal" exec cp "$TEAL_PROGS_DIR/int1.teal" $TEAL_SOURCE set CONTRACT_ADDRESS [::AlgorandGoal::TealCompile $TEAL_SOURCE] - spawn goal clerk send -a 0 --fee 1000 -f $PRIMARY_ACCOUNT_ADDRESS -t $PRIMARY_ACCOUNT_ADDRESS --rekey-to $CONTRACT_ADDRESS -d $TEST_PRIMARY_NODE_DIR + spawn goal clerk send -a 0 -f $PRIMARY_ACCOUNT_ADDRESS -t $PRIMARY_ACCOUNT_ADDRESS --rekey-to $CONTRACT_ADDRESS -d $TEST_PRIMARY_NODE_DIR expect { timeout { close; ::AlgorandGoal::Abort "goal clerk send timeout" } -re {Transaction ([A-Z0-9]+) expired before it could be included in a block} { diff --git a/test/e2e-go/cli/goal/expect/limitOrderTest.exp b/test/e2e-go/cli/goal/expect/limitOrderTest.exp index a69a4275d6..3855b5f79c 100644 --- a/test/e2e-go/cli/goal/expect/limitOrderTest.exp +++ b/test/e2e-go/cli/goal/expect/limitOrderTest.exp @@ -63,10 +63,13 @@ if { [catch { # -------------------------- setup accounts ---------------------------------- + # Get suggested fee from network + set FEE_AMOUNT [::AlgorandGoal::GetSuggestedFee $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR] + puts "Network suggested fee: $FEE_AMOUNT" + # Transfer Algos from primary account to account 1 set MIN_BALANCE 1000000 set TRANSFER_AMOUNT [expr {1000 * $MIN_BALANCE}] - set FEE_AMOUNT 1000 set TRANSACTION_ID [::AlgorandGoal::AccountTransfer $PRIMARY_WALLET_NAME "" $PRIMARY_ACCOUNT_ADDRESS $TRANSFER_AMOUNT 
$ACCOUNT_1_ADDRESS $FEE_AMOUNT $TEST_PRIMARY_NODE_DIR ""] # Print the transaction id diff --git a/test/e2e-go/cli/goal/expect/pingpongAssetLargeTest.exp b/test/e2e-go/cli/goal/expect/pingpongAssetLargeTest.exp new file mode 100644 index 0000000000..e4d474ed15 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongAssetLargeTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "AssetLarge" "asset_transfer_large" "--tps 200 --numasset=10 --numaccounts 5 --refresh 10" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongAssetSmallTest.exp b/test/e2e-go/cli/goal/expect/pingpongAssetSmallTest.exp new file mode 100644 index 0000000000..c9fd7a5983 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongAssetSmallTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "AssetSmall" "asset_transfer_small" "--tps 200 --numasset=5 --numaccounts 5 --refresh 10" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongAtomicLargeTest.exp b/test/e2e-go/cli/goal/expect/pingpongAtomicLargeTest.exp new file mode 100644 index 0000000000..b4761cb673 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongAtomicLargeTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "AtomicLarge" "atomic_transfer_large" "--groupsize=12 --tps 200 --refresh 10 --numaccounts 50" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongAtomicSmallTest.exp b/test/e2e-go/cli/goal/expect/pingpongAtomicSmallTest.exp new file mode 100644 index 0000000000..da33db156b --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongAtomicSmallTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set 
TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "AtomicSmall" "atomic_transfer_small" "--groupsize=5 --tps 200 --refresh 10 --numaccounts 50" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongBigOpsBigHashTest.exp b/test/e2e-go/cli/goal/expect/pingpongBigOpsBigHashTest.exp new file mode 100644 index 0000000000..8c90eb02c3 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongBigOpsBigHashTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "BigOpsBigHash" "bigops_bighash" "--appprogops 220 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongBigOpsMediumHashTest.exp b/test/e2e-go/cli/goal/expect/pingpongBigOpsMediumHashTest.exp new file mode 100644 index 0000000000..a2be6354f3 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongBigOpsMediumHashTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "BigOpsMediumHash" "bigops_mediumhash" "--appprogops 300 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongBigOpsSmallHashTest.exp b/test/e2e-go/cli/goal/expect/pingpongBigOpsSmallHashTest.exp new file mode 100644 index 0000000000..f076007479 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongBigOpsSmallHashTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "BigOpsSmallHash" "bigops_smallhash" "--appprogops 500 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 
10 --numaccounts 50 --minaccount 100000000" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongCommon.exp b/test/e2e-go/cli/goal/expect/pingpongCommon.exp new file mode 100644 index 0000000000..7f6c1f0f32 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongCommon.exp @@ -0,0 +1,98 @@ +#!/usr/bin/expect -f +# Common utilities for pingpong tests +# This file should be sourced by individual pingpong test files + +source goalExpectCommon.exp + +# Setup a pingpong test network and return network info +proc setupPingpongNetwork { TEST_ALGO_DIR TEST_DATA_DIR } { + set timeout 60 + set TIME_STAMP [clock seconds] + + set TEST_ROOT_DIR $TEST_ALGO_DIR/root_$TIME_STAMP + set TEST_PRIMARY_NODE_DIR $TEST_ROOT_DIR/Primary/ + set NETWORK_NAME test_net_expect_$TIME_STAMP + set NETWORK_TEMPLATE "$TEST_DATA_DIR/nettemplates/TwoNodes50EachFuture.json" + + # Copy genesis file + exec cp $TEST_DATA_DIR/../../installer/genesis/devnet/genesis.json $TEST_ALGO_DIR + + # Create and start network + ::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR + ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR + + # Get network info + set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ] + puts "Primary Node Address: $PRIMARY_NODE_ADDRESS" + + set PRIMARY_WALLET_NAME unencrypted-default-wallet + + # Determine primary account + set PRIMARY_ACCOUNT_ADDRESS [::AlgorandGoal::GetHighestFundedAccountForWallet $PRIMARY_WALLET_NAME $TEST_PRIMARY_NODE_DIR] + + # Check the balance of the primary account + set PRIMARY_ACCOUNT_BALANCE [::AlgorandGoal::GetAccountBalance $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR] + puts "Primary Account Balance: $PRIMARY_ACCOUNT_BALANCE" + + # Wait for round 1 to ensure network is ready + ::AlgorandGoal::WaitForRound 1 $TEST_PRIMARY_NODE_DIR + + # Return list of important values + return [list $TEST_ROOT_DIR 
$TEST_PRIMARY_NODE_DIR $NETWORK_NAME $PRIMARY_NODE_ADDRESS $PRIMARY_ACCOUNT_ADDRESS] +} + +# Teardown a pingpong test network +proc teardownPingpongNetwork { NETWORK_NAME TEST_ROOT_DIR } { + ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR +} + +# Run a pingpong variation with the given arguments +proc runPingpongVariation { variation_name pingpong_args TEST_PRIMARY_NODE_DIR duration } { + puts "Running pingpong variation: $variation_name" + puts "Arguments: $pingpong_args" + puts "Duration: $duration seconds" + + ::AlgorandGoal::RunPingpong $duration $pingpong_args $TEST_PRIMARY_NODE_DIR +} + +# Common error handler for pingpong tests +proc pingpongTestErrorHandler { test_name exception } { + ::AlgorandGoal::Abort "ERROR in $test_name: $exception" +} + +# Main test runner that does all the work +proc runPingpongTest { test_name variation_name pingpong_args {duration 5} } { + global TEST_ALGO_DIR TEST_DATA_DIR + + # Setup network + set network_info [setupPingpongNetwork $TEST_ALGO_DIR $TEST_DATA_DIR] + set TEST_ROOT_DIR [lindex $network_info 0] + set TEST_PRIMARY_NODE_DIR [lindex $network_info 1] + set NETWORK_NAME [lindex $network_info 2] + + # Run the test + runPingpongVariation $variation_name $pingpong_args $TEST_PRIMARY_NODE_DIR $duration + + # Teardown network + teardownPingpongNetwork $NETWORK_NAME $TEST_ROOT_DIR + + puts "Pingpong $test_name Test Successful" +} + +# Main entry point for all pingpong tests +proc pingpongTestMain { test_name variation_name pingpong_args {duration 5} } { + global TEST_ALGO_DIR TEST_DATA_DIR + + if { [catch { + source goalExpectCommon.exp + puts "Starting $test_name" + puts "TEST_ALGO_DIR: $TEST_ALGO_DIR" + puts "TEST_DATA_DIR: $TEST_DATA_DIR" + + runPingpongTest $test_name $variation_name $pingpong_args $duration + + exit 0 + } EXCEPTION ] } { + pingpongTestErrorHandler $test_name $EXCEPTION + } +} \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongMediumOpsBigHashTest.exp 
b/test/e2e-go/cli/goal/expect/pingpongMediumOpsBigHashTest.exp new file mode 100644 index 0000000000..fba694ebab --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongMediumOpsBigHashTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "MediumOpsBigHash" "mediumops_bighash" "--appprogops 200 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongMediumOpsMediumHashTest.exp b/test/e2e-go/cli/goal/expect/pingpongMediumOpsMediumHashTest.exp new file mode 100644 index 0000000000..22100006cb --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongMediumOpsMediumHashTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "MediumOpsMediumHash" "mediumops_mediumhash" "--appprogops 200 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongMediumOpsSmallHashTest.exp b/test/e2e-go/cli/goal/expect/pingpongMediumOpsSmallHashTest.exp new file mode 100644 index 0000000000..43387258dc --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongMediumOpsSmallHashTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "MediumOpsSmallHash" "mediumops_smallhash" "--appprogops 200 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongPaymentTest.exp b/test/e2e-go/cli/goal/expect/pingpongPaymentTest.exp new file mode 100644 index 
0000000000..eaa9729eeb --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongPaymentTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "Payment" "payment_transaction" "--tps 200 --refresh 10 --numaccounts 50" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongRekeyTest.exp b/test/e2e-go/cli/goal/expect/pingpongRekeyTest.exp new file mode 100644 index 0000000000..6850186119 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongRekeyTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "Rekey" "rekey_payment_transaction" "--rekey=true --groupsize=2 --randomnote=true --tps 200 --refresh 10 --numaccounts 50" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongSmallOpsBigHashTest.exp b/test/e2e-go/cli/goal/expect/pingpongSmallOpsBigHashTest.exp new file mode 100644 index 0000000000..2f6a5d8f61 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongSmallOpsBigHashTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "SmallOpsBigHash" "smallops_bighash" "--appprogops 2 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongSmallOpsMediumHashTest.exp b/test/e2e-go/cli/goal/expect/pingpongSmallOpsMediumHashTest.exp new file mode 100644 index 0000000000..60a8621d88 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongSmallOpsMediumHashTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "SmallOpsMediumHash" "smallops_mediumhash" 
"--appprogops 2 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongSmallOpsSmallHashTest.exp b/test/e2e-go/cli/goal/expect/pingpongSmallOpsSmallHashTest.exp new file mode 100644 index 0000000000..b6b9ad973b --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongSmallOpsSmallHashTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "SmallOpsSmallHash" "smallops_smallhash" "--appprogops 2 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 5 --minaccount 100000000" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongStatefulBoxReadTest.exp b/test/e2e-go/cli/goal/expect/pingpongStatefulBoxReadTest.exp new file mode 100644 index 0000000000..fbd706bbe1 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongStatefulBoxReadTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "StatefulBoxRead" "stateful_box_read" "--numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000 --numboxread 5" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongStatefulBoxUpdateTest.exp b/test/e2e-go/cli/goal/expect/pingpongStatefulBoxUpdateTest.exp new file mode 100644 index 0000000000..697275025c --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongStatefulBoxUpdateTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "StatefulBoxUpdate" "stateful_box_update" "--numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000 --numboxupdate 5" \ No newline at end of file diff --git 
a/test/e2e-go/cli/goal/expect/pingpongStatefulLargeTest.exp b/test/e2e-go/cli/goal/expect/pingpongStatefulLargeTest.exp new file mode 100644 index 0000000000..53287aacfa --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongStatefulLargeTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "StatefulLarge" "stateful_teal_large" "--numapp 10 --appprogops 695 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongStatefulMediumTest.exp b/test/e2e-go/cli/goal/expect/pingpongStatefulMediumTest.exp new file mode 100644 index 0000000000..ba8d304e13 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongStatefulMediumTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "StatefulMedium" "stateful_teal_medium" "--numapp 10 --appprogops 200 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongStatefulSmallTest.exp b/test/e2e-go/cli/goal/expect/pingpongStatefulSmallTest.exp new file mode 100644 index 0000000000..3797b5edc8 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongStatefulSmallTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "StatefulSmall" "stateful_teal_small" "--numapp 10 --appprogops 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongTealHeavyTest.exp b/test/e2e-go/cli/goal/expect/pingpongTealHeavyTest.exp new file mode 100644 index 0000000000..6cc792e037 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongTealHeavyTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f 
+set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "TealHeavy" "teal_heavy_transaction" "--teal=heavy --tps 200 --refresh 10 --numaccounts 50" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongTealLightTest.exp b/test/e2e-go/cli/goal/expect/pingpongTealLightTest.exp new file mode 100644 index 0000000000..bef557103b --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongTealLightTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "TealLight" "teal_light_transaction" "--teal=light --tps 200 --refresh 10 --numaccounts 50" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongTealNormalTest.exp b/test/e2e-go/cli/goal/expect/pingpongTealNormalTest.exp new file mode 100644 index 0000000000..f0d840ebbf --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongTealNormalTest.exp @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] +source pingpongCommon.exp +pingpongTestMain "TealNormal" "teal_normal_transaction" "--teal=normal --tps 200 --refresh 10 --numaccounts 50" \ No newline at end of file diff --git a/test/e2e-go/cli/goal/expect/pingpongTest.exp b/test/e2e-go/cli/goal/expect/pingpongTest.exp deleted file mode 100644 index 99fb9a3ee7..0000000000 --- a/test/e2e-go/cli/goal/expect/pingpongTest.exp +++ /dev/null @@ -1,106 +0,0 @@ -#!/usr/bin/expect -f -#exp_internal 1 -set err 0 -log_user 1 - -source goalExpectCommon.exp - -set TEST_ALGO_DIR [lindex $argv 0] -set TEST_DATA_DIR [lindex $argv 1] - -proc pingpongTest { TEST_ALGO_DIR TEST_DATA_DIR} { - - set timeout 60 - set TIME_STAMP [clock seconds] - - set TEST_ROOT_DIR $TEST_ALGO_DIR/root_$TIME_STAMP - set TEST_PRIMARY_NODE_DIR $TEST_ROOT_DIR/Primary/ - set NETWORK_NAME test_net_expect_$TIME_STAMP - set NETWORK_TEMPLATE 
"$TEST_DATA_DIR/nettemplates/TwoNodes50EachFuture.json" - - exec cp $TEST_DATA_DIR/../../installer/genesis/devnet/genesis.json $TEST_ALGO_DIR - - # Create network - ::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR - - # Start network - ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR - - set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ] - puts "Primary Node Address: $PRIMARY_NODE_ADDRESS" - - set PRIMARY_WALLET_NAME unencrypted-default-wallet - - # Determine primary account - set PRIMARY_ACCOUNT_ADDRESS [::AlgorandGoal::GetHighestFundedAccountForWallet $PRIMARY_WALLET_NAME $TEST_PRIMARY_NODE_DIR] - - # Check the balance of the primary account - set PRIMARY_ACCOUNT_BALANCE [::AlgorandGoal::GetAccountBalance $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR] - puts "Primary Account Balance: $PRIMARY_ACCOUNT_BALANCE" - - ::AlgorandGoal::WaitForRound 1 $TEST_PRIMARY_NODE_DIR - - set TEAL_PROGS_DIR "$TEST_DATA_DIR/../scripts/e2e_subs/tealprogs" - - # Network Setup complete - #---------------------- - - # Run pingpong tests - #---------------------- - - - set pingpong_duration 5 - - set pingpongArray(1_smallops_smallhash) "--appprogops 2 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 5 --minaccount 100000000" - set pingpongArray(2_smallops_mediumhash) "--appprogops 2 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(3_smallops_bighash) "--appprogops 2 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(4_mediumops_smallhash) "--appprogops 200 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(5_mediumops_mediumhash) 
"--appprogops 200 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(6_mediumops_bighash) "--appprogops 200 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(7_bigops_smallhash) "--appprogops 500 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(8_bigops_mediumhash) "--appprogops 300 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(9_bigops_bighash) "--appprogops 220 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" - - set pingpongArray(10_payment_transaction) "--tps 200 --refresh 10 --numaccounts 50" - set pingpongArray(11_teal_light_transaction) "--teal=light --tps 200 --refresh 10 --numaccounts 50" - set pingpongArray(10_teal_normal_transaction) "--teal=normal --tps 200 --refresh 10 --numaccounts 50" - set pingpongArray(12_teal_heavy_transaction) "--teal=heavy --tps 200 --refresh 10 --numaccounts 50" - set pingpongArray(13_atomic_transfer_small_transaction) "--groupsize=5 --tps 200 --refresh 10 --numaccounts 50" - set pingpongArray(14_atomic_transfer_large_transaction) "--groupsize=12 --tps 200 --refresh 10 --numaccounts 50" - set pingpongArray(15_asset_transfer_small_transaction) "--tps 200 --numasset=5 --mf 0 --numaccounts 10 --refresh 10 --mf=1000" - set pingpongArray(16_asset_transfer_large_transaction) "--tps 200 --numasset=10 --mf 0 --numaccounts 10 --refresh 10 --mf=1000" - set pingpongArray(17_stateful_teal_small_transaction) "--numapp 10 --appprogops 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(18_stateful_teal_medium_transaction) "--numapp 10 --appprogops 200 --tps 200 
--refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(19_stateful_teal_large_transaction) "--numapp 10 --appprogops 600 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(20_rekey_payment_transaction) "--rekey=true --groupsize=2 --randomnote=true --tps 200 --refresh 10 --numaccounts 50" - - - foreach index [array names pingpongArray] { - puts "pingpongArray($index): $pingpongArray($index)" - ::AlgorandGoal::RunPingpong $pingpong_duration $pingpongArray($index) $TEST_PRIMARY_NODE_DIR - } - - # Shutdown the network - #---------------------- - ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR - - puts "Pinpong Test Successful" - -} - - -if { [catch { - source goalExpectCommon.exp - - puts "starting pinpongTest" - - puts "TEST_ALGO_DIR: $TEST_ALGO_DIR" - puts "TEST_DATA_DIR: $TEST_DATA_DIR" - - pingpongTest $TEST_ALGO_DIR $TEST_DATA_DIR - - exit 0 - -} EXCEPTION ] } { - ::AlgorandGoal::Abort "ERROR in pinpongTest: $EXCEPTION" -} diff --git a/test/e2e-go/cli/goal/expect/singleNodeNetworkTest.exp b/test/e2e-go/cli/goal/expect/singleNodeNetworkTest.exp index e0990ce12a..27084ade02 100644 --- a/test/e2e-go/cli/goal/expect/singleNodeNetworkTest.exp +++ b/test/e2e-go/cli/goal/expect/singleNodeNetworkTest.exp @@ -38,7 +38,9 @@ if { [catch { # send 1000 messages and wait for them to get applied. 
set EXPECTED_BALANCE $INITIAL_ACCOUNT_BALANCE set TRANSFER_AMOUNT_BASE 1000000 - set FEE_AMOUNT 1000 + # Get suggested fee from network + set FEE_AMOUNT [::AlgorandGoal::GetSuggestedFee $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR] + puts "Network suggested fee: $FEE_AMOUNT" set TRANSACTION_COUNT 10 for {set txIdx 0} {$txIdx < $TRANSACTION_COUNT} {incr txIdx 1} { set TRANSFER_AMOUNT [expr $TRANSFER_AMOUNT_BASE+$txIdx] diff --git a/test/e2e-go/cli/goal/expect/statefulTealCreateAppTest.exp b/test/e2e-go/cli/goal/expect/statefulTealCreateAppTest.exp index 0051d65cc2..edd4ea9bbb 100644 --- a/test/e2e-go/cli/goal/expect/statefulTealCreateAppTest.exp +++ b/test/e2e-go/cli/goal/expect/statefulTealCreateAppTest.exp @@ -55,7 +55,9 @@ proc statefulTealTest { TEST_ALGO_DIR TEST_DATA_DIR TEAL_PROGRAM} { # Transfer Algos from primary account to account 1 set MIN_BALANCE 1000000 set TRANSFER_AMOUNT [expr {1000 * $MIN_BALANCE}] - set FEE_AMOUNT 1000 + # Get suggested fee from network + set FEE_AMOUNT [::AlgorandGoal::GetSuggestedFee $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR] + puts "Network suggested fee: $FEE_AMOUNT" set TRANSACTION_ID [::AlgorandGoal::AccountTransfer $PRIMARY_WALLET_NAME "" $PRIMARY_ACCOUNT_ADDRESS $TRANSFER_AMOUNT $ACCOUNT_1_ADDRESS $FEE_AMOUNT $TEST_PRIMARY_NODE_DIR ""] set TEAL_PROGS_DIR "$TEST_DATA_DIR/../scripts/e2e_subs/tealprogs" diff --git a/test/e2e-go/cli/goal/expect/tealAndStatefulTealTest.exp b/test/e2e-go/cli/goal/expect/tealAndStatefulTealTest.exp index b4c6e817e1..95da082ae2 100644 --- a/test/e2e-go/cli/goal/expect/tealAndStatefulTealTest.exp +++ b/test/e2e-go/cli/goal/expect/tealAndStatefulTealTest.exp @@ -63,10 +63,13 @@ if { [catch { # -------------------------- setup accounts ---------------------------------- + # Get suggested fee from network + set FEE_AMOUNT [::AlgorandGoal::GetSuggestedFee $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR] + puts "Network suggested fee: 
$FEE_AMOUNT" + # Transfer Algos from primary account to account 1 set MIN_BALANCE 1000000 set TRANSFER_AMOUNT [expr {1000 * $MIN_BALANCE}] - set FEE_AMOUNT 1000 set TRANSACTION_ID [::AlgorandGoal::AccountTransfer $PRIMARY_WALLET_NAME "" $PRIMARY_ACCOUNT_ADDRESS $TRANSFER_AMOUNT $ACCOUNT_1_ADDRESS $FEE_AMOUNT $TEST_PRIMARY_NODE_DIR ""] # Print the transaction id diff --git a/test/e2e-go/features/accountPerf/sixMillion_test.go b/test/e2e-go/features/accountPerf/sixMillion_test.go index e0a94c9eb3..c5ab87da38 100644 --- a/test/e2e-go/features/accountPerf/sixMillion_test.go +++ b/test/e2e-go/features/accountPerf/sixMillion_test.go @@ -195,22 +195,26 @@ func signerGrpTxn( // create 6M unique assets by a different 6,000 accounts, and have a single account opted in, and owning all of them func Test5MAssetsScenario1(t *testing.T) { + // partitiontest.PartitionTest(t) // partition handled inside test5MAssets test5MAssets(t, 1) } // create 6M unique assets, all created by a single account. func Test5MAssetsScenario2(t *testing.T) { + // partitiontest.PartitionTest(t) // partition handled inside test5MAssets test5MAssets(t, 2) } // create 6M unique apps by a different 6,000 accounts, and have a single account opted-in all of them. // Make an app call to each of them, and make sure the app store some information into the local storage. func Test5MAssetsScenario3(t *testing.T) { + // partitiontest.PartitionTest(t) // partition handled inside test5MAssets test5MAssets(t, 3) } // create 6M unique apps by a single account. Opt-into all the applications and make sure the app stores information to both the local and global storage. 
func Test5MAssetsScenario4(t *testing.T) { + // partitiontest.PartitionTest(t) // partition handled inside test5MAssets test5MAssets(t, 4) } @@ -596,7 +600,7 @@ func scenarioA( require.NoError(t, err) log.Infof("Verifying assets...") - // Verify the assets are transfered here + // Verify the assets are transferred here tAssetAmt := uint64(0) for nai, nacc := range keys { if nacc == ownAllAccount { diff --git a/test/e2e-go/features/catchup/basicCatchup_test.go b/test/e2e-go/features/catchup/basicCatchup_test.go index b0422a9790..1fadd93e49 100644 --- a/test/e2e-go/features/catchup/basicCatchup_test.go +++ b/test/e2e-go/features/catchup/basicCatchup_test.go @@ -332,7 +332,7 @@ func shutdownClonedNode(nodeDataDir string, f *fixtures.RestClientFixture, t *te } } -// TestBasicCatchupCompletes confirms the the catchup eventually completes and stops. +// TestBasicCatchupCompletes confirms that catchup eventually completes and stops. func TestBasicCatchupCompletes(t *testing.T) { partitiontest.PartitionTest(t) defer fixtures.ShutdownSynchronizedTest(t) diff --git a/test/e2e-go/features/catchup/catchpointCatchup_test.go b/test/e2e-go/features/catchup/catchpointCatchup_test.go index 5e7ec6d14b..c4ff7a3040 100644 --- a/test/e2e-go/features/catchup/catchpointCatchup_test.go +++ b/test/e2e-go/features/catchup/catchpointCatchup_test.go @@ -171,23 +171,19 @@ func (ec *nodeExitErrorCollector) nodeExitWithError(nc *nodecontrol.NodeControll exitError, ok := err.(*exec.ExitError) if !ok { - if err != nil { - ec.mu.Lock() - ec.errors = append(ec.errors, err) - ec.messages = append(ec.messages, "Node at %s has terminated with an error", nc.GetDataDir()) - ec.mu.Unlock() - } + ec.mu.Lock() + ec.errors = append(ec.errors, err) + ec.messages = append(ec.messages, fmt.Sprintf("Node at %s has terminated with an error", nc.GetDataDir())) + ec.mu.Unlock() return } ws := exitError.Sys().(syscall.WaitStatus) exitCode := ws.ExitStatus() - if err != nil { - ec.mu.Lock() - ec.errors = 
append(ec.errors, err) - ec.messages = append(ec.messages, fmt.Sprintf("Node at %s has terminated with error code %d", nc.GetDataDir(), exitCode)) - ec.mu.Unlock() - } + ec.mu.Lock() + ec.errors = append(ec.errors, err) + ec.messages = append(ec.messages, fmt.Sprintf("Node at %s has terminated with error code %d", nc.GetDataDir(), exitCode)) + ec.mu.Unlock() } func (ec *nodeExitErrorCollector) Print() { @@ -550,8 +546,8 @@ func downloadCatchpointFile(t *testing.T, a *require.Assertions, baseURL string, var chunks []ledger.CatchpointSnapshotChunkV6 for _, d := range tarData { t.Logf("tar filename: %s, size %d", d.headerName, len(d.data)) - if strings.HasPrefix(d.headerName, "balances.") { // chunk file - idxStr := strings.TrimSuffix(strings.TrimPrefix(d.headerName, "balances."), ".msgpack") + if after, ok := strings.CutPrefix(d.headerName, "balances."); ok { // chunk file + idxStr := strings.TrimSuffix(after, ".msgpack") idx, err := strconv.Atoi(idxStr) a.NoError(err) var c ledger.CatchpointSnapshotChunkV6 @@ -679,8 +675,11 @@ func TestNodeTxHandlerRestart(t *testing.T) { addrs2, err := client2.ListAddresses(wallet2) a.NoError(err) + params, err := client2.SuggestedParams() + a.NoError(err) + // let the second node have insufficient stake for proposing a block - tx, err := client2.SendPaymentFromUnencryptedWallet(addrs2[0], addrs1[0], 1000, 4999999999000000, nil) + tx, err := client2.SendPaymentFromUnencryptedWallet(addrs2[0], addrs1[0], params.MinFee, 4999999999000000, nil) a.NoError(err) status, err := client1.Status() a.NoError(err) @@ -704,7 +703,7 @@ func TestNodeTxHandlerRestart(t *testing.T) { a.NoError(err) // let the 2nd client send a transaction - tx, err = client2.SendPaymentFromUnencryptedWallet(addrs2[0], addrs1[0], 1000, 50000, nil) + tx, err = client2.SendPaymentFromUnencryptedWallet(addrs2[0], addrs1[0], params.MinFee, 50000, nil) a.NoError(err) status, err = client2.Status() @@ -786,8 +785,11 @@ func TestReadyEndpoint(t *testing.T) { addrs2, err := 
client2.ListAddresses(wallet2) a.NoError(err) + params, err := client2.SuggestedParams() + a.NoError(err) + // let the second node have insufficient stake for proposing a block - tx, err := client2.SendPaymentFromUnencryptedWallet(addrs2[0], addrs1[0], 1000, 4999999999000000, nil) + tx, err := client2.SendPaymentFromUnencryptedWallet(addrs2[0], addrs1[0], params.MinFee, 4999999999000000, nil) a.NoError(err) status, err := client1.Status() a.NoError(err) @@ -924,8 +926,11 @@ func TestNodeTxSyncRestart(t *testing.T) { addrs2, err := client2.ListAddresses(wallet2) a.NoError(err) + params, err := client2.SuggestedParams() + a.NoError(err) + // let the second node have insufficient stake for proposing a block - tx, err := client2.SendPaymentFromUnencryptedWallet(addrs2[0], addrs1[0], 1000, 4999999999000000, nil) + tx, err := client2.SendPaymentFromUnencryptedWallet(addrs2[0], addrs1[0], params.MinFee, 4999999999000000, nil) a.NoError(err) status, err := client1.Status() a.NoError(err) @@ -940,7 +945,7 @@ func TestNodeTxSyncRestart(t *testing.T) { client1.FullStop() // let the 2nd client send a transaction - tx, err = client2.SendPaymentFromUnencryptedWallet(addrs2[0], addrs1[0], 1000, 50000, nil) + tx, err = client2.SendPaymentFromUnencryptedWallet(addrs2[0], addrs1[0], params.MinFee, 50000, nil) a.NoError(err) // now that the primary missed the transaction, start it, and let it catchup diff --git a/test/e2e-go/features/devmode/devmode_test.go b/test/e2e-go/features/devmode/devmode_test.go index 9eb3866972..209cc07eb3 100644 --- a/test/e2e-go/features/devmode/devmode_test.go +++ b/test/e2e-go/features/devmode/devmode_test.go @@ -101,7 +101,10 @@ func testTxnGroupDeltasDevMode(t *testing.T, version protocol.ConsensusVersion) wh, err := fixture.LibGoalClient.GetUnencryptedWalletHandle() require.NoError(t, err) - fundingTx, err := fixture.LibGoalClient.SendPaymentFromWalletWithLease(wh, nil, sender.Address, receiver.String(), 1000, 100000, nil, "", [32]byte{1, 2, 3}, 
basics.Round(curRound).SubSaturate(1), 0) + params, err := fixture.LibGoalClient.SuggestedParams() + require.NoError(t, err) + + fundingTx, err := fixture.LibGoalClient.SendPaymentFromWalletWithLease(wh, nil, sender.Address, receiver.String(), params.MinFee, 100000, nil, "", [32]byte{1, 2, 3}, basics.Round(curRound).SubSaturate(1), 0) require.NoError(t, err) txn, err := fixture.WaitForConfirmedTxn(curRound+5, fundingTx.ID().String()) require.NoError(t, err) diff --git a/test/e2e-go/features/incentives/challenge_test.go b/test/e2e-go/features/incentives/challenge_test.go index 9aca225dcb..62856a7e93 100644 --- a/test/e2e-go/features/incentives/challenge_test.go +++ b/test/e2e-go/features/incentives/challenge_test.go @@ -67,10 +67,12 @@ func testChallengesOnce(t *testing.T, a *require.Assertions) (retry bool) { const grace = 10 const mask = 0x80 + consensusVersion := protocol.ConsensusFuture + var fixture fixtures.RestClientFixture // Speed up rounds, keep lookback > 2 * grace period - fixture.FasterConsensus(protocol.ConsensusFuture, time.Second, lookback) - fixture.AlterConsensus(protocol.ConsensusFuture, + fixture.FasterConsensus(consensusVersion, time.Second, lookback) + fixture.AlterConsensus(consensusVersion, func(cp config.ConsensusParams) config.ConsensusParams { cp.Payouts.ChallengeInterval = 50 cp.Payouts.ChallengeGracePeriod = 10 @@ -95,13 +97,14 @@ func testChallengesOnce(t *testing.T, a *require.Assertions) (retry bool) { err := fixture.WaitForRoundWithTimeout(interval - lookback) // Make all LastHeartbeats > interval, < 2*interval a.NoError(err) + proto := config.Consensus[consensusVersion] // eligible accounts1 will get challenged with node offline, and suspended for _, account := range accounts1 { - rekeyreg(a, c1, account.Address, eligible(account.Address)) + rekeyreg(a, c1, proto, account.Address, eligible(account.Address)) } // eligible accounts2 will get challenged, but node2 will heartbeat for them for _, account := range accounts2 { - 
rekeyreg(a, c2, account.Address, eligible(account.Address)) + rekeyreg(a, c2, proto, account.Address, eligible(account.Address)) } // turn off node 1, so it can't heartbeat diff --git a/test/e2e-go/features/incentives/payouts_test.go b/test/e2e-go/features/incentives/payouts_test.go index 1a437f3555..dd73aa2b73 100644 --- a/test/e2e-go/features/incentives/payouts_test.go +++ b/test/e2e-go/features/incentives/payouts_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" @@ -45,10 +46,12 @@ func TestBasicPayouts(t *testing.T) { t.Parallel() a := require.New(fixtures.SynchronizedTest(t)) + consensusVersion := protocol.ConsensusFuture + var fixture fixtures.RestClientFixture // Make the seed lookback shorter, otherwise we need to wait 320 rounds to become IncentiveEligible. 
const lookback = 32 - fixture.FasterConsensus(protocol.ConsensusFuture, time.Second, lookback) + fixture.FasterConsensus(consensusVersion, time.Second, lookback) t.Logf("lookback is %d\n", lookback) fixture.Setup(t, filepath.Join("nettemplates", "Payouts.json")) defer fixture.Shutdown() @@ -72,8 +75,10 @@ func TestBasicPayouts(t *testing.T) { c01, account01 := clientAndAccount("Node01") relay, _ := clientAndAccount("Relay") - data01 := rekeyreg(a, c01, account01.Address, true) - data15 := rekeyreg(a, c15, account15.Address, true) + proto := config.Consensus[consensusVersion] + + data01 := rekeyreg(a, c01, proto, account01.Address, true) + data15 := rekeyreg(a, c15, proto, account15.Address, true) // Wait a few rounds after rekeyreg, this means that `lookback` rounds after // those rekeyregs, the nodes will be IncentiveEligible, but both will have @@ -87,7 +92,7 @@ func TestBasicPayouts(t *testing.T) { // have account01 burn some money to get below the eligibility cap // Starts with 100M, so burn 60M and get under 70M cap. txn, err := c01.SendPaymentFromUnencryptedWallet(account01.Address, basics.Address{}.String(), - 1000, 60_000_000_000_000, nil) + proto.MinTxnFee, 60_000_000_000_000, nil) a.NoError(err) burn, err := fixture.WaitForConfirmedTxn(txn.LastValid, txn.ID().String()) a.NoError(err) @@ -172,7 +177,7 @@ func TestBasicPayouts(t *testing.T) { // 32 rounds) only account01 (who is eligible) is proposing, so drainage // will happen soon after. 
- offline, err := c15.MakeUnsignedGoOfflineTx(account15.Address, 0, 0, 1000, [32]byte{}) + offline, err := c15.MakeUnsignedGoOfflineTx(account15.Address, 0, 0, proto.MinTxnFee, [32]byte{}) a.NoError(err) wh, err := c15.GetUnencryptedWalletHandle() a.NoError(err) @@ -233,7 +238,7 @@ a.NoError(err) // put 50 algos back into the feesink, show it pays out again - txn, err = c01.SendPaymentFromUnencryptedWallet(account01.Address, feesink.String(), 1000, 50_000_000, nil) + txn, err = c01.SendPaymentFromUnencryptedWallet(account01.Address, feesink.String(), proto.MinTxnFee, 50_000_000, nil) a.NoError(err) refill, err := fixture.WaitForConfirmedTxn(txn.LastValid, txn.ID().String()) fmt.Printf("refilled fee sink in %d\n", *refill.ConfirmedRound) @@ -254,7 +259,7 @@ a.NoError(err) wh, err = c01.GetUnencryptedWalletHandle() a.NoError(err) junk := basics.Address{0x01, 0x01}.String() - txn, err = c01.SendPaymentFromWallet(wh, nil, account01.Address, junk, 1000, 0, nil, junk /* close to */, 0, 0) + txn, err = c01.SendPaymentFromWallet(wh, nil, account01.Address, junk, proto.MinTxnFee, 0, nil, junk /* close to */, 0, 0) a.NoError(err) close, err := fixture.WaitForConfirmedTxn(txn.LastValid, txn.ID().String()) a.NoError(err) @@ -276,8 +281,10 @@ data, err = relay.AccountData(feesink.String()) a.NoError(err) // Don't want to bother dealing with the exact fees paid in/out. - a.Less(data.MicroAlgos.Raw, expected+5000) - a.Greater(data.MicroAlgos.Raw, expected-5000) + // Allow wider tolerance to account for varying MinTxnFee values across protocol versions + tolerance := uint64(100000) // 0.1 Algo tolerance, in microAlgos + a.Less(data.MicroAlgos.Raw, uint64(expected+tolerance)) + a.Greater(data.MicroAlgos.Raw, uint64(expected-tolerance)) // Lest one be concerned about that cavalier attitude, wait for a few more // rounds, and show feesink is unchanged. 
@@ -296,7 +303,7 @@ func getblock(client libgoal.Client, round basics.Round) (bookkeeping.Block, err return client.BookkeepingBlock(round) } -func rekeyreg(a *require.Assertions, client libgoal.Client, address string, becomeEligible bool) basics.AccountData { +func rekeyreg(a *require.Assertions, client libgoal.Client, proto config.ConsensusParams, address string, becomeEligible bool) basics.AccountData { // we start by making an _offline_ tx here, because we want to populate the // key material ourself with a copy of the account's existing material. That // makes it an _online_ keyreg. That allows the running node to chug along @@ -304,7 +311,7 @@ func rekeyreg(a *require.Assertions, client libgoal.Client, address string, beco // IncentiveEligible, and to get some funds into FeeSink because we will // watch it drain toward bottom of test. - fee := uint64(1000) + fee := proto.MinTxnFee if becomeEligible { fee = 12_000_000 } diff --git a/test/e2e-go/features/incentives/suspension_test.go b/test/e2e-go/features/incentives/suspension_test.go index edc32fe881..497822310a 100644 --- a/test/e2e-go/features/incentives/suspension_test.go +++ b/test/e2e-go/features/incentives/suspension_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/algorand/go-algorand/config" "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" @@ -40,6 +41,7 @@ func TestBasicSuspension(t *testing.T) { t.Parallel() a := require.New(fixtures.SynchronizedTest(t)) + consensusVersion := protocol.ConsensusFuture // Overview of this test: // Start a three-node network (70,20,10), all online // Wait for 10 and 20% nodes to propose (we never suspend accounts with lastProposed=lastHeartbeat=0) @@ -54,7 +56,7 @@ func TestBasicSuspension(t *testing.T) { // get back online after being suspended. 
(0.8^32 is very small) const lookback = 32 - fixture.FasterConsensus(protocol.ConsensusFuture, time.Second, lookback) + fixture.FasterConsensus(consensusVersion, time.Second, lookback) fixture.Setup(t, filepath.Join("nettemplates", "Suspension.json")) defer fixture.Shutdown() @@ -70,8 +72,9 @@ func TestBasicSuspension(t *testing.T) { c10, account10 := clientAndAccount("Node10") c20, account20 := clientAndAccount("Node20") - rekeyreg(a, c10, account10.Address, true) - rekeyreg(a, c20, account20.Address, true) + proto := config.Consensus[consensusVersion] + rekeyreg(a, c10, proto, account10.Address, true) + rekeyreg(a, c20, proto, account20.Address, true) // Accounts are now suspendable whether they have proposed yet or not // because keyreg sets LastHeartbeat. Stop c20 which means account20 will be diff --git a/test/e2e-go/features/incentives/whalejoin_test.go b/test/e2e-go/features/incentives/whalejoin_test.go index 7c7ae400d9..9334cfe03a 100644 --- a/test/e2e-go/features/incentives/whalejoin_test.go +++ b/test/e2e-go/features/incentives/whalejoin_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/algorand/go-algorand/config" "github.com/stretchr/testify/require" v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2" @@ -196,9 +197,10 @@ func TestBigIncrease(t *testing.T) { t.Parallel() a := require.New(fixtures.SynchronizedTest(t)) + consensusVersion := protocol.ConsensusFuture var fixture fixtures.RestClientFixture const lookback = 32 - fixture.FasterConsensus(protocol.ConsensusFuture, time.Second/2, lookback) + fixture.FasterConsensus(consensusVersion, time.Second/2, lookback) fixture.Setup(t, filepath.Join("nettemplates", "Payouts.json")) defer fixture.Shutdown() @@ -227,7 +229,8 @@ func TestBigIncrease(t *testing.T) { // have a fairly recent proposal, and not get knocked off. 
pay(a, c1, account01.Address, account15.Address, 99*account01.Amount/100) - rekeyreg(a, c1, account01.Address, true) + proto := config.Consensus[consensusVersion] + rekeyreg(a, c1, proto, account01.Address, true) // 2. Wait lookback rounds wait(&fixture, a, lookback) @@ -253,7 +256,9 @@ func wait(f *fixtures.RestClientFixture, a *require.Assertions, count basics.Rou func pay(a *require.Assertions, c libgoal.Client, from string, to string, amount uint64) v2.PreEncodedTxInfo { - pay, err := c.SendPaymentFromUnencryptedWallet(from, to, 1000, amount, nil) + params, err := c.SuggestedParams() + a.NoError(err) + pay, err := c.SendPaymentFromUnencryptedWallet(from, to, params.MinFee, amount, nil) a.NoError(err) tx, err := c.WaitForConfirmedTxn(pay.LastValid, pay.ID().String()) a.NoError(err) diff --git a/test/e2e-go/features/participation/accountParticipationTransitions_test.go b/test/e2e-go/features/participation/accountParticipationTransitions_test.go index bc7d920bef..e2ee562121 100644 --- a/test/e2e-go/features/participation/accountParticipationTransitions_test.go +++ b/test/e2e-go/features/participation/accountParticipationTransitions_test.go @@ -60,7 +60,7 @@ func registerParticipationAndWait(t *testing.T, client libgoal.Client, part acco sAccount := part.Address().String() sWH, err := client.GetUnencryptedWalletHandle() require.NoError(t, err) - goOnlineTx, err := client.MakeRegistrationTransactionWithGenesisID(part, txParams.Fee, txParams.LastRound+1, txParams.LastRound+1, [32]byte{}, true) + goOnlineTx, err := client.MakeRegistrationTransactionWithGenesisID(part, txParams.MinFee, txParams.LastRound+1, txParams.LastRound+1, [32]byte{}, true) assert.NoError(t, err) require.Equal(t, sAccount, goOnlineTx.Src().String()) onlineTxID, err := client.SignAndBroadcastTransaction(sWH, nil, goOnlineTx) diff --git a/test/e2e-go/features/participation/overlappingParticipationKeys_test.go b/test/e2e-go/features/participation/overlappingParticipationKeys_test.go index 
6e63e1b198..e16a2fc270 100644 --- a/test/e2e-go/features/participation/overlappingParticipationKeys_test.go +++ b/test/e2e-go/features/participation/overlappingParticipationKeys_test.go @@ -249,12 +249,8 @@ func prepareParticipationKey(a *require.Assertions, fixture *fixtures.RestClient partkeyHandle.Vacuum(context.Background()) persistedParticipation.Close() - unsignedTxn := persistedParticipation.GenerateRegistrationTransaction(basics.MicroAlgos{Raw: 1000}, basics.Round(txStartRound), basics.Round(txEndRound), [32]byte{}, c.EnableStateProofKeyregCheck) + unsignedTxn := persistedParticipation.GenerateRegistrationTransaction(basics.MicroAlgos{Raw: c.MinTxnFee}, basics.Round(txStartRound), basics.Round(txEndRound), [32]byte{}, c.EnableStateProofKeyregCheck) copy(unsignedTxn.GenesisHash[:], genesisHash[:]) - if err != nil { - a.NoError(err) - return err - } regTransactions[int(txStartRound)] = unsignedTxn.Sign(rootAccount.Secrets()) - return err + return nil } diff --git a/test/e2e-go/features/stateproofs/stateproofs_test.go b/test/e2e-go/features/stateproofs/stateproofs_test.go index a83113afe4..df13c1bdc5 100644 --- a/test/e2e-go/features/stateproofs/stateproofs_test.go +++ b/test/e2e-go/features/stateproofs/stateproofs_test.go @@ -694,18 +694,18 @@ func installParticipationKey(t *testing.T, client libgoal.Client, addr string, f } func registerParticipationAndWait(t *testing.T, client libgoal.Client, part account.Participation) model.NodeStatusResponse { - currentRnd, err := client.CurrentRound() + txParams, err := client.SuggestedParams() require.NoError(t, err) sAccount := part.Address().String() sWH, err := client.GetUnencryptedWalletHandle() require.NoError(t, err) - goOnlineTx, err := client.MakeRegistrationTransactionWithGenesisID(part, 1000, currentRnd, part.LastValid, [32]byte{}, true) + goOnlineTx, err := client.MakeRegistrationTransactionWithGenesisID(part, txParams.MinFee, txParams.LastRound+1, part.LastValid, [32]byte{}, true) assert.NoError(t, err) 
require.Equal(t, sAccount, goOnlineTx.Src().String()) onlineTxID, err := client.SignAndBroadcastTransaction(sWH, nil, goOnlineTx) require.NoError(t, err) require.NotEmpty(t, onlineTxID) - status, err := client.WaitForRound(currentRnd + 1) + status, err := client.WaitForRound(txParams.LastRound + 1) require.NoError(t, err) return status } @@ -1112,7 +1112,7 @@ func TestAtMostOneSPFullPoolWithLoad(t *testing.T) { ps.amount = cntr cntr = cntr + 1 // ignore the returned error (most of the time will be error) - _, err := relay.SendPaymentFromUnencryptedWallet(account0, account0, params.Fee, ps.amount, []byte{byte(params.LastRound)}) + _, err := relay.SendPaymentFromUnencryptedWallet(account0, account0, params.MinFee, ps.amount, []byte{byte(params.LastRound)}) require.Error(t, err) require.Equal(t, "HTTP 400 Bad Request: TransactionPool.checkPendingQueueSize: transaction pool have reached capacity", err.Error()) time.Sleep(25 * time.Millisecond) diff --git a/test/e2e-go/features/transactions/application_test.go b/test/e2e-go/features/transactions/application_test.go index a3aa476b61..8068b8c6fb 100644 --- a/test/e2e-go/features/transactions/application_test.go +++ b/test/e2e-go/features/transactions/application_test.go @@ -58,7 +58,7 @@ func TestApplication(t *testing.T) { _, err = client.GetUnencryptedWalletHandle() a.NoError(err) - fee := uint64(1000) + fee := proto.MinTxnFee counter := `#pragma version 5 int 1 diff --git a/test/e2e-go/features/transactions/close_account_test.go b/test/e2e-go/features/transactions/close_account_test.go index c0eca4f487..f6bcd791cb 100644 --- a/test/e2e-go/features/transactions/close_account_test.go +++ b/test/e2e-go/features/transactions/close_account_test.go @@ -61,12 +61,15 @@ func TestAccountsCanClose(t *testing.T) { status, err := client.Status() a.NoError(err) + params, err := client.SuggestedParams() + a.NoError(err) + // Transfer some money to acct0 and wait. 
- tx, err := client.SendPaymentFromUnencryptedWallet(baseAcct, acct0, 1000, 10000000, nil) + tx, err := client.SendPaymentFromUnencryptedWallet(baseAcct, acct0, params.MinFee, 10000000, nil) a.NoError(err) fixture.WaitForConfirmedTxn(status.LastRound+10, tx.ID().String()) - tx, err = client.SendPaymentFromWallet(walletHandle, nil, acct0, acct1, 1000, 1000000, nil, acct2, 0, 0) + tx, err = client.SendPaymentFromWallet(walletHandle, nil, acct0, acct1, params.MinFee, 1000000, nil, acct2, 0, 0) a.NoError(err) fixture.WaitForConfirmedTxn(status.LastRound+10, tx.ID().String()) diff --git a/test/e2e-go/features/transactions/proof_test.go b/test/e2e-go/features/transactions/proof_test.go index e51693514b..a03c095ebd 100644 --- a/test/e2e-go/features/transactions/proof_test.go +++ b/test/e2e-go/features/transactions/proof_test.go @@ -76,6 +76,9 @@ func TestTxnMerkleProof(t *testing.T) { status, err := client.Status() a.NoError(err) + params, err := client.SuggestedParams() + a.NoError(err) + // Transfer some money to acct0, as well as other random accounts to // fill up the Merkle tree with more than one element. 
// we do not want to have a full tree in order the catch an empty element edge case @@ -83,11 +86,11 @@ func TestTxnMerkleProof(t *testing.T) { accti, err := client.GenerateAddress(walletHandle) a.NoError(err) - _, err = client.SendPaymentFromUnencryptedWallet(baseAcct, accti, 1000, 10000000, nil) + _, err = client.SendPaymentFromUnencryptedWallet(baseAcct, accti, params.MinFee, 10000000, nil) a.NoError(err) } - tx, err := client.SendPaymentFromUnencryptedWallet(baseAcct, acct0, 1000, 10000000, nil) + tx, err := client.SendPaymentFromUnencryptedWallet(baseAcct, acct0, params.MinFee, 10000000, nil) a.NoError(err) txid := tx.ID() @@ -160,6 +163,9 @@ func TestTxnMerkleProofSHA256(t *testing.T) { status, err := client.Status() a.NoError(err) + params, err := client.SuggestedParams() + a.NoError(err) + // Transfer some money to acct0, as well as other random accounts to // fill up the Merkle tree with more than one element. // we do not want to have a full tree in order the catch an empty element edge case @@ -167,11 +173,11 @@ func TestTxnMerkleProofSHA256(t *testing.T) { accti, err := client.GenerateAddress(walletHandle) a.NoError(err) - _, err = client.SendPaymentFromUnencryptedWallet(baseAcct, accti, 1000, 10000000, nil) + _, err = client.SendPaymentFromUnencryptedWallet(baseAcct, accti, params.MinFee, 10000000, nil) a.NoError(err) } - tx, err := client.SendPaymentFromUnencryptedWallet(baseAcct, acct0, 1000, 10000000, nil) + tx, err := client.SendPaymentFromUnencryptedWallet(baseAcct, acct0, params.MinFee, 10000000, nil) a.NoError(err) txid := tx.ID() diff --git a/test/e2e-go/restAPI/other/misc_test.go b/test/e2e-go/restAPI/other/misc_test.go index e9a07f9f72..287aef0abd 100644 --- a/test/e2e-go/restAPI/other/misc_test.go +++ b/test/e2e-go/restAPI/other/misc_test.go @@ -123,8 +123,10 @@ func TestSendingNotClosingAccountErrs(t *testing.T) { if someAddress == "" { t.Error("no addr with funds") } - amt := someBal - 10000 - 1 - _, err = 
testClient.SendPaymentFromWallet(wh, nil, someAddress, emptyAddress, 10000, amt, nil, "", 0, 0) + params, err := testClient.SuggestedParams() + a.NoError(err) + amt := someBal - params.MinFee - 1 + _, err = testClient.SendPaymentFromWallet(wh, nil, someAddress, emptyAddress, params.MinFee, amt, nil, "", 0, 0) a.Error(err) } diff --git a/test/e2e-go/restAPI/restClient_test.go b/test/e2e-go/restAPI/restClient_test.go index d7608d259b..b6a4fc96cb 100644 --- a/test/e2e-go/restAPI/restClient_test.go +++ b/test/e2e-go/restAPI/restClient_test.go @@ -17,7 +17,6 @@ package restapi import ( - "context" "flag" "math" "os" @@ -150,9 +149,11 @@ func TestClientRejectsBadFromAddressWhenSending(t *testing.T) { a.NoError(err) addresses, err := testClient.ListAddresses(wh) a.NoError(err) + params, err := testClient.SuggestedParams() + a.NoError(err) badAccountAddress := "This is absolutely not a valid account address." goodAccountAddress := addresses[0] - _, err = testClient.SendPaymentFromWallet(wh, nil, badAccountAddress, goodAccountAddress, 10000, 100000, nil, "", 0, 0) + _, err = testClient.SendPaymentFromWallet(wh, nil, badAccountAddress, goodAccountAddress, params.MinFee, 100000, nil, "", 0, 0) a.Error(err) } @@ -167,9 +168,11 @@ func TestClientRejectsBadToAddressWhenSending(t *testing.T) { a.NoError(err) addresses, err := testClient.ListAddresses(wh) a.NoError(err) + params, err := testClient.SuggestedParams() + a.NoError(err) badAccountAddress := "This is absolutely not a valid account address." 
goodAccountAddress := addresses[0] - _, err = testClient.SendPaymentFromWallet(wh, nil, goodAccountAddress, badAccountAddress, 10000, 100000, nil, "", 0, 0) + _, err = testClient.SendPaymentFromWallet(wh, nil, goodAccountAddress, badAccountAddress, params.MinFee, 100000, nil, "", 0, 0) a.Error(err) } @@ -184,6 +187,8 @@ func TestClientRejectsMutatedFromAddressWhenSending(t *testing.T) { a.NoError(err) addresses, err := testClient.ListAddresses(wh) a.NoError(err) + params, err := testClient.SuggestedParams() + a.NoError(err) goodAccountAddress := addresses[0] var unmutatedAccountAddress string if len(addresses) > 1 { @@ -193,7 +198,7 @@ func TestClientRejectsMutatedFromAddressWhenSending(t *testing.T) { a.NoError(err) } mutatedAccountAddress := mutateStringAtIndex(unmutatedAccountAddress, 0) - _, err = testClient.SendPaymentFromWallet(wh, nil, mutatedAccountAddress, goodAccountAddress, 10000, 100000, nil, "", 0, 0) + _, err = testClient.SendPaymentFromWallet(wh, nil, mutatedAccountAddress, goodAccountAddress, params.MinFee, 100000, nil, "", 0, 0) a.Error(err) } @@ -208,6 +213,8 @@ func TestClientRejectsMutatedToAddressWhenSending(t *testing.T) { a.NoError(err) addresses, err := testClient.ListAddresses(wh) a.NoError(err) + params, err := testClient.SuggestedParams() + a.NoError(err) goodAccountAddress := addresses[0] var unmutatedAccountAddress string if len(addresses) > 1 { @@ -217,7 +224,7 @@ func TestClientRejectsMutatedToAddressWhenSending(t *testing.T) { a.NoError(err) } mutatedAccountAddress := mutateStringAtIndex(unmutatedAccountAddress, 0) - _, err = testClient.SendPaymentFromWallet(wh, nil, goodAccountAddress, mutatedAccountAddress, 10000, 100000, nil, "", 0, 0) + _, err = testClient.SendPaymentFromWallet(wh, nil, goodAccountAddress, mutatedAccountAddress, params.MinFee, 100000, nil, "", 0, 0) a.Error(err) } @@ -232,9 +239,11 @@ func TestClientRejectsSendingMoneyFromAccountForWhichItHasNoKey(t *testing.T) { a.NoError(err) addresses, err := 
testClient.ListAddresses(wh) a.NoError(err) + params, err := testClient.SuggestedParams() + a.NoError(err) goodAccountAddress := addresses[0] nodeDoesNotHaveKeyForThisAddress := "NJY27OQ2ZXK6OWBN44LE4K43TA2AV3DPILPYTHAJAMKIVZDWTEJKZJKO4A" - _, err = testClient.SendPaymentFromWallet(wh, nil, nodeDoesNotHaveKeyForThisAddress, goodAccountAddress, 10000, 100000, nil, "", 0, 0) + _, err = testClient.SendPaymentFromWallet(wh, nil, nodeDoesNotHaveKeyForThisAddress, goodAccountAddress, params.MinFee, 100000, nil, "", 0, 0) a.Error(err) } @@ -250,6 +259,8 @@ func TestClientOversizedNote(t *testing.T) { a.NoError(err) addresses, err := testClient.ListAddresses(wh) a.NoError(err) + params, err := testClient.SuggestedParams() + a.NoError(err) fromAddress := addresses[0] var toAddress string if len(addresses) > 1 { @@ -260,7 +271,7 @@ func TestClientOversizedNote(t *testing.T) { } maxTxnNoteBytes := config.Consensus[protocol.ConsensusCurrentVersion].MaxTxnNoteBytes note := make([]byte, maxTxnNoteBytes+1) - _, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, 10000, 100000, note, "", 0, 0) + _, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, params.MinFee, 100000, note, "", 0, 0) a.Error(err) } @@ -276,6 +287,8 @@ func TestClientCanSendAndGetNote(t *testing.T) { a.NoError(err) addresses, err := testClient.ListAddresses(wh) a.NoError(err) + params, err := testClient.SuggestedParams() + a.NoError(err) _, someAddress := GetMaxBalAddr(t, testClient, addresses) if someAddress == "" { t.Error("no addr with funds") @@ -283,7 +296,7 @@ func TestClientCanSendAndGetNote(t *testing.T) { toAddress := GetDestAddr(t, testClient, addresses, someAddress, wh) maxTxnNoteBytes := config.Consensus[protocol.ConsensusCurrentVersion].MaxTxnNoteBytes note := make([]byte, maxTxnNoteBytes) - tx, err := testClient.SendPaymentFromWallet(wh, nil, someAddress, toAddress, 10000, 100000, note, "", 0, 0) + tx, err := testClient.SendPaymentFromWallet(wh, nil, 
someAddress, toAddress, params.MinFee, 100000, note, "", 0, 0) a.NoError(err) txStatus, err := WaitForTransaction(t, testClient, tx.ID().String(), 30*time.Second) a.NoError(err) @@ -302,12 +315,14 @@ func TestClientCanGetTransactionStatus(t *testing.T) { a.NoError(err) addresses, err := testClient.ListAddresses(wh) a.NoError(err) + params, err := testClient.SuggestedParams() + a.NoError(err) _, someAddress := GetMaxBalAddr(t, testClient, addresses) if someAddress == "" { t.Error("no addr with funds") } toAddress := GetDestAddr(t, testClient, addresses, someAddress, wh) - tx, err := testClient.SendPaymentFromWallet(wh, nil, someAddress, toAddress, 10000, 100000, nil, "", 0, 0) + tx, err := testClient.SendPaymentFromWallet(wh, nil, someAddress, toAddress, params.MinFee, 100000, nil, "", 0, 0) t.Log(string(protocol.EncodeJSON(tx))) a.NoError(err) t.Log(tx.ID().String()) @@ -327,6 +342,8 @@ func TestAccountBalance(t *testing.T) { a.NoError(err) addresses, err := testClient.ListAddresses(wh) a.NoError(err) + params, err := testClient.SuggestedParams() + a.NoError(err) _, someAddress := GetMaxBalAddr(t, testClient, addresses) if someAddress == "" { t.Error("no addr with funds") @@ -334,7 +351,7 @@ func TestAccountBalance(t *testing.T) { toAddress, err := testClient.GenerateAddress(wh) a.NoError(err) - tx, err := testClient.SendPaymentFromWallet(wh, nil, someAddress, toAddress, 10000, 100000, nil, "", 0, 0) + tx, err := testClient.SendPaymentFromWallet(wh, nil, someAddress, toAddress, params.MinFee, 100000, nil, "", 0, 0) a.NoError(err) _, err = WaitForTransaction(t, testClient, tx.ID().String(), 30*time.Second) a.NoError(err) @@ -387,7 +404,7 @@ func TestAccountParticipationInfo(t *testing.T) { Type: protocol.KeyRegistrationTx, Header: transactions.Header{ Sender: addr, - Fee: basics.MicroAlgos{Raw: 10000}, + Fee: basics.MicroAlgos{Raw: params.MinFee}, FirstValid: firstRound, LastValid: lastRound, GenesisHash: gh, @@ -437,8 +454,7 @@ func TestClientCanGetGoRoutines(t 
*testing.T) { a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.AlgodClient - ctx, ctxCancel := context.WithCancel(context.Background()) - defer ctxCancel() + ctx := t.Context() goRoutines, err := testClient.GetGoRoutines(ctx) a.NoError(err) a.NotEmpty(goRoutines) @@ -466,23 +482,25 @@ func TestSendingTooMuchErrs(t *testing.T) { } fromBalance, err := testClient.GetBalance(fromAddress) a.NoError(err) + params, err := testClient.SuggestedParams() + a.NoError(err) // too much amount - _, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, 10000, fromBalance+100, nil, "", 0, 0) + _, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, params.MinFee, fromBalance+100, nil, "", 0, 0) t.Log(err) a.Error(err) // waaaay too much amount - _, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, 10000, math.MaxUint64, nil, "", 0, 0) + _, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, params.MinFee, math.MaxUint64, nil, "", 0, 0) t.Log(err) a.Error(err) // too much fee - _, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, fromBalance+100, 10000, nil, "", 0, 0) + _, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, fromBalance+100, params.MinFee, nil, "", 0, 0) t.Log(err) a.Error(err) // waaaay too much fee - _, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, math.MaxUint64, 10000, nil, "", 0, 0) + _, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, math.MaxUint64, params.MinFee, nil, "", 0, 0) t.Log(err) a.Error(err) } @@ -523,7 +541,9 @@ func TestSendingFromEmptyAccountErrs(t *testing.T) { toAddress, err = testClient.GenerateAddress(wh) a.NoError(err) } - _, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, 10000, 100000, nil, "", 0, 0) + params, err := testClient.SuggestedParams() + a.NoError(err) + _, err = 
testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, params.MinFee, 100000, nil, "", 0, 0) a.Error(err) } @@ -556,7 +576,9 @@ func TestSendingTooLittleToEmptyAccountErrs(t *testing.T) { if someAddress == "" { t.Error("no addr with funds") } - _, err = testClient.SendPaymentFromWallet(wh, nil, someAddress, emptyAddress, 10000, 1, nil, "", 0, 0) + params, err := testClient.SuggestedParams() + a.NoError(err) + _, err = testClient.SendPaymentFromWallet(wh, nil, someAddress, emptyAddress, params.MinFee, 1, nil, "", 0, 0) a.Error(err) } diff --git a/test/e2e-go/restAPI/stateproof/stateproofRestAPI_test.go b/test/e2e-go/restAPI/stateproof/stateproofRestAPI_test.go index a7cc3042fd..038509a89d 100644 --- a/test/e2e-go/restAPI/stateproof/stateproofRestAPI_test.go +++ b/test/e2e-go/restAPI/stateproof/stateproofRestAPI_test.go @@ -83,7 +83,7 @@ func TestStateProofInParticipationInfo(t *testing.T) { Type: protocol.KeyRegistrationTx, Header: transactions.Header{ Sender: addr, - Fee: basics.MicroAlgos{Raw: 10000}, + Fee: basics.MicroAlgos{Raw: params.MinFee}, FirstValid: firstRound, LastValid: lastRound, GenesisHash: gh, @@ -181,7 +181,7 @@ func TestNilStateProofInParticipationInfo(t *testing.T) { Type: protocol.KeyRegistrationTx, Header: transactions.Header{ Sender: addr, - Fee: basics.MicroAlgos{Raw: 10000}, + Fee: basics.MicroAlgos{Raw: params.MinFee}, FirstValid: firstRound, LastValid: lastRound, GenesisHash: gh, diff --git a/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go b/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go index 380690df65..3f7d779afc 100644 --- a/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go +++ b/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go @@ -17,6 +17,7 @@ package transactions import ( + "maps" "path/filepath" "testing" @@ -82,17 +83,13 @@ func TestManyAccountsCanGoOnline(t *testing.T) { txidsToAccountsWaveTwo := make(map[string]string) for _, account := range txidsToAccountsWaveOne 
{ txidsToChildAccounts := cascadeCreateAndFundAccounts(amountToSend, transactionFee, account, client, a) - for txid, account := range txidsToChildAccounts { - txidsToAccountsWaveTwo[txid] = account - } + maps.Copy(txidsToAccountsWaveTwo, txidsToChildAccounts) } allConfirmed = fixture.WaitForAllTxnsToConfirm(fundingTimeoutRound, txidsToAccountsWaveTwo) a.True(allConfirmed, "Not all transactions confirmed. Failing test and aborting early.") for _, account := range txidsToAccountsWaveOne { txidsToChildAccounts := cascadeCreateAndFundAccounts(amountToSend, transactionFee, account, client, a) - for txid, account := range txidsToChildAccounts { - txidsToAccountsWaveTwo[txid] = account - } + maps.Copy(txidsToAccountsWaveTwo, txidsToChildAccounts) } allConfirmed = fixture.WaitForAllTxnsToConfirm(fundingTimeoutRound, txidsToAccountsWaveTwo) a.True(allConfirmed, "Not all transactions confirmed. Failing test and aborting early.") @@ -103,18 +100,14 @@ func TestManyAccountsCanGoOnline(t *testing.T) { txidsToAccountsWaveThree := make(map[string]string) for _, account := range txidsToAccountsWaveTwo { txidsToChildAccounts := cascadeCreateAndFundAccounts(amountToSend, transactionFee, account, client, a) - for txid, account := range txidsToChildAccounts { - txidsToAccountsWaveThree[txid] = account - } + maps.Copy(txidsToAccountsWaveThree, txidsToChildAccounts) } allConfirmed = fixture.WaitForAllTxnsToConfirm(fundingTimeoutRound, txidsToAccountsWaveThree) a.True(allConfirmed, "Not all transactions confirmed. 
Failing test and aborting early.") for _, account := range txidsToAccountsWaveTwo { txidsToChildAccounts := cascadeCreateAndFundAccounts(amountToSend, transactionFee, account, client, a) - for txid, account := range txidsToChildAccounts { - txidsToAccountsWaveThree[txid] = account - } + maps.Copy(txidsToAccountsWaveThree, txidsToChildAccounts) } allConfirmed = fixture.WaitForAllTxnsToConfirm(fundingTimeoutRound, txidsToAccountsWaveThree) a.True(allConfirmed, "Not all transactions confirmed. Failing test and aborting early.") diff --git a/test/e2e-go/upgrades/stateproof_participation_test.go b/test/e2e-go/upgrades/stateproof_participation_test.go index 8935421b37..db33661d25 100644 --- a/test/e2e-go/upgrades/stateproof_participation_test.go +++ b/test/e2e-go/upgrades/stateproof_participation_test.go @@ -114,8 +114,11 @@ func registerKeyInto(client *libgoal.Client, a *require.Assertions, lastValid ba cparams := config.Consensus[ver] + prms, err := client.SuggestedParams() + a.NoError(err) + tx := partKey.GenerateRegistrationTransaction( - basics.MicroAlgos{Raw: 1000}, + basics.MicroAlgos{Raw: prms.MinFee}, 0, 100, [32]byte{}, @@ -123,9 +126,6 @@ func registerKeyInto(client *libgoal.Client, a *require.Assertions, lastValid ba ) if cparams.SupportGenesisHash { - prms, err := client.SuggestedParams() - a.NoError(err) - var genHash crypto.Digest copy(genHash[:], prms.GenesisHash) tx.GenesisHash = genHash diff --git a/test/framework/fixtures/expectFixture.go b/test/framework/fixtures/expectFixture.go index 2c03a704bb..418a2bd6bd 100644 --- a/test/framework/fixtures/expectFixture.go +++ b/test/framework/fixtures/expectFixture.go @@ -140,7 +140,6 @@ func skipExpectTests() bool { // Run Process all expect script files with suffix Test.exp within the current directory func (ef *ExpectFixture) Run() { disabledTest := map[string]string{ - "pingpongTest.exp": "broken", "listExpiredParticipationKeyTest.exp": "flaky", } for testName := range ef.expectFiles { @@ -194,7 +193,7 @@ 
func (ef *ExpectFixture) Run() { syncTest.Logf("err running '%s': %s\nstdout: %s\nstderr: %s\n", testName, err, outBuf, stderr) syncTest.Fail() } else { - // t.Logf("stdout: %s", string(outBuf.Bytes())) + syncTest.Logf("stdout: %s", outBuf.String()) ef.removeTestDir(workingDir) } }) diff --git a/test/linttest/lintissues.go b/test/linttest/lintissues.go deleted file mode 100644 index d03382dd8a..0000000000 --- a/test/linttest/lintissues.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2019-2025 Algorand, Inc. -// This file is part of go-algorand -// -// go-algorand is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as -// published by the Free Software Foundation, either version 3 of the -// License, or (at your option) any later version. -// -// go-algorand is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with go-algorand. If not, see . 
- -package linttest - -import ( - "fmt" -) - -type myStruct struct { - a int32 - b float64 - c bool -} - -func (m *myStruct) couldError() error { - return fmt.Errorf("an error occurred") -} - -func doSomething() { - m := myStruct{a: 2, b: 2.0} - m.couldError() -} diff --git a/test/netperf-go/puppeteer/promMetricFetcher_test.go b/test/netperf-go/puppeteer/promMetricFetcher_test.go index f654f683df..62726c985b 100644 --- a/test/netperf-go/puppeteer/promMetricFetcher_test.go +++ b/test/netperf-go/puppeteer/promMetricFetcher_test.go @@ -19,9 +19,12 @@ package main import ( "fmt" "testing" + + "github.com/algorand/go-algorand/test/partitiontest" ) func TestMetricsFetcher(t *testing.T) { + partitiontest.PartitionTest(t) // this test function was meant for local development test and not as an official unit test. t.Skip() //host := "3.81.68.74" diff --git a/test/netperf-go/puppeteer/puppeteer.go b/test/netperf-go/puppeteer/puppeteer.go index a2a6b385c4..65716eac45 100644 --- a/test/netperf-go/puppeteer/puppeteer.go +++ b/test/netperf-go/puppeteer/puppeteer.go @@ -439,15 +439,17 @@ func (p *puppet) collectMetrics() { fmt.Fprintf(os.Stderr, "Failed to read '%s' : %v\n", telemetryHostFile, err) } else { metricFetcher := makePromMetricFetcher(string(hostNameBytes)) - var err error - if results, err1 := metricFetcher.getMetric(metric.Query); err1 == nil { - if result, err1 := metricFetcher.getSingleValue(results); err1 == nil { - p.metrics[metric.Name] = result - } - } + results, err := metricFetcher.getMetric(metric.Query) if err != nil { fmt.Fprintf(os.Stderr, "Failed to read metric '%s' : %v\n", metric.Name, err) + continue + } + result, err := metricFetcher.getSingleValue(results) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to parse metric '%s' : %v\n", metric.Name, err) + continue } + p.metrics[metric.Name] = result } default: if verbose { diff --git a/test/netperf-go/puppeteer/puppeteer_test.go b/test/netperf-go/puppeteer/puppeteer_test.go index 
937f0de88a..3b68efa0b2 100644 --- a/test/netperf-go/puppeteer/puppeteer_test.go +++ b/test/netperf-go/puppeteer/puppeteer_test.go @@ -18,9 +18,12 @@ package main import ( "testing" + + "github.com/algorand/go-algorand/test/partitiontest" ) func TestMetricsPrintout(t *testing.T) { + partitiontest.PartitionTest(t) // this test function was meant for local development test and not as an official unit test. t.Skip() puppets := []*puppet{ diff --git a/test/scripts/e2e_client_runner.py b/test/scripts/e2e_client_runner.py index d024233793..325a8a11e9 100755 --- a/test/scripts/e2e_client_runner.py +++ b/test/scripts/e2e_client_runner.py @@ -118,17 +118,18 @@ def _script_thread_inner(runset, scriptname, timeout): txn = algosdk.transaction.PaymentTxn(maxpubaddr, params, addr, 1_000_000_000_000) stxn = kmd.sign_transaction(pubw, '', txn) txid = algod.send_transaction(stxn) - ptxinfo = None + txinfo = None for _ in range(max_init_wait_rounds): txinfo = algod.pending_transaction_info(txid) - if txinfo.get('round'): + if txinfo.get('confirmed-round'): break status = algod.status_after_block(round_num=round) round = status['last-round'] - if ptxinfo is not None: - sys.stderr.write('failed to initialize temporary test wallet account for test ({}) for {} rounds.\n'.format(scriptname, max_init_wait_rounds)) + if not txinfo or not txinfo.get('confirmed-round'): + sys.stderr.write('failed to initialize temporary test wallet account for test ({}) for {} rounds. 
txinfo: {}\n'.format(scriptname, max_init_wait_rounds, txinfo)) runset.done(scriptname, False, time.time() - start) + return env = dict(runset.env) env['TEMPDIR'] = os.path.join(env['TEMPDIR'], walletname) diff --git a/test/scripts/e2e_subs/app-accounts.sh b/test/scripts/e2e_subs/app-accounts.sh index d79f30192b..6f90ffbdf5 100755 --- a/test/scripts/e2e_subs/app-accounts.sh +++ b/test/scripts/e2e_subs/app-accounts.sh @@ -7,9 +7,6 @@ date "+${scriptname} start %Y%m%d_%H%M%S" my_dir="$(dirname "$0")" source "$my_dir/rest.sh" "$@" -function rest() { - curl -q -s -H "Authorization: Bearer $PUB_TOKEN" "$NET$1" -} set -e set -x @@ -23,21 +20,43 @@ TEAL=test/scripts/e2e_subs/tealprogs gcmd="goal -w ${WALLET}" ACCOUNT=$(${gcmd} account list|awk '{ print $3 }') + +# Get network's minimum fee +MIN_FEE=$(get_min_fee) +echo "Network MinFee: $MIN_FEE" + # Create a smaller account so rewards won't change balances. SMALL=$(${gcmd} account new | awk '{ print $6 }') -# Under one algo receives no rewards -${gcmd} clerk send -a 900000 -f "$ACCOUNT" -t "$SMALL" +# Under one algo receives no rewards. Fund with extra to cover higher MinTxnFee costs. 
+# Must stay under 1,000,000 microAlgos to avoid earning rewards (which would break exact balance checks) +# This test uses 8 transactions from SMALL and requires minimum balance for app opt-in +# Min balance breakdown (app has 4 global byteslices, 1 local int): +# - Base: 100000 +# - App creation (AppFlatParamsMinBalance): 100000 +# - App opt-in (AppFlatOptInMinBalance): 100000 +# - Global schema: 4 byteslices * (SchemaMinBalancePerEntry + SchemaBytesMinBalance) = 4 * 50000 = 200000 +# - Local schema: 1 int * (SchemaMinBalancePerEntry + SchemaUintMinBalance) = 28500 +# Total: 528500 +NUM_TXNS=8 +MIN_BALANCE_NEEDED=528500 +# Calculate DEPOSIT amount needed for all withdrawal tests (20k + 10k + 18k + 2*fee + 100k min) +DEPOSIT_AMOUNT=$((20000 + 10000 + 18000 + 2*MIN_FEE + 100000)) +SMALL_FUNDING=$((MIN_BALANCE_NEEDED + NUM_TXNS * MIN_FEE + DEPOSIT_AMOUNT + 50000)) +if [ $SMALL_FUNDING -ge 1000000 ]; then + SMALL_FUNDING=999000 +fi +${gcmd} clerk send -a $SMALL_FUNDING -f "$ACCOUNT" -t "$SMALL" function balance { acct=$1; shift goal account balance -a "$acct" | awk '{print $1}' } -[ "$(balance "$ACCOUNT")" = 999999099000 ] -[ "$(balance "$SMALL")" = 900000 ] +[ "$(balance "$ACCOUNT")" = $((1000000000000 - SMALL_FUNDING - MIN_FEE)) ] +[ "$(balance "$SMALL")" = $SMALL_FUNDING ] APPID=$(${gcmd} app create --creator "${SMALL}" --approval-prog=${TEAL}/app-escrow.teal --global-byteslices 4 --global-ints 0 --local-byteslices 0 --local-ints 1 --clear-prog=${TEAL}/approve-all.teal | grep Created | awk '{ print $6 }') -[ "$(balance "$SMALL")" = 899000 ] # 1000 fee +[ "$(balance "$SMALL")" = $(($SMALL_FUNDING - MIN_FEE)) ] # app create fee function appl { method=$1; shift @@ -66,15 +85,16 @@ function sign { TXID=$(${gcmd} app optin --app-id "$APPID" --from "${SMALL}" | app-txid) # Rest succeeds, no stray inner-txn array [ "$(rest "/v2/transactions/pending/$TXID" | jq '.["inner-txn"]')" == null ] -[ "$(balance "$SMALL")" = 898000 ] # 1000 fee +[ "$(balance "$SMALL")" = 
$(($SMALL_FUNDING - 2*MIN_FEE)) ] # app create fee + opt-in fee +DEPOSIT=$DEPOSIT_AMOUNT # Use the pre-calculated deposit amount appl "deposit():void" -o "$T/deposit.tx" -payin 150000 -o "$T/pay1.tx" +payin $DEPOSIT -o "$T/pay1.tx" cat "$T/deposit.tx" "$T/pay1.tx" | ${gcmd} clerk group -i - -o "$T/group.tx" sign group ${gcmd} clerk rawsend -f "$T/group.stx" -[ "$(balance "$SMALL")" = 746000 ] # 2 fees, 150,000 deposited -[ "$(balance "$APPACCT")" = 150000 ] +[ "$(balance "$SMALL")" = $(($SMALL_FUNDING - 4*MIN_FEE - DEPOSIT)) ] # app create + opt-in + 2 group fees + deposit +[ "$(balance "$APPACCT")" = $DEPOSIT ] # Withdraw 20,000 in app. Confirm that inner txn is visible to transaction API. TXID=$(appl "withdraw(uint64):void" --app-arg="int:20000" | app-txid) @@ -86,33 +106,39 @@ TXID=$(appl "withdraw(uint64):void" --app-arg="int:20000" | app-txid) ROUND=$(rest "/v2/transactions/pending/$TXID" | jq '.["confirmed-round"]') rest "/v2/blocks/$ROUND" | jq .block.txns[0].dt.itx -[ "$(balance "$SMALL")" = 765000 ] # 1 fee, 20,000 withdrawn -[ "$(balance "$APPACCT")" = 129000 ] # 20k withdraw, fee paid by app account +[ "$(balance "$SMALL")" = $(($SMALL_FUNDING - 5*MIN_FEE - DEPOSIT + 20000)) ] # app create + opt-in + 2 group fees + deposit - 20k withdrawn + withdraw call fee +[ "$(balance "$APPACCT")" = $((DEPOSIT - 20000 - MIN_FEE)) ] # deposit - 20k withdrawn - inner txn fee paid by app account -appl "withdraw(uint64):void" --app-arg="int:10000" --fee 2000 -[ "$(balance "$SMALL")" = 773000 ] # 2000 fee, 10k withdrawn -[ "$(balance "$APPACCT")" = 119000 ] # 10k withdraw, fee credit used +appl "withdraw(uint64):void" --app-arg="int:10000" --fee $((MIN_FEE * 2)) +[ "$(balance "$SMALL")" = $(($SMALL_FUNDING - 7*MIN_FEE - DEPOSIT + 30000)) ] # app create + opt-in + 2 group + withdraw + fee-pooled withdraw + deposit - 30k withdrawn +[ "$(balance "$APPACCT")" = $((DEPOSIT - 20000 - MIN_FEE - 10000)) ] # deposit - 20k withdrawn - first inner txn fee - 10k withdrawn (fee credit 
used) # Try to get app account below zero -# (By app logic, it's OK - 150k was deposited, but fees have cut in) -appl "withdraw(uint64):void" --app-arg="int:120000" && exit 1 -[ "$(balance "$SMALL")" = 773000 ] # no change -[ "$(balance "$APPACCT")" = 119000 ] # no change +# (By app logic, it's OK - enough was deposited, but fees have cut in) +WITHDRAW_FAIL=$((DEPOSIT - 20000 - MIN_FEE)) # Try to withdraw more than available +appl "withdraw(uint64):void" --app-arg="int:$WITHDRAW_FAIL" && exit 1 +[ "$(balance "$SMALL")" = $(($SMALL_FUNDING - 7*MIN_FEE - DEPOSIT + 30000)) ] # no change +[ "$(balance "$APPACCT")" = $((DEPOSIT - 20000 - MIN_FEE - 10000)) ] # no change # Try to get app account below min balance by withdrawing too much -appl "withdraw(uint64):void" --app-arg="int:20000" && exit 1 -[ "$(balance "$SMALL")" = 773000 ] # no change -[ "$(balance "$APPACCT")" = 119000 ] # no change +# At this point, app account should have just above 100k min balance +# Calculate a withdrawal that would drop below 100k +WITHDRAW_TOO_MUCH=$((DEPOSIT - 20000 - MIN_FEE - 10000 - 100000 + 1000)) # Try to leave less than 100k +appl "withdraw(uint64):void" --app-arg="int:$WITHDRAW_TOO_MUCH" && exit 1 +[ "$(balance "$SMALL")" = $(($SMALL_FUNDING - 7*MIN_FEE - DEPOSIT + 30000)) ] # no change +[ "$(balance "$APPACCT")" = $((DEPOSIT - 20000 - MIN_FEE - 10000)) ] # no change # Try to get app account below min balance b/c of fee -appl "withdraw(uint64):void" --app-arg="int:18001" && exit 1 -[ "$(balance "$SMALL")" = 773000 ] # no change -[ "$(balance "$APPACCT")" = 119000 ] # no change +WITHDRAW_FAIL_FEE=$((DEPOSIT - 20000 - MIN_FEE - 10000 - 100000 - MIN_FEE + 1)) # 1 more than allowed +appl "withdraw(uint64):void" --app-arg="int:$WITHDRAW_FAIL_FEE" && exit 1 +[ "$(balance "$SMALL")" = $(($SMALL_FUNDING - 7*MIN_FEE - DEPOSIT + 30000)) ] # no change +[ "$(balance "$APPACCT")" = $((DEPOSIT - 20000 - MIN_FEE - 10000)) ] # no change # Show that it works AT exactly min balance -appl 
"withdraw(uint64):void" --app-arg="int:18000" -[ "$(balance "$SMALL")" = 790000 ] # +17k (18k - fee) -[ "$(balance "$APPACCT")" = 100000 ] # -19k (18k + fee) +WITHDRAW_EXACT=$((DEPOSIT - 20000 - MIN_FEE - 10000 - 100000 - MIN_FEE)) # Leaves exactly 100k + fee +appl "withdraw(uint64):void" --app-arg="int:$WITHDRAW_EXACT" +[ "$(balance "$SMALL")" = $(($SMALL_FUNDING - 8*MIN_FEE - DEPOSIT + 30000 + WITHDRAW_EXACT)) ] # All fees + all withdrawals +[ "$(balance "$APPACCT")" = 100000 ] # Exactly at min balance date "+${scriptname} OK %Y%m%d_%H%M%S" diff --git a/test/scripts/e2e_subs/app-assets-access.sh b/test/scripts/e2e_subs/app-assets-access.sh index 9258f5a55b..38078c8276 100755 --- a/test/scripts/e2e_subs/app-assets-access.sh +++ b/test/scripts/e2e_subs/app-assets-access.sh @@ -8,9 +8,6 @@ date "+${scriptname} start %Y%m%d_%H%M%S" my_dir="$(dirname "$0")" source "$my_dir/rest.sh" "$@" -function rest() { - curl -q -s -H "Authorization: Bearer $PUB_TOKEN" "$NET$1" -} set -e set -x @@ -24,18 +21,61 @@ TEAL=test/scripts/e2e_subs/tealprogs gcmd="goal -w ${WALLET}" ACCOUNT=$(${gcmd} account list|awk '{ print $3 }') -# Create a smaller account so rewards won't change balances. + +# Get network's minimum fee +MIN_FEE=$(get_min_fee) +echo "Network MinFee: $MIN_FEE" + +# This test uses approximately 24 transactions from SMALL account (similar to app-assets.sh) +NUM_TXNS=24 +NEEDED_FOR_FEES=$((MIN_FEE * NUM_TXNS)) + +# SMALL's minimum balance at peak: +# - Base: 100000 +# - App opt-in (with 1 local int): 128500 (100000 + 25000 + 3500 for schema) +# - 2 created assets (each with auto opt-in): 400000 (2 * 200000) +# - Total: 628500 +MAX_MIN_BALANCE=628500 + +# Total amount needed: fees + min balance +TOTAL_NEEDED=$((NEEDED_FOR_FEES + MAX_MIN_BALANCE + MIN_FEE * 3)) + +# Fund with at least 2 Algos to ensure enough for high MIN_FEE scenarios +# Note: Account will earn rewards (RewardUnit = 1,000,000), so balance checks must be tolerant +SMALL_FUNDING=$((TOTAL_NEEDED > 2000000 ? 
TOTAL_NEEDED : 2000000)) + +# Tolerance for balance checks (to account for rewards earned) +# Allow up to 5000 microAlgos tolerance per balance check +BALANCE_TOLERANCE=5000 SMALL=$(${gcmd} account new | awk '{ print $6 }') -# Under one algo receives no rewards -${gcmd} clerk send -a 999000 -f "$ACCOUNT" -t "$SMALL" +${gcmd} clerk send -a $SMALL_FUNDING -f "$ACCOUNT" -t "$SMALL" function balance { acct=$1; shift goal account balance -a "$acct" | awk '{print $1}' } -[ "$(balance "$ACCOUNT")" = 999999000000 ] -[ "$(balance "$SMALL")" = 999000 ] +# Check if balance is within tolerance of expected value +# Usage: check_balance +# Allows BALANCE_TOLERANCE above expected (for rewards) but exact match below +function check_balance { + local acct=$1 + local expected=$2 + local actual + actual=$(balance "$acct") + local diff=$((actual - expected)) + + if [ $diff -lt 0 ] || [ $diff -gt $BALANCE_TOLERANCE ]; then + echo "ERROR: Balance check failed for $acct" + echo " Expected: $expected (tolerance: +0 to +$BALANCE_TOLERANCE)" + echo " Actual: $actual (diff: $diff)" + return 1 + fi + return 0 +} + +check_balance "$ACCOUNT" $((1000000000000 - $SMALL_FUNDING - MIN_FEE)) +check_balance "$SMALL" $SMALL_FUNDING function created_assets { acct=$1; @@ -63,7 +103,7 @@ function assets { } APPID=$(${gcmd} app create --creator "${SMALL}" --approval-prog=${TEAL}/assets-escrow9.teal --global-byteslices 4 --global-ints 0 --local-byteslices 0 --local-ints 1 --clear-prog=<(printf '#pragma version 9\nint 1') | grep Created | awk '{ print $6 }') -[ "$(balance "$SMALL")" = 998000 ] # 1000 fee +check_balance "$SMALL" $((SMALL_FUNDING - MIN_FEE)) # Use --access on all app calls function appl { @@ -121,16 +161,16 @@ function sign { TXID=$(${gcmd} app optin --app-id "$APPID" --from "${SMALL}" | app-txid) # Rest succeeds, no stray inner-txn array [ "$(rest "/v2/transactions/pending/$TXID" | jq '.["inner-txn"]')" == null ] -[ "$(balance "$SMALL")" = 997000 ] # 1000 fee +check_balance "$SMALL" 
$((SMALL_FUNDING - MIN_FEE * 2)) ASSETID=$(asset-create 1000000 --name "e2e" --unitname "e" | asset-id) -[ "$(balance "$SMALL")" = 996000 ] # 1000 fee +check_balance "$SMALL" $((SMALL_FUNDING - MIN_FEE * 3)) ${gcmd} clerk send -a 999000 -f "$ACCOUNT" -t "$APPACCT" ! appl "optin(uint64):void" --app-arg "int:$ASSETID" --foreign-asset="$ASSETID" --from="$SMALL" || exit 1 appl "optin(uint64):void" --app-arg "int:$ASSETID" --foreign-asset="$ASSETID" --from="$SMALL" --holding "$ASSETID+app($APPID)" -[ "$(balance "$APPACCT")" = 998000 ] # 1000 fee -[ "$(balance "$SMALL")" = 995000 ] +check_balance "$APPACCT" $((999000 - MIN_FEE)) +check_balance "$SMALL" $((SMALL_FUNDING - MIN_FEE * 4)) # Deposit is exactly like app-assets.sh only sender's local state is accessed appl "deposit():void" -o "$T/deposit.tx" --from="$SMALL" @@ -143,8 +183,8 @@ ${gcmd} clerk rawsend -f "$T/group.stx" [ "$(asset_bal "$SMALL")" = 999000 ] # asset balance [ "$(asset_ids "$APPACCT")" = $ASSETID ] [ "$(asset_bal "$APPACCT")" = 1000 ] -[ "$(balance "$SMALL")" = 993000 ] # 2 fees -[ "$(balance "$APPACCT")" = 998000 ] +check_balance "$SMALL" $((SMALL_FUNDING - MIN_FEE * 6)) +check_balance "$APPACCT" $((999000 - MIN_FEE)) # Withdraw 100 in app. Confirm that inner txn is visible to transaction API. ! 
appl "withdraw(uint64,uint64):void" --app-arg="int:$ASSETID" --app-arg="int:100" --foreign-asset="$ASSETID" --from="$SMALL" || exit 1 @@ -160,28 +200,28 @@ rest "/v2/blocks/$ROUND" | jq .block.txns[0].dt.itx [ "$(asset_bal "$SMALL")" = 999100 ] # 100 asset withdrawn [ "$(asset_bal "$APPACCT")" = 900 ] # 100 asset withdrawn -[ "$(balance "$SMALL")" = 992000 ] # 1 fee -[ "$(balance "$APPACCT")" = 997000 ] # fee paid by app +check_balance "$SMALL" $((SMALL_FUNDING - MIN_FEE * 7)) +check_balance "$APPACCT" $((999000 - MIN_FEE * 2)) -appl "${WITHDRAW[@]}" --app-arg="int:100" --fee 2000 +appl "${WITHDRAW[@]}" --app-arg="int:100" --fee $((MIN_FEE * 2)) [ "$(asset_bal "$SMALL")" = 999200 ] # 100 asset withdrawn [ "$(asset_bal "$APPACCT")" = 800 ] # 100 asset withdrawn -[ "$(balance "$SMALL")" = 990000 ] # 2000 fee -[ "$(balance "$APPACCT")" = 997000 ] # fee credit used +check_balance "$SMALL" $((SMALL_FUNDING - MIN_FEE * 9)) +check_balance "$APPACCT" $((999000 - MIN_FEE * 2)) # Try to withdraw too much ! 
appl "${WITHDRAW[@]}" --app-arg="int:1000" || exit 1 [ "$(asset_bal "$SMALL")" = 999200 ] # no changes [ "$(asset_bal "$APPACCT")" = 800 ] -[ "$(balance "$SMALL")" = 990000 ] -[ "$(balance "$APPACCT")" = 997000 ] +check_balance "$SMALL" $((SMALL_FUNDING - MIN_FEE * 9)) +check_balance "$APPACCT" $((999000 - MIN_FEE * 2)) # Show that it works AT exact asset balance appl "${WITHDRAW[@]}" --app-arg="int:800" [ "$(asset_bal "$SMALL")" = 1000000 ] [ "$(asset_bal "$APPACCT")" = 0 ] -[ "$(balance "$SMALL")" = 989000 ] -[ "$(balance "$APPACCT")" = 996000 ] # app paid the fee for inner axfer +check_balance "$SMALL" $((SMALL_FUNDING - MIN_FEE * 10)) +check_balance "$APPACCT" $((999000 - MIN_FEE * 3)) USER=$(${gcmd} account new | awk '{ print $6 }') #new account ${gcmd} clerk send -a 999000 -f "$ACCOUNT" -t "$USER" #fund account diff --git a/test/scripts/e2e_subs/app-assets.sh b/test/scripts/e2e_subs/app-assets.sh index 5543efe783..a7cb3b20f6 100755 --- a/test/scripts/e2e_subs/app-assets.sh +++ b/test/scripts/e2e_subs/app-assets.sh @@ -7,9 +7,6 @@ date "+${scriptname} start %Y%m%d_%H%M%S" my_dir="$(dirname "$0")" source "$my_dir/rest.sh" "$@" -function rest() { - curl -q -s -H "Authorization: Bearer $PUB_TOKEN" "$NET$1" -} set -e set -x @@ -23,18 +20,61 @@ TEAL=test/scripts/e2e_subs/tealprogs gcmd="goal -w ${WALLET}" ACCOUNT=$(${gcmd} account list|awk '{ print $3 }') -# Create a smaller account so rewards won't change balances. 
+ +# Get network's minimum fee +MIN_FEE=$(get_min_fee) +echo "Network MinFee: $MIN_FEE" + +# This test uses approximately 24 transactions from SMALL account +NUM_TXNS=24 +NEEDED_FOR_FEES=$((MIN_FEE * NUM_TXNS)) + +# SMALL's minimum balance at peak: +# - Base: 100000 +# - App opt-in (with 1 local int): 128500 (100000 + 25000 + 3500 for schema) +# - 2 created assets (each with auto opt-in): 400000 (2 * 200000) +# - Total: 628500 +MAX_MIN_BALANCE=628500 + +# Total amount needed: fees + min balance +TOTAL_NEEDED=$((NEEDED_FOR_FEES + MAX_MIN_BALANCE + MIN_FEE * 3)) + +# Fund with at least 2 Algos to ensure enough for high MIN_FEE scenarios +# Note: Account will earn rewards (RewardUnit = 1,000,000), so balance checks must be tolerant +SMALL_FUNDING=$((TOTAL_NEEDED > 2000000 ? TOTAL_NEEDED : 2000000)) + +# Tolerance for balance checks (to account for rewards earned) +# Allow up to 5000 microAlgos tolerance per balance check +BALANCE_TOLERANCE=5000 SMALL=$(${gcmd} account new | awk '{ print $6 }') -# Under one algo receives no rewards -${gcmd} clerk send -a 999000 -f "$ACCOUNT" -t "$SMALL" +${gcmd} clerk send -a $SMALL_FUNDING -f "$ACCOUNT" -t "$SMALL" function balance { acct=$1; shift goal account balance -a "$acct" | awk '{print $1}' } -[ "$(balance "$ACCOUNT")" = 999999000000 ] -[ "$(balance "$SMALL")" = 999000 ] +# Check if balance is within tolerance of expected value +# Usage: check_balance +# Allows BALANCE_TOLERANCE above expected (for rewards) but exact match below +function check_balance { + local acct=$1 + local expected=$2 + local actual + actual=$(balance "$acct") + local diff=$((actual - expected)) + + if [ $diff -lt 0 ] || [ $diff -gt $BALANCE_TOLERANCE ]; then + echo "ERROR: Balance check failed for $acct" + echo " Expected: $expected (tolerance: +0 to +$BALANCE_TOLERANCE)" + echo " Actual: $actual (diff: $diff)" + return 1 + fi + return 0 +} + +check_balance "$ACCOUNT" $((1000000000000 - $SMALL_FUNDING - MIN_FEE)) +check_balance "$SMALL" $SMALL_FUNDING 
function created_assets { acct=$1; @@ -62,7 +102,7 @@ function assets { } APPID=$(${gcmd} app create --creator "${SMALL}" --approval-prog=${TEAL}/assets-escrow.teal --global-byteslices 4 --global-ints 0 --local-byteslices 0 --local-ints 1 --clear-prog=${TEAL}/approve-all.teal | grep Created | awk '{ print $6 }') -[ "$(balance "$SMALL")" = 998000 ] # 1000 fee +check_balance "$SMALL" $((SMALL_FUNDING - MIN_FEE)) function appl { method=$1; shift @@ -119,15 +159,15 @@ function sign { TXID=$(${gcmd} app optin --app-id "$APPID" --from "${SMALL}" | app-txid) # Rest succeeds, no stray inner-txn array [ "$(rest "/v2/transactions/pending/$TXID" | jq '.["inner-txn"]')" == null ] -[ "$(balance "$SMALL")" = 997000 ] # 1000 fee +check_balance "$SMALL" $((SMALL_FUNDING - MIN_FEE * 2)) ASSETID=$(asset-create 1000000 --name "e2e" --unitname "e" | asset-id) -[ "$(balance "$SMALL")" = 996000 ] # 1000 fee +check_balance "$SMALL" $((SMALL_FUNDING - MIN_FEE * 3)) ${gcmd} clerk send -a 999000 -f "$ACCOUNT" -t "$APPACCT" appl "optin():void" --foreign-asset="$ASSETID" --from="$SMALL" -[ "$(balance "$APPACCT")" = 998000 ] # 1000 fee -[ "$(balance "$SMALL")" = 995000 ] +check_balance "$APPACCT" $((999000 - MIN_FEE)) +check_balance "$SMALL" $((SMALL_FUNDING - MIN_FEE * 4)) appl "deposit():void" -o "$T/deposit.tx" --from="$SMALL" asset-deposit 1000 $ASSETID -o "$T/axfer1.tx" @@ -139,8 +179,8 @@ ${gcmd} clerk rawsend -f "$T/group.stx" [ "$(asset_bal "$SMALL")" = 999000 ] # asset balance [ "$(asset_ids "$APPACCT")" = $ASSETID ] [ "$(asset_bal "$APPACCT")" = 1000 ] -[ "$(balance "$SMALL")" = 993000 ] # 2 fees -[ "$(balance "$APPACCT")" = 998000 ] +check_balance "$SMALL" $((SMALL_FUNDING - MIN_FEE * 6)) +check_balance "$APPACCT" $((999000 - MIN_FEE)) # Withdraw 100 in app. Confirm that inner txn is visible to transaction API. 
TXID=$(appl "withdraw(uint64):void" --app-arg="int:100" --foreign-asset="$ASSETID" --from="$SMALL" | app-txid) @@ -154,28 +194,28 @@ rest "/v2/blocks/$ROUND" | jq .block.txns[0].dt.itx [ "$(asset_bal "$SMALL")" = 999100 ] # 100 asset withdrawn [ "$(asset_bal "$APPACCT")" = 900 ] # 100 asset withdrawn -[ "$(balance "$SMALL")" = 992000 ] # 1 fee -[ "$(balance "$APPACCT")" = 997000 ] # fee paid by app +check_balance "$SMALL" $((SMALL_FUNDING - MIN_FEE * 7)) +check_balance "$APPACCT" $((999000 - MIN_FEE * 2)) -appl "withdraw(uint64):void" --app-arg="int:100" --foreign-asset="$ASSETID" --fee 2000 --from="$SMALL" +appl "withdraw(uint64):void" --app-arg="int:100" --foreign-asset="$ASSETID" --fee $((MIN_FEE * 2)) --from="$SMALL" [ "$(asset_bal "$SMALL")" = 999200 ] # 100 asset withdrawn -[ "$(balance "$SMALL")" = 990000 ] # 2000 fee +check_balance "$SMALL" $((SMALL_FUNDING - MIN_FEE * 9)) [ "$(asset_bal "$APPACCT")" = 800 ] # 100 asset withdrawn -[ "$(balance "$APPACCT")" = 997000 ] # fee credit used +check_balance "$APPACCT" $((999000 - MIN_FEE * 2)) # Try to withdraw too much appl "withdraw(uint64):void" --app-arg="int:1000" --foreign-asset="$ASSETID" --from="$SMALL" && exit 1 [ "$(asset_bal "$SMALL")" = 999200 ] # no change [ "$(asset_bal "$APPACCT")" = 800 ] # no change -[ "$(balance "$SMALL")" = 990000 ] -[ "$(balance "$APPACCT")" = 997000 ] +check_balance "$SMALL" $((SMALL_FUNDING - MIN_FEE * 9)) +check_balance "$APPACCT" $((999000 - MIN_FEE * 2)) # Show that it works AT exact asset balance appl "withdraw(uint64):void" --app-arg="int:800" --foreign-asset="$ASSETID" --from="$SMALL" [ "$(asset_bal "$SMALL")" = 1000000 ] [ "$(asset_bal "$APPACCT")" = 0 ] -[ "$(balance "$SMALL")" = 989000 ] -[ "$(balance "$APPACCT")" = 996000 ] +check_balance "$SMALL" $((SMALL_FUNDING - MIN_FEE * 10)) +check_balance "$APPACCT" $((999000 - MIN_FEE * 3)) USER=$(${gcmd} account new | awk '{ print $6 }') #new account ${gcmd} clerk send -a 999000 -f "$ACCOUNT" -t "$USER" #fund account diff 
--git a/test/scripts/e2e_subs/app-group.py b/test/scripts/e2e_subs/app-group.py index 7ad92b92c6..3f616f8cf3 100755 --- a/test/scripts/e2e_subs/app-group.py +++ b/test/scripts/e2e_subs/app-group.py @@ -53,7 +53,9 @@ itxn_submit itxn Fee - int 2000 + global MinTxnFee + int 2 + * == assert diff --git a/test/scripts/e2e_subs/app-inner-calls.py b/test/scripts/e2e_subs/app-inner-calls.py index 3268ef87de..1ab6d89f7e 100755 --- a/test/scripts/e2e_subs/app-inner-calls.py +++ b/test/scripts/e2e_subs/app-inner-calls.py @@ -14,7 +14,8 @@ joe = goal.new_account() -_, err = goal.pay(goal.account, joe, amt=500_000) +# Fund joe with extra to cover higher fees (was 500_000) +_, err = goal.pay(goal.account, joe, amt=600_000) assert not err, err # Turn off rewards for precise balance checking @@ -134,7 +135,9 @@ app_account = logic.get_application_address(app_id) # Check balance on app account is right (1m - 1 optin fee) -assert 1_000_000-1000 == goal.balance(app_account), goal.balance(app_account) +# Get the network's minimum fee from suggested params +min_fee = goal.params().min_fee +assert 1_000_000-min_fee == goal.balance(app_account), goal.balance(app_account) assert 0 == goal.balance(app_account, asa_id) # Check min-balance on app account is right (base + 1 asa) assert 200_000 == goal.min_balance(app_account), goal.min_balance(app_account) diff --git a/test/scripts/e2e_subs/app-rekey.py b/test/scripts/e2e_subs/app-rekey.py index efa1accca7..8b2ed5bb67 100755 --- a/test/scripts/e2e_subs/app-rekey.py +++ b/test/scripts/e2e_subs/app-rekey.py @@ -53,7 +53,9 @@ txinfo, err = goal.app_create(joe, goal.assemble(teal)) assert not err, err -joeb = joeb-1000 +# Get the network's minimum fee +min_fee = goal.params().min_fee +joeb = joeb-min_fee app_id = txinfo['application-index'] assert app_id @@ -70,7 +72,7 @@ txinfo, err = goal.app_call(joe, app_id, accounts=[flo]) assert not err, err -joeb = joeb-1000 +joeb = joeb-min_fee assert goal.balance(joe) == joeb+6, goal.balance(joe) # can 
spend again diff --git a/test/scripts/e2e_subs/asset-misc.sh b/test/scripts/e2e_subs/asset-misc.sh index 35198091e2..58d27fbb0d 100755 --- a/test/scripts/e2e_subs/asset-misc.sh +++ b/test/scripts/e2e_subs/asset-misc.sh @@ -106,7 +106,7 @@ ${gcmd} asset create --creator "${ACCOUNT}" --manager "${ACCOUNTB}" --reserve "$ EXPERROR='account asset info not found' RES=$(${gcmd} asset info --creator $ACCOUNT --unitname dma 2>&1 || true) if [[ $RES != *"${EXPERROR}"* ]]; then - date '+asset-misc FAIL asset info should fail unless reserve account was opted in %Y%m%d_%H%M%S' + date "+${scriptname} FAIL asset info should fail unless reserve account was opted in %Y%m%d_%H%M%S" exit 1 else echo ok @@ -187,4 +187,28 @@ else exit 1 fi +# Test Scenario - check transferring of the 0 asset +# case 1: send 0 units of 0 asset to self should fail +EXPERROR='asset 0 does not exist or has been deleted' +RES=$(${gcmd} asset send --from "${ACCOUNT}" --to "${ACCOUNT}" --assetid 0 --amount 0 2>&1 || true) +if [[ $RES != *"${EXPERROR}"* ]]; then + date "+${scriptname} FAIL asset transfer of 0 units of 0 asset should not be allowed to self in %Y%m%d_%H%M%S" + exit 1 +else + echo ok +fi + +# case 2: send 0 units of 0 asset to someone else should succeed +${gcmd} asset send --from "${ACCOUNT}" --to "${ACCOUNTB}" --assetid 0 --amount 0 + +# case 3: send 0 units of 0 asset to someone else including a close-to should fail +EXPERROR='asset 0 not present in account' +RES=$(${gcmd} asset send --from "${ACCOUNT}" --to "${ACCOUNTB}" --assetid 0 --amount 0 --close-to "${ACCOUNTB}" 2>&1 || true) +if [[ $RES != *"${EXPERROR}"* ]]; then + date "+${scriptname} FAIL asset transfer of 0 units of 0 asset including a close-to should not be allowed in %Y%m%d_%H%M%S" + exit 1 +else + echo ok +fi + date "+$scriptname OK %Y%m%d_%H%M%S" diff --git a/test/scripts/e2e_subs/dynamic-fee-teal-test.sh b/test/scripts/e2e_subs/dynamic-fee-teal-test.sh index b87b8ae1cb..6568d4716d 100755 --- 
a/test/scripts/e2e_subs/dynamic-fee-teal-test.sh +++ b/test/scripts/e2e_subs/dynamic-fee-teal-test.sh @@ -1,5 +1,8 @@ #!/bin/bash +my_dir="$(dirname "$0")" +source "$my_dir/rest.sh" "$@" + date '+dynamic-fee-teal-test start %Y%m%d_%H%M%S' set -e @@ -17,6 +20,10 @@ ACCOUNTC=$(${gcmd} account new|awk '{ print $6 }') ACCOUNTD=$(${gcmd} account new|awk '{ print $6 }') LEASE=uImiLf+mqOqs0BFsqIUHBh436N/z964X50e3P9Ii4ac= +# Get network's minimum fee +MIN_FEE=$(get_min_fee) +echo "Network MinFee: $MIN_FEE" + # Fund ACCOUNTB ${gcmd} clerk send -a 100000000 -f ${ACCOUNT} -t ${ACCOUNTB} @@ -36,14 +43,14 @@ ${gcmd} clerk compile -a ${ACCOUNTB} -s ${TEMPDIR}/dynamic.teal -o ${TEMPDIR}/dy ${gcmd} account delete -a ${ACCOUNTB} # Create first transaction to spend fee (can't sign yet, no group) -${gcmd} clerk send -f ${ACCOUNT} -t ${ACCOUNTB} -a 1234 -o ${TEMPDIR}/feefund.txn +${gcmd} clerk send -f ${ACCOUNT} -t ${ACCOUNTB} -a ${MIN_FEE} -o ${TEMPDIR}/feefund.txn # # First test (negative) # -# Create second transaction mostly as per template, but pay wrong fee -${gcmd} clerk send --fee=1235 --lease=${LEASE} --firstvalid=1 --lastvalid=1001 -f ${ACCOUNTB} -a=1000000 -t=${ACCOUNTD} --close-to=${ACCOUNTC} -o ${TEMPDIR}/fundedpayment.txn +# Create second transaction mostly as per template, but pay wrong fee (off by 1) +${gcmd} clerk send --fee=$((MIN_FEE + 1)) --lease=${LEASE} --firstvalid=1 --lastvalid=1001 -f ${ACCOUNTB} -a=1000000 -t=${ACCOUNTD} --close-to=${ACCOUNTC} -o ${TEMPDIR}/fundedpayment.txn # Cat txns together cat ${TEMPDIR}/feefund.txn ${TEMPDIR}/fundedpayment.txn > ${TEMPDIR}/group.txn @@ -75,8 +82,8 @@ fi # Second test (positive) # -# Create second transaction as per template -${gcmd} clerk send --fee=1234 --lease=${LEASE} --firstvalid=1 --lastvalid=1001 -f ${ACCOUNTB} -a=1000000 -t=${ACCOUNTD} --close-to=${ACCOUNTC} -o ${TEMPDIR}/fundedpayment.txn +# Create second transaction as per template with correct fee +${gcmd} clerk send --fee=${MIN_FEE} --lease=${LEASE} 
--firstvalid=1 --lastvalid=1001 -f ${ACCOUNTB} -a=1000000 -t=${ACCOUNTD} --close-to=${ACCOUNTC} -o ${TEMPDIR}/fundedpayment.txn # Cat txns together cat ${TEMPDIR}/feefund.txn ${TEMPDIR}/fundedpayment.txn > ${TEMPDIR}/group.txn diff --git a/test/scripts/e2e_subs/e2e-logs.sh b/test/scripts/e2e_subs/e2e-logs.sh index 67df088f46..3b63e191e8 100755 --- a/test/scripts/e2e_subs/e2e-logs.sh +++ b/test/scripts/e2e_subs/e2e-logs.sh @@ -7,9 +7,6 @@ date "+${scriptname} start %Y%m%d_%H%M%S" my_dir="$(dirname "$0")" source "$my_dir/rest.sh" "$@" -function rest() { - curl -q -s -H "Authorization: Bearer $PUB_TOKEN" "$NET$1" -} function app_txid { # When app (call or optin) submits, this is how the txid is diff --git a/test/scripts/e2e_subs/e2e-teal-multisig.sh b/test/scripts/e2e_subs/e2e-teal-multisig.sh index d5acb06c18..fa34d2eceb 100755 --- a/test/scripts/e2e_subs/e2e-teal-multisig.sh +++ b/test/scripts/e2e_subs/e2e-teal-multisig.sh @@ -77,7 +77,8 @@ ${gcmd} clerk send --amount 100000 --from ${ACCOUNT_MSIG} --to ${ACCOUNT_A} -L $ echo "Auto-detection correctly used new mode on future consensus" # Verify auto-detection used new mode (LMsig field) -if ! cat ${TEMPDIR}/auto2.lsig | msgpacktool -d | grep -q '"lmsig"'; then +msgpacktool -d < ${TEMPDIR}/auto2.lsig > ${TEMPDIR}/auto2.json +if ! 
grep -q '"lmsig"' ${TEMPDIR}/auto2.json; then echo "ERROR: Auto-detection did not use new mode (LMsig field not found)" exit 1 fi diff --git a/test/scripts/e2e_subs/example.py b/test/scripts/e2e_subs/example.py index 9e4e97097a..fbe90c2f0f 100755 --- a/test/scripts/e2e_subs/example.py +++ b/test/scripts/e2e_subs/example.py @@ -21,13 +21,22 @@ txid, err = goal.send(pay, confirm=False) # errors early assert "balance 10000 below min 100000" in str(err), err -pay = goal.pay(goal.account, receiver=joe, amt=500_000) +# Fund joe with extra to cover higher fees +# Need to account for: base min balance (100k) + asset (100k) + 2 app optins (200k) + multiple txn fees +# Joe performs ~10 transactions and must maintain min balance throughout +# Joe's balance fluctuates as assets are created/destroyed and apps are opted in/out +min_fee = goal.params().min_fee +# Dynamic funding: base (100k) + max temporary overhead (400k) + txn fees (50 * min_fee as buffer) +# Increased buffer to account for all transactions joe performs +joe_funding = 100_000 + 400_000 + (min_fee * 50) +pay = goal.pay(goal.account, receiver=joe, amt=joe_funding) txinfo, err = goal.send(pay) assert not err, err tx = txinfo['txn']['txn'] -assert tx['amt'] == 500_000 -assert tx['fee'] == 1000 -assert goal.balance(joe) == 500_000 +assert tx['amt'] == joe_funding +# Check fee is the network's min fee +assert tx['fee'] == min_fee +assert goal.balance(joe) == joe_funding # Asset creation acfg = goal.acfg(joe, @@ -113,7 +122,8 @@ assert local_state[b'balance'] == 150_000, local_state # Pay to logicsig, and spend from there, which requires signing by logicsig -fund = goal.pay(goal.account, goal.logic_address(yes), 110_000) +# Fund with: min_balance (100k) + payment (2k) + fee (min_fee) + small buffer +fund = goal.pay(goal.account, goal.logic_address(yes), 100_000 + 10_000 + min_fee) txinfo, err = goal.send(fund) assert not err, err @@ -121,7 +131,9 @@ spend = goal.sign_with_program(spend, yes) txinfo, err = 
goal.send(spend) assert not err, err -assert goal.balance(goal.logic_address(yes)) == 107_000, goal.balance(goal.logic_address(yes)) +# Logic address should have: initial_funding - payment - fee = (100k + 10k + min_fee) - 2k - min_fee = 108k +expected_logic_balance = 108_000 +assert goal.balance(goal.logic_address(yes)) == expected_logic_balance, goal.balance(goal.logic_address(yes)) stamp = datetime.now().strftime("%Y%m%d_%H%M%S") print(f"{os.path.basename(sys.argv[0])} OK {stamp}") diff --git a/test/scripts/e2e_subs/hdr-access-logicsig.sh b/test/scripts/e2e_subs/hdr-access-logicsig.sh index 32c36d7b28..103695c99a 100755 --- a/test/scripts/e2e_subs/hdr-access-logicsig.sh +++ b/test/scripts/e2e_subs/hdr-access-logicsig.sh @@ -4,6 +4,9 @@ filename=$(basename "$0") scriptname="${filename%.*}" date "+${scriptname} start %Y%m%d_%H%M%S" +my_dir="$(dirname "$0")" +source "$my_dir/rest.sh" "$@" + set -e set -x set -o pipefail @@ -12,6 +15,10 @@ WALLET=$1 gcmd="goal -w ${WALLET}" +# Get network's minimum fee +MIN_FEE=$(get_min_fee) +echo "Network MinFee: $MIN_FEE" + ACCOUNT=$(${gcmd} account list|awk '{ print $3 }') cat >${TEMPDIR}/hdr.teal< ${TEMPDIR}/atomic.teal +algotmpl -d tools/teal/templates/ htlc --fee=$((MIN_FEE * 2)) --hashfn="sha256" --hashimg="9S+9MrKzuG/4jvbEkGKChfSCrxXdyylUH5S89Saj9sc=" --own=${ACCOUNT} --rcv=${ACCOUNTB} --timeout=100000 > ${TEMPDIR}/atomic.teal # Compile the template CONTRACT=$(${gcmd} clerk compile ${TEMPDIR}/atomic.teal | awk '{ print $2 }') @@ -42,13 +49,14 @@ if [[ $RES != *"${EXPERROR}"* ]]; then fi # Succeed in releasing the funds using the correct preimage -${gcmd} clerk send --fee=1000 --from-program ${TEMPDIR}/atomic.teal -a=0 -t=${ZERO_ADDRESS} --close-to=${ACCOUNTB} --argb64=aHVudGVyMg== +${gcmd} clerk send --fee=${MIN_FEE} --from-program ${TEMPDIR}/atomic.teal -a=0 -t=${ZERO_ADDRESS} --close-to=${ACCOUNTB} --argb64=aHVudGVyMg== # Check balance BALANCEB=$(${gcmd} account balance -a ${ACCOUNTB} | awk '{ print $1 }') -# Use >= 
9999000 to account for rewards which may have accumulated -if [ $BALANCEB -lt 9999000 ]; then - date "+htlc-teal-test FAIL wanted balance>=9999000 but got ${BALANCEB} %Y%m%d_%H%M%S" +# Expected balance is 10000000 - MIN_FEE (account for rewards which may have accumulated) +EXPECTED_MIN=$((10000000 - MIN_FEE)) +if [ $BALANCEB -lt $EXPECTED_MIN ]; then + date "+htlc-teal-test FAIL wanted balance>=$EXPECTED_MIN but got ${BALANCEB} %Y%m%d_%H%M%S" false fi diff --git a/test/scripts/e2e_subs/keyreg-teal-test.sh b/test/scripts/e2e_subs/keyreg-teal-test.sh index 61402a45e6..da080978ca 100755 --- a/test/scripts/e2e_subs/keyreg-teal-test.sh +++ b/test/scripts/e2e_subs/keyreg-teal-test.sh @@ -2,6 +2,9 @@ date '+keyreg-teal-test start %Y%m%d_%H%M%S' +my_dir="$(dirname "$0")" +source "$my_dir/rest.sh" "$@" + set -e set -x set -o pipefail @@ -11,6 +14,10 @@ WALLET=$1 gcmd="goal -w ${WALLET}" +# Get network's minimum fee +MIN_FEE=$(get_min_fee) +echo "Network MinFee: $MIN_FEE" + ACCOUNT=$(${gcmd} account list|awk '{ print $3 }') ACCOUNTA=$(${gcmd} account new|awk '{ print $6 }') ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }') @@ -19,7 +26,8 @@ LEASE=YmxhaCBibGFoIGxlYXNlIHdoYXRldmVyIGJsYWghISE= DUR=8 PERIOD=8 EXPIRE=10000 -FEE=100000 +# Use a high fee for delegation (allows multiple transactions) +FEE=$((MIN_FEE * 6)) echo "generating new delegate and participation keys for newly-funded account ${ACCOUNTA}" ${gcmd} clerk send --from ${ACCOUNT} --to ${ACCOUNTA} -a 1000000 @@ -96,7 +104,8 @@ if [[ $REGOK != 1 ]]; then fi echo "replay keyreg transaction with different fee" -${gcmd} account changeonlinestatus -a ${ACCOUNTA} -x ${LEASE} --online --firstvalid ${PBOUND} --validrounds `expr ${DUR} + 1` --txfile ${TEMPDIR}/keyreg.tx --fee 100000 +# Use a fee within the allowed range (MIN_FEE * 2 <= MIN_FEE * 6) to ensure we test the lease check, not the fee check +${gcmd} account changeonlinestatus -a ${ACCOUNTA} -x ${LEASE} --online --firstvalid ${PBOUND} --validrounds `expr ${DUR} + 
1` --txfile ${TEMPDIR}/keyreg.tx --fee $((MIN_FEE * 2)) dsign ${TEMPDIR}/delegate.keyregkey ${TEMPDIR}/kr.lsig < ${TEMPDIR}/keyreg.tx > ${TEMPDIR}/keyreg.stx RES=$(${gcmd} clerk rawsend -f ${TEMPDIR}/keyreg.stx || true) @@ -111,7 +120,8 @@ echo "generating new delegate and participation keys for newly-funded account ${ DUR=8 PERIOD=8 EXPIRE=10 -FEE=100000 +# Use a high fee for delegation (allows multiple transactions) +FEE=$((MIN_FEE * 6)) ${gcmd} clerk send --from ${ACCOUNT} --to ${ACCOUNTB} -a 1000000 DELKEY=$(algokey generate -f ${TEMPDIR}/delegate.keyregkey | grep "Public key" | awk '{ print $3 }') diff --git a/test/scripts/e2e_subs/keyreg.sh b/test/scripts/e2e_subs/keyreg.sh index b3d852f268..8b08cf6a1a 100755 --- a/test/scripts/e2e_subs/keyreg.sh +++ b/test/scripts/e2e_subs/keyreg.sh @@ -2,6 +2,9 @@ date '+e2e_subs/keyreg.sh start %Y%m%d_%H%M%S' +my_dir="$(dirname "$0")" +source "$my_dir/rest.sh" "$@" + set -exo pipefail export SHELLOPTS @@ -9,6 +12,10 @@ WALLET=$1 gcmd="goal -w ${WALLET}" +# Get network's minimum fee +MIN_FEE=$(get_min_fee) +echo "Network MinFee: $MIN_FEE" + ACCOUNT=$(${gcmd} account list|awk '{ print $3 }') # secret algokey override @@ -19,7 +26,7 @@ KEYS="${TEMPDIR}/foo.keys" TXN="${TEMPDIR}/keyreg.txn" STXN="${TEMPDIR}/keyreg.stxn" algokey part generate --first 1 --last 1000 --parent "${ACCOUNT}" --keyfile "${KEYS}" -algokey part keyreg --network placeholder --keyfile "${KEYS}" --firstvalid 1 --outputFile "${TXN}" +algokey part keyreg --network placeholder --keyfile "${KEYS}" --firstvalid 1 --fee "${MIN_FEE}" --outputFile "${TXN}" # technically algokey could be used to sign at this point, that would require # exporting secrets from the wallet. 
${gcmd} clerk sign -i "${TXN}" -o "${STXN}" @@ -28,6 +35,6 @@ ${gcmd} clerk rawsend -f "${STXN}" TXN2="${TEMPDIR}/keydereg.txn" STXN2="${TEMPDIR}/keydereg.stxn" # Test key de-registration -algokey part keyreg --network placeholder --offline --account "${ACCOUNT}" --firstvalid 1 --outputFile "${TXN2}" +algokey part keyreg --network placeholder --offline --account "${ACCOUNT}" --firstvalid 1 --fee "${MIN_FEE}" --outputFile "${TXN2}" ${gcmd} clerk sign -i "${TXN2}" -o "${STXN2}" ${gcmd} clerk rawsend -f "${STXN2}" diff --git a/test/scripts/e2e_subs/periodic-teal-test.sh b/test/scripts/e2e_subs/periodic-teal-test.sh index 6e95e7658d..eef4a8ba32 100755 --- a/test/scripts/e2e_subs/periodic-teal-test.sh +++ b/test/scripts/e2e_subs/periodic-teal-test.sh @@ -2,6 +2,9 @@ date '+periodic-teal-test start %Y%m%d_%H%M%S' +my_dir="$(dirname "$0")" +source "$my_dir/rest.sh" "$@" + set -e set -x set -o pipefail @@ -11,12 +14,16 @@ WALLET=$1 gcmd="goal -w ${WALLET}" +# Get network's minimum fee +MIN_FEE=$(get_min_fee) +echo "Network MinFee: $MIN_FEE" + ACCOUNT=$(${gcmd} account list|awk '{ print $3 }') ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }') ZERO_ADDRESS=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ LEASE=YmxhaCBibGFoIGxlYXNlIHdoYXRldmVyIGJsYWghISE= -sed s/TMPL_RCV/${ACCOUNTB}/g < tools/teal/templates/periodic-payment-escrow.teal.tmpl | sed s/TMPL_PERIOD/5/g | sed s/TMPL_DUR/2/g | sed s/TMPL_AMT/100000/g | sed s/TMPL_LEASE/${LEASE}/g | sed s/TMPL_TIMEOUT/16/g | sed s/TMPL_FEE/10000/g > ${TEMPDIR}/periodic.teal +sed s/TMPL_RCV/${ACCOUNTB}/g < tools/teal/templates/periodic-payment-escrow.teal.tmpl | sed s/TMPL_PERIOD/5/g | sed s/TMPL_DUR/2/g | sed s/TMPL_AMT/100000/g | sed s/TMPL_LEASE/${LEASE}/g | sed s/TMPL_TIMEOUT/16/g | sed s/TMPL_FEE/${MIN_FEE}/g > ${TEMPDIR}/periodic.teal ACCOUNT_PERIODIC=$(${gcmd} clerk compile ${TEMPDIR}/periodic.teal -o ${TEMPDIR}/periodic.tealc|awk '{ print $2 }') diff --git a/test/scripts/e2e_subs/rest-applications-endpoint.sh 
b/test/scripts/e2e_subs/rest-applications-endpoint.sh index f66491ea85..297d5041d5 100755 --- a/test/scripts/e2e_subs/rest-applications-endpoint.sh +++ b/test/scripts/e2e_subs/rest-applications-endpoint.sh @@ -9,6 +9,7 @@ date "+$0 start %Y%m%d_%H%M%S" # Create an application printf '#pragma version 2\nint 1' > "${TEMPDIR}/simple.teal" +# shellcheck disable=SC2154 # gcmd is defined in rest.sh APPID=$(${gcmd} app create --creator "${ACCOUNT}" --approval-prog "${TEMPDIR}/simple.teal" --clear-prog "${TEMPDIR}/simple.teal" --global-byteslices 0 --global-ints 2 --local-byteslices 0 --local-ints 0 | grep Created | awk '{ print $6 }') # Good request, non-existent app id diff --git a/test/scripts/e2e_subs/rest-assets-endpoint.sh b/test/scripts/e2e_subs/rest-assets-endpoint.sh index 9b6c77b363..16c3ab6e62 100755 --- a/test/scripts/e2e_subs/rest-assets-endpoint.sh +++ b/test/scripts/e2e_subs/rest-assets-endpoint.sh @@ -7,6 +7,7 @@ source "$my_dir/rest.sh" "$@" date "+$0 start %Y%m%d_%H%M%S" +# shellcheck disable=SC2154 # gcmd is defined in rest.sh ASSET_ID=$(${gcmd} asset create --creator "${ACCOUNT}" --total 10000 --decimals 19 --asseturl 'https://www.reddit.com/r/AlgorandOfficial/' --name "spanish coin" --unitname "doubloon" | grep "Created asset with asset index" | rev | cut -d ' ' -f 1 | rev) # Good request, non-existent asset id diff --git a/test/scripts/e2e_subs/rest.sh b/test/scripts/e2e_subs/rest.sh index 4613a7ba9f..c13c6c0aaa 100755 --- a/test/scripts/e2e_subs/rest.sh +++ b/test/scripts/e2e_subs/rest.sh @@ -68,6 +68,23 @@ function call_delete { base_delete_call "$PUB_TOKEN" "$1" "$2" } +# Helper function for simple REST API calls that returns response to stdout +function rest { + curl -q -s -H "Authorization: Bearer $PUB_TOKEN" "$NET$1" +} + +# Get the network's minimum transaction fee from suggested params +function get_min_fee { + local fee + fee=$(rest "/v2/transactions/params" | jq -r '.["min-fee"]') + if [ -z "$fee" ] || [ "$fee" = "null" ]; then + echo 
"ERROR: Failed to get min fee from network" >&2 + exit 1 + else + echo "$fee" + fi +} + function fail_and_exit { printf "\n\nFailed test - $1 ($2): $3\n\n" diff --git a/test/scripts/e2e_subs/single-payer-swap.sh b/test/scripts/e2e_subs/single-payer-swap.sh index a9949e225d..4bd6cf9424 100755 --- a/test/scripts/e2e_subs/single-payer-swap.sh +++ b/test/scripts/e2e_subs/single-payer-swap.sh @@ -4,6 +4,9 @@ filename=$(basename "$0") scriptname="${filename%.*}" date "+${scriptname} start %Y%m%d_%H%M%S" +my_dir="$(dirname "$0")" +source "$my_dir/rest.sh" "$@" + set -e set -x set -o pipefail @@ -13,6 +16,10 @@ WALLET=$1 gcmd="goal -w ${WALLET}" +# Get network's minimum fee +MIN_FEE=$(get_min_fee) +echo "Network MinFee: $MIN_FEE" + PAYER=$(${gcmd} account list|awk '{ print $3 }') MOOCHER=$(${gcmd} account new|awk '{ print $6 }') @@ -29,7 +36,7 @@ cd ${TEMPDIR} ${gcmd} clerk send -a 100 -f "${MOOCHER}" -t "${PAYER}" --fee 2 -o cheap.txn # Since goal was modified to allow < minfee when this feature was added, let's confirm msgpacktool -d < cheap.txn | grep fee | grep 2 -${gcmd} clerk send -a 100 -f "${PAYER}" -t "${MOOCHER}" --fee 2000 -o expensive.txn +${gcmd} clerk send -a 100 -f "${PAYER}" -t "${MOOCHER}" --fee $((MIN_FEE * 2)) -o expensive.txn cat cheap.txn expensive.txn > both.txn ${gcmd} clerk group -i both.txn -o group.txn ${gcmd} clerk sign -i group.txn -o group.stx @@ -47,7 +54,7 @@ if [[ $FOUND != "" ]]; then false fi -${gcmd} clerk send -a 100 -f "${PAYER}" -t "${MOOCHER}" --fee 2000 -o expensive.txn +${gcmd} clerk send -a 100 -f "${PAYER}" -t "${MOOCHER}" --fee $((MIN_FEE * 2)) -o expensive.txn cat cheap.txn expensive.txn > both.txn ${gcmd} clerk group -i both.txn -o group.txn ${gcmd} clerk sign -i group.txn -o group.stx diff --git a/test/scripts/e2e_subs/teal-split-test.sh b/test/scripts/e2e_subs/teal-split-test.sh index 6a364ff4d8..b8c92d9355 100755 --- a/test/scripts/e2e_subs/teal-split-test.sh +++ b/test/scripts/e2e_subs/teal-split-test.sh @@ -2,6 +2,9 @@ 
date '+teal-split-test start %Y%m%d_%H%M%S' +my_dir="$(dirname "$0")" +source "$my_dir/rest.sh" "$@" + set -e set -x set -o pipefail @@ -11,12 +14,16 @@ WALLET=$1 gcmd="goal -w ${WALLET}" +# Get network's minimum fee +MIN_FEE=$(get_min_fee) +echo "Network MinFee: $MIN_FEE" + ACCOUNT=$(${gcmd} account list|awk '{ print $3 }') ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }') ACCOUNTC=$(${gcmd} account new|awk '{ print $6 }') -sed s/TMPL_RCV1/${ACCOUNTB}/g < tools/teal/templates/split.teal.tmpl | sed s/TMPL_RCV2/${ACCOUNTC}/g | sed s/TMPL_RAT1/60/g | sed s/TMPL_RAT2/40/g | sed s/TMPL_MINPAY/100000/g | sed s/TMPL_TIMEOUT/4/g | sed s/TMPL_OWN/${ACCOUNTB}/g | sed s/TMPL_FEE/10000/g > ${TEMPDIR}/split.teal +sed s/TMPL_RCV1/${ACCOUNTB}/g < tools/teal/templates/split.teal.tmpl | sed s/TMPL_RCV2/${ACCOUNTC}/g | sed s/TMPL_RAT1/60/g | sed s/TMPL_RAT2/40/g | sed s/TMPL_MINPAY/100000/g | sed s/TMPL_TIMEOUT/4/g | sed s/TMPL_OWN/${ACCOUNTB}/g | sed s/TMPL_FEE/${MIN_FEE}/g > ${TEMPDIR}/split.teal ACCOUNT_SPLIT=$(${gcmd} clerk compile ${TEMPDIR}/split.teal -o ${TEMPDIR}/split.tealc|awk '{ print $2 }') diff --git a/test/scripts/e2e_subs/v32/e2e-teal-multisig.sh b/test/scripts/e2e_subs/v32/e2e-teal-multisig.sh index ae7dd4b1f4..275bb8f0e7 100755 --- a/test/scripts/e2e_subs/v32/e2e-teal-multisig.sh +++ b/test/scripts/e2e_subs/v32/e2e-teal-multisig.sh @@ -65,7 +65,8 @@ if [ $? -ne 0 ]; then exit 1 fi echo "Auto-detection correctly used legacy mode on v32" -if ! cat ${TEMPDIR}/auto2.lsig | msgpacktool -d | grep -q '"msig"'; then +msgpacktool -d < ${TEMPDIR}/auto2.lsig > ${TEMPDIR}/auto2.json +if ! 
grep -q '"msig"' ${TEMPDIR}/auto2.json; then echo "ERROR: Auto-detection did not use legacy mode (Msig field not found)" exit 1 fi diff --git a/test/testdata/configs/config-v37.json b/test/testdata/configs/config-v37.json new file mode 100644 index 0000000000..f47ffed51f --- /dev/null +++ b/test/testdata/configs/config-v37.json @@ -0,0 +1,149 @@ +{ + "Version": 37, + "AccountUpdatesStatsInterval": 5000000000, + "AccountsRebuildSynchronousMode": 1, + "AgreementIncomingBundlesQueueLength": 15, + "AgreementIncomingProposalsQueueLength": 50, + "AgreementIncomingVotesQueueLength": 20000, + "AnnounceParticipationKey": true, + "Archival": false, + "BaseLoggerDebugLevel": 4, + "BlockDBDir": "", + "BlockServiceCustomFallbackEndpoints": "", + "BlockServiceMemCap": 500000000, + "BroadcastConnectionsLimit": -1, + "CadaverDirectory": "", + "CadaverSizeTarget": 0, + "CatchpointDir": "", + "CatchpointFileHistoryLength": 365, + "CatchpointInterval": 10000, + "CatchpointTracking": 0, + "CatchupBlockDownloadRetryAttempts": 1000, + "CatchupBlockValidateMode": 0, + "CatchupFailurePeerRefreshRate": 10, + "CatchupGossipBlockFetchTimeoutSec": 4, + "CatchupHTTPBlockFetchTimeoutSec": 4, + "CatchupLedgerDownloadRetryAttempts": 50, + "CatchupParallelBlocks": 16, + "ColdDataDir": "", + "ConnectionsRateLimitingCount": 60, + "ConnectionsRateLimitingWindowSeconds": 1, + "CrashDBDir": "", + "DNSBootstrapID": ".algorand.network?backup=.algorand.net&dedup=.algorand-.(network|net)", + "DNSSecurityFlags": 9, + "DeadlockDetection": 0, + "DeadlockDetectionThreshold": 30, + "DisableAPIAuth": false, + "DisableLedgerLRUCache": false, + "DisableLocalhostConnectionRateLimit": true, + "DisableNetworking": false, + "DisableOutgoingConnectionThrottling": false, + "EnableAccountUpdatesStats": false, + "EnableAgreementReporting": false, + "EnableAgreementTimeMetrics": false, + "EnableAssembleStats": false, + "EnableBatchVerification": true, + "EnableBlockService": false, + "EnableDHTProviders": false, + 
"EnableDeveloperAPI": false, + "EnableExperimentalAPI": false, + "EnableFollowMode": false, + "EnableGossipBlockService": true, + "EnableGossipService": true, + "EnableIncomingMessageFilter": false, + "EnableLedgerService": false, + "EnableMetricReporting": false, + "EnableNetDevMetrics": false, + "EnableOutgoingNetworkMessageFiltering": true, + "EnableP2P": false, + "EnableP2PHybridMode": false, + "EnablePingHandler": true, + "EnablePrivateNetworkAccessHeader": false, + "EnableProcessBlockStats": false, + "EnableProfiler": false, + "EnableRequestLogger": false, + "EnableRuntimeMetrics": false, + "EnableTopAccountsReporting": false, + "EnableTxBacklogAppRateLimiting": true, + "EnableTxBacklogRateLimiting": true, + "EnableTxnEvalTracer": false, + "EnableUsageLog": false, + "EnableVerbosedTransactionSyncLogging": false, + "EnableVoteCompression": true, + "EndpointAddress": "127.0.0.1:0", + "FallbackDNSResolverAddress": "", + "ForceFetchTransactions": false, + "ForceRelayMessages": false, + "GoMemLimit": 0, + "GossipFanout": 4, + "HeartbeatUpdateInterval": 600, + "HotDataDir": "", + "IncomingConnectionsLimit": 2400, + "IncomingMessageFilterBucketCount": 5, + "IncomingMessageFilterBucketSize": 512, + "LedgerSynchronousMode": 2, + "LogArchiveDir": "", + "LogArchiveMaxAge": "", + "LogArchiveName": "node.archive.log", + "LogFileDir": "", + "LogSizeLimit": 1073741824, + "MaxAPIBoxPerApplication": 100000, + "MaxAPIResourcesPerAccount": 100000, + "MaxAcctLookback": 4, + "MaxBlockHistoryLookback": 0, + "MaxCatchpointDownloadDuration": 43200000000000, + "MaxConnectionsPerIP": 8, + "MinCatchpointFileDownloadBytesPerSecond": 20480, + "NetAddress": "", + "NetworkMessageTraceServer": "", + "NetworkProtocolVersion": "", + "NodeExporterListenAddress": ":9100", + "NodeExporterPath": "./node_exporter", + "OptimizeAccountsDatabaseOnStartup": false, + "OutgoingMessageFilterBucketCount": 3, + "OutgoingMessageFilterBucketSize": 128, + "P2PHybridIncomingConnectionsLimit": 1200, + 
"P2PHybridNetAddress": "", + "P2PPersistPeerID": false, + "P2PPrivateKeyLocation": "", + "ParticipationKeysRefreshInterval": 60000000000, + "PeerConnectionsUpdateInterval": 3600, + "PeerPingPeriodSeconds": 0, + "PriorityPeers": {}, + "ProposalAssemblyTime": 500000000, + "PublicAddress": "", + "ReconnectTime": 60000000000, + "ReservedFDs": 256, + "RestConnectionsHardLimit": 2048, + "RestConnectionsSoftLimit": 1024, + "RestReadTimeoutSeconds": 15, + "RestWriteTimeoutSeconds": 120, + "RunHosted": false, + "StatefulVoteCompressionTableSize": 2048, + "StateproofDir": "", + "StorageEngine": "sqlite", + "SuggestedFeeBlockHistory": 3, + "SuggestedFeeSlidingWindowSize": 50, + "TLSCertFile": "", + "TLSKeyFile": "", + "TelemetryToLog": true, + "TrackerDBDir": "", + "TransactionSyncDataExchangeRate": 0, + "TransactionSyncSignificantMessageThreshold": 0, + "TxBacklogAppRateLimitingCountERLDrops": false, + "TxBacklogAppTxPerSecondRate": 100, + "TxBacklogAppTxRateLimiterMaxSize": 1048576, + "TxBacklogRateLimitingCongestionPct": 50, + "TxBacklogReservedCapacityPerPeer": 20, + "TxBacklogServiceRateWindowSeconds": 10, + "TxBacklogSize": 26000, + "TxIncomingFilterMaxSize": 500000, + "TxIncomingFilteringFlags": 1, + "TxPoolExponentialIncreaseFactor": 2, + "TxPoolSize": 75000, + "TxSyncIntervalSeconds": 60, + "TxSyncServeResponseSize": 1000000, + "TxSyncTimeoutSeconds": 30, + "UseXForwardedForAddressField": "", + "VerifiedTranscationsCacheSize": 150000 +} diff --git a/tools/block-generator/go.mod b/tools/block-generator/go.mod index 59961ea55a..3610115047 100644 --- a/tools/block-generator/go.mod +++ b/tools/block-generator/go.mod @@ -2,32 +2,33 @@ module github.com/algorand/go-algorand/tools/block-generator replace github.com/algorand/go-algorand => ../.. 
-go 1.23.0 +go 1.25.0 -toolchain go1.23.9 +toolchain go1.25.3 require ( github.com/algorand/avm-abi v0.2.0 github.com/algorand/go-algorand v0.0.0 github.com/algorand/go-codec/codec v1.1.10 - github.com/algorand/go-deadlock v0.2.4 + github.com/algorand/go-deadlock v0.2.5 github.com/lib/pq v1.10.9 - github.com/spf13/cobra v1.7.0 + github.com/spf13/cobra v1.8.1 github.com/stretchr/testify v1.10.0 gopkg.in/yaml.v3 v3.0.1 ) require ( + filippo.io/edwards25519 v1.0.0 // indirect github.com/DataDog/zstd v1.5.2 // indirect github.com/algorand/falcon v0.1.0 // indirect github.com/algorand/go-sumhash v0.1.0 // indirect - github.com/algorand/msgp v1.1.60 // indirect + github.com/algorand/msgp v1.1.61 // indirect github.com/algorand/sortition v1.0.0 // indirect github.com/algorand/websocket v1.4.6 // indirect github.com/aws/aws-sdk-go v1.34.0 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.7.0 // indirect + github.com/bits-and-blooms/bitset v1.20.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cockroachdb/errors v1.8.1 // indirect github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f // indirect @@ -35,8 +36,7 @@ require ( github.com/cockroachdb/redact v1.0.8 // indirect github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230613231145-182959a1fad6 // indirect - github.com/consensys/bavard v0.1.13 // indirect - github.com/consensys/gnark-crypto v0.12.1 // indirect + github.com/consensys/gnark-crypto v0.18.1 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -64,6 +64,7 @@ require ( github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hdevalence/ed25519consensus 
v0.2.0 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/boxo v0.24.3 // indirect @@ -108,7 +109,6 @@ require ( github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.1 // indirect - github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect @@ -126,7 +126,7 @@ require ( github.com/onsi/ginkgo/v2 v2.20.2 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/petermattis/goid v0.0.0-20241025130422-66cb2e6d7274 // indirect + github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe // indirect github.com/pion/datachannel v1.5.9 // indirect github.com/pion/dtls/v2 v2.2.12 // indirect github.com/pion/ice/v2 v2.3.36 // indirect @@ -151,13 +151,13 @@ require ( github.com/prometheus/common v0.60.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/quic-go/qpack v0.5.1 // indirect - github.com/quic-go/quic-go v0.48.2 // indirect + github.com/quic-go/quic-go v0.49.1 // indirect github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/pflag v1.0.6 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/wlynxg/anet v0.0.5 // indirect go.opencensus.io v0.24.0 // indirect @@ -181,5 +181,4 @@ require ( google.golang.org/protobuf v1.35.1 // indirect gopkg.in/sohlich/elogrus.v3 
v3.0.0-20180410122755-1fa29e2f2009 // indirect lukechampine.com/blake3 v1.3.0 // indirect - rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/tools/block-generator/go.sum b/tools/block-generator/go.sum index d7caa73f45..0ada99f446 100644 --- a/tools/block-generator/go.sum +++ b/tools/block-generator/go.sum @@ -6,6 +6,8 @@ dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek= +filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -23,12 +25,12 @@ github.com/algorand/falcon v0.1.0 h1:xl832kfZ7hHG6B4p90DQynjfKFGbIUgUOnsRiMZXfAo github.com/algorand/falcon v0.1.0/go.mod h1:OkQyHlGvS0kLNcIWbC21/uQcnbfwSOQm+wiqWwBG9pQ= github.com/algorand/go-codec/codec v1.1.10 h1:zmWYU1cp64jQVTOG8Tw8wa+k0VfwgXIPbnDfiVa+5QA= github.com/algorand/go-codec/codec v1.1.10/go.mod h1:YkEx5nmr/zuCeaDYOIhlDg92Lxju8tj2d2NrYqP7g7k= -github.com/algorand/go-deadlock v0.2.4 h1:UMs6GwE2wHC6BUZo5z32/+SrBey1LQjbkZQ3V7DoGVA= -github.com/algorand/go-deadlock v0.2.4/go.mod h1:tewhAviZpVq2cnGHmfT50l6RwWLnuygnfNntCN2fz0M= +github.com/algorand/go-deadlock v0.2.5 h1:Kn3WJMn9+wK1pqJrr2+1/y3Z8p1dcftpr2Mbbl1CShw= +github.com/algorand/go-deadlock v0.2.5/go.mod 
h1:z0g1kdYBhezsHoEKqYf5dVnP9dGMwOOqqxUSTCk2Oks= github.com/algorand/go-sumhash v0.1.0 h1:b/QRhyLuF//vOcicBIxBXYW8bERNoeLxieht/dUYpVg= github.com/algorand/go-sumhash v0.1.0/go.mod h1:OOe7jdDWUhLkuP1XytkK5gnLu9entAviN5DfDZh6XAc= -github.com/algorand/msgp v1.1.60 h1:+IVUC34+tSj1P2M1mkYtl4GLyfzdzXfBLSw6TDT19M8= -github.com/algorand/msgp v1.1.60/go.mod h1:RqZQBzAFDWpwh5TlabzZkWy+6kwL9cvXfLbU0gD99EA= +github.com/algorand/msgp v1.1.61 h1:IDSCGKLIi60n6j0lHDu37GTsCo9anw49Rq4PTwsDQsQ= +github.com/algorand/msgp v1.1.61/go.mod h1:j9sEjNKkS12H0Yhwov/3MfzhM60n3iyr81Ymzv49pu8= github.com/algorand/sortition v1.0.0 h1:PJiZtdSTBm4nArQrZXBnhlljHXhuyAXRJBqVWowQu3E= github.com/algorand/sortition v1.0.0/go.mod h1:23CZwAbTWPv0bBsq+Php/2J6Y/iXDyzlfcZyepeY5Fo= github.com/algorand/websocket v1.4.6 h1:I0kV4EYwatuUrKtNiwzYYgojgwh6pksDmlqntKG2Woc= @@ -45,8 +47,8 @@ github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= -github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU= +github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -76,10 +78,8 @@ github.com/cockroachdb/sentry-go 
v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk github.com/cockroachdb/tokenbucket v0.0.0-20230613231145-182959a1fad6 h1:DJK8W/iB+s/qkTtmXSrHA49lp5O3OsR7E6z4byOLy34= github.com/cockroachdb/tokenbucket v0.0.0-20230613231145-182959a1fad6/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= -github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= -github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= -github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= -github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= +github.com/consensys/gnark-crypto v0.18.1 h1:RyLV6UhPRoYYzaFnPQA4qK3DyuDgkTgskDdoGqFt3fI= +github.com/consensys/gnark-crypto v0.18.1/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= @@ -93,7 +93,7 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -208,7 +208,6 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20241017200806-017d972448fc h1:NGyrhhFhwvRAZg02jnYVg3GBQy0qGBKmFQJwaPmpmxs= github.com/google/pprof v0.0.0-20241017200806-017d972448fc/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= -github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -236,6 +235,8 @@ github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= +github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= @@ -319,8 +320,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= 
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= -github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= -github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4= +github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= @@ -395,9 +396,6 @@ github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dz github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= -github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= -github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= @@ -456,8 +454,8 @@ github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTm github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pelletier/go-toml 
v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/petermattis/goid v0.0.0-20241025130422-66cb2e6d7274 h1:qli3BGQK0tYDkSEvZ/FzZTi9ZrOX86Q6CIhKLGc489A= -github.com/petermattis/goid v0.0.0-20241025130422-66cb2e6d7274/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe h1:vHpqOnPlnkba8iSxU4j/CvDSS9J4+F4473esQsYLGoE= +github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pion/datachannel v1.5.9 h1:LpIWAOYPyDrXtU+BW7X0Yt/vGtYxtXQ8ql7dFfYUVZA= @@ -526,8 +524,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= -github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE= -github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs= +github.com/quic-go/quic-go v0.49.1 h1:e5JXpUyF0f2uFjckQzD8jTghZrOUK1xxDqqZhlwixo0= +github.com/quic-go/quic-go v0.49.1/go.mod h1:s2wDnmCdooUQBmQfpUSTCYBl1/D4FcqbULMMkASvR6s= github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg= github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= @@ -581,12 +579,13 @@ github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2 github.com/spf13/afero v1.1.2/go.mod 
h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -905,7 +904,5 @@ lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= -rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= -rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod 
h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/tools/debug/algodump/main.go b/tools/debug/algodump/main.go index 27233b3dbb..e42b893562 100644 --- a/tools/debug/algodump/main.go +++ b/tools/debug/algodump/main.go @@ -132,7 +132,7 @@ func setDumpHandlers(n network.GossipNode) { dh.tags = make(map[protocol.Tag]bool) } else { dh.tags = make(map[protocol.Tag]bool) - for _, t := range strings.Split(*tags, ",") { + for t := range strings.SplitSeq(*tags, ",") { dh.tags[protocol.Tag(t)] = true fmt.Printf("TAG <%s>\n", t) } diff --git a/tools/debug/logfilter/main.go b/tools/debug/logfilter/main.go index 08830e2533..12db2dd785 100644 --- a/tools/debug/logfilter/main.go +++ b/tools/debug/logfilter/main.go @@ -78,7 +78,7 @@ func logFilter(inFile io.Reader, outFile io.Writer) int { fmt.Fprintf(outFile, "%s\r\n%s\r\n", line, packageOutputBuffer) packageOutputBuffer = "" } else { - fmt.Fprintf(outFile, line+"\r\n") + fmt.Fprint(outFile, line+"\r\n") delete(tests, testName) currentTestName = "" } @@ -93,8 +93,8 @@ func logFilter(inFile io.Reader, outFile io.Writer) int { fmt.Fprintf(outFile, "%s\r\n%s\r\n", line, packageOutputBuffer) packageOutputBuffer = "" } else { - fmt.Fprintf(outFile, test.outputBuffer+"\r\n") - fmt.Fprintf(outFile, line+"\r\n") + fmt.Fprint(outFile, test.outputBuffer+"\r\n") + fmt.Fprint(outFile, line+"\r\n") test.outputBuffer = "" tests[testName] = test currentTestName = "" @@ -113,7 +113,7 @@ func logFilter(inFile io.Reader, outFile io.Writer) int { continue } if strings.HasPrefix(line, "ok ") { - fmt.Fprintf(outFile, line+"\r\n") + fmt.Fprint(outFile, line+"\r\n") packageOutputBuffer = "" continue } @@ -123,7 +123,7 @@ func logFilter(inFile io.Reader, outFile io.Writer) int { fmt.Fprintf(outFile, line+"...\r\n%s\r\n", packageOutputBuffer) } packageOutputBuffer = "" - fmt.Fprintf(outFile, line+"\r\n") + fmt.Fprint(outFile, line+"\r\n") continue } // this is package-oriented output diff --git a/tools/debug/transplanter/main.go 
b/tools/debug/transplanter/main.go index 2433fecbef..5c0de8b0eb 100644 --- a/tools/debug/transplanter/main.go +++ b/tools/debug/transplanter/main.go @@ -14,6 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . +//nolint:unused // old debug program package main import ( diff --git a/tools/network/dnssec/client.go b/tools/network/dnssec/client.go index d485c3812a..1caf39d0bd 100644 --- a/tools/network/dnssec/client.go +++ b/tools/network/dnssec/client.go @@ -87,7 +87,7 @@ func (r *dnsClient) query(ctx context.Context, name string, qtype uint16) (resp return nil, fmt.Errorf("no answer for (%s, %d) from DNS servers %v", name, qtype, r.servers) } -// QueryRRSet returns resource records of qtype for name and and its signatures +// QueryRRSet returns resource records of qtype for name and its signatures func (r *dnsClient) QueryRRSet(ctx context.Context, name string, qtype uint16) ([]dns.RR, []dns.RRSIG, error) { msg, err := r.query(ctx, name, qtype) if err != nil { diff --git a/tools/network/resolver.go b/tools/network/resolver.go index 2c159fc644..42716ac5eb 100644 --- a/tools/network/resolver.go +++ b/tools/network/resolver.go @@ -28,7 +28,7 @@ const ( // Resolver provides equivalent functionality to the net.Resolver with one exception - it allows to use a provided DNS server instead of relying on the existing default resolver. type Resolver struct { - // DNSAddress is the the DNS server that we'll be trying to connect to. + // DNSAddress is the DNS server that we'll be trying to connect to. dnsAddress net.IPAddr resolver ResolverIf } diff --git a/tools/x-repo-types/go.mod b/tools/x-repo-types/go.mod index bd3a1ab9ed..227fbe4c12 100644 --- a/tools/x-repo-types/go.mod +++ b/tools/x-repo-types/go.mod @@ -1,14 +1,14 @@ module github.com/algorand/go-algorand/tools/x-repo-types -go 1.23.0 +go 1.25.0 -toolchain go1.23.9 +toolchain go1.25.3 replace github.com/algorand/go-algorand => ../.. 
require ( github.com/algorand/go-algorand v0.0.0 - github.com/spf13/cobra v1.7.0 + github.com/spf13/cobra v1.8.1 github.com/stretchr/testify v1.10.0 ) @@ -16,6 +16,6 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/pflag v1.0.6 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/tools/x-repo-types/go.sum b/tools/x-repo-types/go.sum index d012d353c8..6802a2188c 100644 --- a/tools/x-repo-types/go.sum +++ b/tools/x-repo-types/go.sum @@ -1,4 +1,4 @@ -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -6,10 +6,11 @@ github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLf github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 
h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= diff --git a/util/cmdUtils.go b/util/cmdUtils.go index 87d62b15c1..32f88a86f9 100644 --- a/util/cmdUtils.go +++ b/util/cmdUtils.go @@ -42,7 +42,6 @@ func RunFuncWithSpinningCursor(asyncFunc func()) { case <-doneChan: finished = true ticker.Stop() - break case <-ticker.C: fmt.Print(progressStrings[i]) fmt.Print("\b") diff --git a/util/db/dbutil_test.go b/util/db/dbutil_test.go index 03b6308b90..fa559d0750 100644 --- a/util/db/dbutil_test.go +++ b/util/db/dbutil_test.go @@ -417,8 +417,8 @@ func TestSetSynchronousMode(t *testing.T) { ctx, cancelFunc := context.WithCancel(context.Background()) cancelFunc() - require.Error(t, context.Canceled, setSynchrounousModeHelper(true, ctx, SynchronousModeOff, false)) - require.Error(t, context.Canceled, setSynchrounousModeHelper(false, ctx, SynchronousModeOff, false)) + require.ErrorIs(t, setSynchrounousModeHelper(true, ctx, SynchronousModeOff, false), context.Canceled) + require.ErrorIs(t, setSynchrounousModeHelper(false, ctx, SynchronousModeOff, false), context.Canceled) require.Contains(t, setSynchrounousModeHelper(false, context.Background(), SynchronousModeOff-1, false).Error(), "invalid value") require.Contains(t, setSynchrounousModeHelper(false, context.Background(), SynchronousModeExtra+1, false).Error(), "invalid value") @@ -481,11 +481,13 @@ func TestReadingWhileWriting(t *testing.T) { // using Write-Ahead Logging (WAL) func TestLockingTableWhileWritingWAL(t *testing.T) { + // partitiontest.PartitionTest(t) // partition handled inside testLockingTableWhileWriting testLockingTableWhileWriting(t, true) } // using the 
default Rollback Journal func TestLockingTableWhileWritingJournal(t *testing.T) { + // partitiontest.PartitionTest(t) // partition handled inside testLockingTableWhileWriting testLockingTableWhileWriting(t, false) } diff --git a/util/db/initialize.go b/util/db/initialize.go index 33ee093884..2996b76437 100644 --- a/util/db/initialize.go +++ b/util/db/initialize.go @@ -38,7 +38,7 @@ func Initialize(accessor Accessor, migrations []Migration) error { return InitializeWithContext(ctx, tx, migrations) }) - var sqlError *sqlite3.Error + var sqlError sqlite3.Error if errors.As(err, &sqlError) { return fmt.Errorf("%w. Sql error - Code: %d, Extended Code: %d", err, sqlError.Code, sqlError.ExtendedCode) } diff --git a/util/execpool/stream_test.go b/util/execpool/stream_test.go index 887fc3f27f..b311acddd5 100644 --- a/util/execpool/stream_test.go +++ b/util/execpool/stream_test.go @@ -308,8 +308,7 @@ func TestErrors(t *testing.T) { asyncDelay: make(chan struct{}, 10), } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() inputChan := make(chan InputJob) mbp := mockBatchProcessor{} diff --git a/util/metrics/counter_test.go b/util/metrics/counter_test.go index 6602dce0bb..ab50110e7d 100644 --- a/util/metrics/counter_test.go +++ b/util/metrics/counter_test.go @@ -66,7 +66,7 @@ func TestMetricCounter(t *testing.T) { test.Lock() defer test.Unlock() - // the the loop above we've created a single metric name with five different labels set ( host0, host1 .. host 4) + // the loop above we've created a single metric name with five different labels set ( host0, host1 .. host 4) // let's see if we received all the 5 different labels. require.Equal(t, 5, len(test.metrics), "Missing metric counts were reported: %+v", test.metrics) @@ -113,7 +113,7 @@ func TestMetricCounterFastInts(t *testing.T) { test.Lock() defer test.Unlock() - // the the loop above we've created a single metric name with five different labels set ( host0, host1 .. 
host 4) + // the loop above we've created a single metric name with five different labels set ( host0, host1 .. host 4) // let's see if we received all the 5 different labels. require.Equal(t, 1, len(test.metrics), "Missing metric counts were reported: %+v", test.metrics) @@ -162,7 +162,7 @@ func TestMetricCounterMixed(t *testing.T) { test.Lock() defer test.Unlock() - // the the loop above we've created a single metric name with five different labels set ( host0, host1 .. host 4) + // the loop above we've created a single metric name with five different labels set ( host0, host1 .. host 4) // let's see if we received all the 5 different labels. require.Equal(t, 1, len(test.metrics), "Missing metric counts were reported: %+v", test.metrics) diff --git a/util/metrics/gauge_test.go b/util/metrics/gauge_test.go index a2c9051482..c6c5c24afc 100644 --- a/util/metrics/gauge_test.go +++ b/util/metrics/gauge_test.go @@ -68,7 +68,7 @@ func TestMetricGauge(t *testing.T) { test.Lock() defer test.Unlock() - // the the loop above we've created 3 separate gauges + // in the loop above we've created 3 separate gauges // let's see if we received all 3 metrics require.Equal(t, 3, len(test.metrics), "Missing metric counts were reported: %+v", test.metrics) diff --git a/util/metrics/metrics_test.go b/util/metrics/metrics_test.go index bb0cfce78c..3f782d242f 100644 --- a/util/metrics/metrics_test.go +++ b/util/metrics/metrics_test.go @@ -69,8 +69,8 @@ func (p *MetricTest) testMetricsHandler(w http.ResponseWriter, r *http.Request) if err != nil { return } - lines := strings.Split(string(body), "\n") - for _, line := range lines { + lines := strings.SplitSeq(string(body), "\n") + for line := range lines { if len(line) < 5 { continue } diff --git a/util/metrics/reporter.go b/util/metrics/reporter.go index db9ae5f964..5eeeb55c59 100644 --- a/util/metrics/reporter.go +++ b/util/metrics/reporter.go @@ -206,7 +206,7 @@ func (reporter *MetricReporter) tryInvokeNodeExporter(ctx context.Context) 
{ var err error if nil == reporter.neSync { // try to create it. - if reporter.neSync, err = net.Listen("tcp", nodeExporterSyncAddr); err != nil { + if reporter.neSync, err = net.Listen("tcp", nodeExporterSyncAddr); err != nil { //nolint:gosec // OK to bind to all interfaces // we couldn't get a hold of this port number; that's an expected behaviour for any algod instance that isn't the first one.. return } diff --git a/util/rateLimit.go b/util/rateLimit.go index 8406711418..3b089e4a1b 100644 --- a/util/rateLimit.go +++ b/util/rateLimit.go @@ -507,7 +507,7 @@ func (cm *redCongestionManager) arrivalRateFor(arrivals *[]time.Time) float64 { // client1 will be throttled proportional to its usage of the service rate. // over time, client2 will fall in line with the appropriate service rate, while other clients will be able to use the newly freed capacity // The net effect is that clients who are disproportionately noisy are dropped more often, -// while quieter ones are are dropped less often. +// while quieter ones are dropped less often. // The reason this works is that the serviceRate represents the ability for the given resource to be serviced (ie, the rate at which work is dequeued). // When congestion management is required, the service should attempt a fair distribution of servicing to all clients. // clients who are making requests in excess of our known ability to fairly service requests should be reduced. diff --git a/util/tar/tar.go b/util/tar/tar.go deleted file mode 100644 index 012ead433f..0000000000 --- a/util/tar/tar.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) 2019-2025 Algorand, Inc. -// This file is part of go-algorand -// -// go-algorand is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as -// published by the Free Software Foundation, either version 3 of the -// License, or (at your option) any later version. 
-// -// go-algorand is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with go-algorand. If not, see . - -package tar - -import ( - "archive/tar" - "compress/gzip" - "fmt" - "io" - "os" - "path/filepath" - "strings" -) - -// CompressFolder takes the name of a folder to compress, and the name of a file in which to store it. -func CompressFolder(src, filename string) error { - f, err := os.Create(filename) - if err != nil { - return err - } - return Compress(src, f) -} - -// Compress takes a source and variable writers and walks 'source' writing each file -// found to the tar writer; the purpose for accepting multiple writers is to allow -// for multiple outputs (for example a file, or md5 hash) -// Copied with minor modifications from https://medium.com/@skdomino/taring-untaring-files-in-go-6b07cf56bc07 -func Compress(src string, writers ...io.Writer) error { - - // ensure the src actually exists before trying to tar it - if _, err := os.Stat(src); err != nil { - return fmt.Errorf("Unable to tar files - %v", err.Error()) - } - - mw := io.MultiWriter(writers...) 
- - gzw := gzip.NewWriter(mw) - defer gzw.Close() - - tw := tar.NewWriter(gzw) - defer tw.Close() - - // walk path - return filepath.Walk(src, func(file string, fi os.FileInfo, err error) error { - - // return on any error - if err != nil { - return err - } - - // create a new dir/file header - header, err := tar.FileInfoHeader(fi, fi.Name()) - if err != nil { - return err - } - - // update the name to correctly reflect the desired destination when untaring - header.Name = strings.TrimPrefix(strings.Replace(file, src, "", -1), string(filepath.Separator)) - - // write the header - if err1 := tw.WriteHeader(header); err1 != nil { - return err1 - } - - // return on non-regular files (thanks to [kumo](https://medium.com/@komuw/just-like-you-did-fbdd7df829d3) for this suggested update) - if !header.FileInfo().Mode().IsRegular() { - return nil - } - - // open files for taring - f, err := os.Open(file) - if err != nil { - return err - } - - // copy file data into tar writer - if _, err := io.Copy(tw, f); err != nil { - return err - } - - // manually close here after each file operation; defering would cause each file close - // to wait until all operations have completed. - f.Close() - - return nil - }) -}