diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 7ac7915f225c..58c1a4a62eae 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -3,21 +3,20 @@ accounts/usbwallet @karalabe accounts/scwallet @gballet -accounts/abi @gballet +accounts/abi @gballet @MariusVanDerWijden cmd/clef @holiman cmd/puppeth @karalabe consensus @karalabe core/ @karalabe @holiman @rjl493456442 -dashboard/ @kurkomisi eth/ @karalabe @holiman @rjl493456442 graphql/ @gballet les/ @zsfelfoldi @rjl493456442 light/ @zsfelfoldi @rjl493456442 mobile/ @karalabe @ligi +node/ @fjl @renaynay p2p/ @fjl @zsfelfoldi rpc/ @fjl @holiman -p2p/simulations @zelig @janos @justelad -p2p/protocols @zelig @janos @justelad -p2p/testing @zelig @janos @justelad +p2p/simulations @fjl +p2p/protocols @fjl +p2p/testing @fjl signer/ @holiman -whisper/ @gballet @gluk256 diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index f87996cdcb94..b86e070eac6b 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -1,40 +1,34 @@ # Contributing -Thank you for considering to help out with the source code! We welcome -contributions from anyone on the internet, and are grateful for even the -smallest of fixes! - -If you'd like to contribute to go-ethereum, please fork, fix, commit and send a -pull request for the maintainers to review and merge into the main code base. If -you wish to submit more complex changes though, please check up with the core -devs first on [our gitter channel](https://gitter.im/ethereum/go-ethereum) to -ensure those changes are in line with the general philosophy of the project -and/or get some early feedback which can make both your efforts much lighter as +Thank you for considering to help out with the source code! We welcome contributions from anyone on the internet, and +are grateful for even the smallest of fixes! + +If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request for the maintainers to +review and merge into the main code base. If you wish to submit more complex changes though, please check up with the +core devs first on [our gitter channel](https://gitter.im/nebulaai/nbai-node) to ensure those changes are in line with +the general philosophy of the project and/or get some early feedback which can make both your efforts much lighter as well as our review and merge procedures quick and simple. ## Coding guidelines Please make sure your contributions adhere to our coding guidelines: - * Code must adhere to the official Go -[formatting](https://golang.org/doc/effective_go.html#formatting) guidelines -(i.e. uses [gofmt](https://golang.org/cmd/gofmt/)). - * Code must be documented adhering to the official Go -[commentary](https://golang.org/doc/effective_go.html#commentary) guidelines. - * Pull requests need to be based on and opened against the `master` branch. - * Commit messages should be prefixed with the package(s) they modify. - * E.g. "eth, rpc: make trace configs optional" +* Code must adhere to the official Go + [formatting](https://golang.org/doc/effective_go.html#formatting) guidelines + (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)). +* Code must be documented adhering to the official Go + [commentary](https://golang.org/doc/effective_go.html#commentary) guidelines. +* Pull requests need to be based on and opened against the `master` branch. +* Commit messages should be prefixed with the package(s) they modify. + * E.g. 
"eth, rpc: make trace configs optional" ## Can I have feature X -Before you submit a feature request, please check and make sure that it isn't -possible through some other means. The JavaScript-enabled console is a powerful -feature in the right hands. Please check our -[Wiki page](https://github.com/ethereum/go-ethereum/wiki) for more info -and help. +Before you submit a feature request, please check and make sure that it isn't possible through some other means. The +JavaScript-enabled console is a powerful feature in the right hands. Please check our +[Wiki page](https://github.com/nebulaai/nbai-node/wiki) for more info and help. ## Configuration, dependencies, and tests -Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide) -for more details on configuring your environment, managing project dependencies -and testing procedures. +Please see the [Developers' Guide](https://github.com/nebulaai/nbai-node/wiki/Developers'-Guide) +for more details on configuring your environment, managing project dependencies and testing procedures. diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE/bug.md similarity index 52% rename from .github/ISSUE_TEMPLATE.md rename to .github/ISSUE_TEMPLATE/bug.md index 4e638166ff71..c5a3654bde61 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE/bug.md @@ -1,8 +1,10 @@ -Hi there, - -Please note that this is an issue tracker reserved for bug reports and feature requests. - -For general questions please use the gitter channel or the Ethereum stack exchange at https://ethereum.stackexchange.com. +--- +name: Report a bug +about: Something with go-ethereum is not working as expected +title: '' +labels: 'type:bug' +assignees: '' +--- #### System information diff --git a/.github/ISSUE_TEMPLATE/feature.md b/.github/ISSUE_TEMPLATE/feature.md new file mode 100644 index 000000000000..aacd885f9e5e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature.md @@ -0,0 +1,17 @@ +--- +name: Request a feature +about: Report a missing feature - e.g. as a step before submitting a PR +title: '' +labels: 'type:feature' +assignees: '' +--- + +# Rationale + +Why should this feature exist? +What are the use-cases? + +# Implementation + +Do you have ideas regarding the implementation of this feature? +Are you willing to implement this feature? \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md new file mode 100644 index 000000000000..8f460ab558ec --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question.md @@ -0,0 +1,9 @@ +--- +name: Ask a question +about: Something is unclear +title: '' +labels: 'type:docs' +assignees: '' +--- + +This should only be used in very rare cases e.g. if you are not 100% sure if something is a bug or asking a question that leads to improving the documentation. For general questions please use [discord](https://discord.gg/nthXNEv) or the Ethereum stack exchange at https://ethereum.stackexchange.com. diff --git a/.gitmodules b/.gitmodules index 32bdb3b6e5a6..e69de29bb2d1 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +0,0 @@ -[submodule "tests"] - path = tests/testdata - url = https://github.com/ethereum/tests diff --git a/.golangci.yml b/.golangci.yml index 44fce8bea302..18b325e2065c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,11 +1,13 @@ # This file configures github.com/golangci/golangci-lint. run: - timeout: 2m + timeout: 3m tests: true # default is true. 
Enables skipping of directories: # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ skip-dirs-use-default: true + skip-files: + - core/genesis_alloc.go linters: disable-all: true @@ -43,3 +45,6 @@ issues: - path: core/vm/instructions_test.go linters: - goconst + - path: cmd/faucet/ + linters: + - deadcode diff --git a/.travis.yml b/.travis.yml index 9568c13ade7e..1268c6d65724 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,12 +2,21 @@ language: go go_import_path: github.com/ethereum/go-ethereum sudo: false jobs: + allow_failures: + - stage: build + os: osx + go: 1.14.x + env: + - azure-osx + - azure-ios + - cocoapods-ios + include: - # This builder only tests code linters on latest version of Go + # This builder only tests code linters on latest version of Go - stage: lint os: linux dist: xenial - go: 1.13.x + go: 1.15.x env: - lint git: @@ -15,70 +24,15 @@ jobs: script: - go run build/ci.go lint - - stage: build - os: linux - dist: xenial - go: 1.11.x - env: - - GO111MODULE=on - script: - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES - - - stage: build - os: linux - dist: xenial - go: 1.12.x - env: - - GO111MODULE=on - script: - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES - - # These are the latest Go versions. - - stage: build - os: linux - arch: amd64 - dist: xenial - go: 1.13.x - script: - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES - - - stage: build - if: type = pull_request - os: linux - arch: arm64 - dist: xenial - go: 1.13.x - script: - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES - - - stage: build - os: osx - go: 1.13.x - script: - - echo "Increase the maximum number of open file descriptors on macOS" - - NOFILE=20480 - - sudo sysctl -w kern.maxfiles=$NOFILE - - sudo sysctl -w kern.maxfilesperproc=$NOFILE - - sudo launchctl limit maxfiles $NOFILE $NOFILE - - sudo launchctl limit maxfiles - - ulimit -S -n $NOFILE - - ulimit -n - - unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703 - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES - # This builder does the Ubuntu PPA upload - stage: build if: type = push os: linux dist: xenial - go: 1.13.x + go: 1.15.x env: - ubuntu-ppa + - GO111MODULE=on git: submodules: false # avoid cloning ethereum/tests addons: @@ -92,7 +46,7 @@ jobs: - python-paramiko script: - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts - - go run build/ci.go debsrc -goversion 1.13.4 -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder " + - go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder " # This builder does the Linux Azure uploads - stage: build @@ -100,9 +54,10 @@ jobs: os: linux dist: xenial sudo: required - go: 1.13.x + go: 1.15.x env: - azure-linux + - GO111MODULE=on git: submodules: false # avoid cloning ethereum/tests addons: @@ -111,23 +66,23 @@ jobs: - gcc-multilib script: # Build for the primary platforms that Trusty can manage - - go 
run build/ci.go install - - go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - go run build/ci.go install -arch 386 - - go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go install -dlgo + - go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds + - go run build/ci.go install -dlgo -arch 386 + - go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds # Switch over GCC to cross compilation (breaks 386, hence why do it here only) - sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-arm-linux-gnueabihf libc6-dev-armhf-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross - sudo ln -s /usr/include/asm-generic /usr/include/asm - - GOARM=5 go run build/ci.go install -arch arm -cc arm-linux-gnueabi-gcc - - GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - GOARM=6 go run build/ci.go install -arch arm -cc arm-linux-gnueabi-gcc - - GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - GOARM=7 go run build/ci.go install -arch arm -cc arm-linux-gnueabihf-gcc - - GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - go run build/ci.go install -arch arm64 -cc aarch64-linux-gnu-gcc - - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - GOARM=5 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabi-gcc + - GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds + - GOARM=6 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabi-gcc + - GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds + - GOARM=7 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabihf-gcc + - GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds + - go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc + - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds # This builder does the Linux Azure MIPS xgo uploads - stage: build @@ -136,27 +91,28 @@ jobs: dist: xenial services: - docker - go: 1.13.x + go: 1.15.x env: - azure-linux-mips + - GO111MODULE=on git: submodules: false # avoid cloning ethereum/tests script: - go run build/ci.go xgo --alltools -- --targets=linux/mips --ldflags '-extldflags "-static"' -v - for bin in build/bin/*-linux-mips; do mv -f "${bin}" "${bin/-linux-mips/}"; done - - go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go xgo --alltools -- --targets=linux/mipsle --ldflags '-extldflags "-static"' -v - for bin in build/bin/*-linux-mipsle; do mv -f "${bin}" "${bin/-linux-mipsle/}"; done - - go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -signify 
SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go xgo --alltools -- --targets=linux/mips64 --ldflags '-extldflags "-static"' -v - for bin in build/bin/*-linux-mips64; do mv -f "${bin}" "${bin/-linux-mips64/}"; done - - go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY signify SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go xgo --alltools -- --targets=linux/mips64le --ldflags '-extldflags "-static"' -v - for bin in build/bin/*-linux-mips64le; do mv -f "${bin}" "${bin/-linux-mips64le/}"; done - - go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds # This builder does the Android Maven and Azure uploads - stage: build @@ -179,10 +135,11 @@ jobs: env: - azure-android - maven-android + - GO111MODULE=on git: submodules: false # avoid cloning ethereum/tests before_install: - - curl https://dl.google.com/go/go1.13.linux-amd64.tar.gz | tar -xz + - curl https://dl.google.com/go/go1.15.5.linux-amd64.tar.gz | tar -xz - export PATH=`pwd`/go/bin:$PATH - export GOROOT=`pwd`/go - export GOPATH=$HOME/go @@ -194,22 +151,23 @@ jobs: - mkdir -p $GOPATH/src/github.com/ethereum - ln -s `pwd` $GOPATH/src/github.com/ethereum/go-ethereum - - go run build/ci.go aar -signer ANDROID_SIGNING_KEY -deploy https://oss.sonatype.org -upload gethstore/builds + - go run build/ci.go aar -signer ANDROID_SIGNING_KEY -signify SIGNIFY_KEY -deploy https://oss.sonatype.org -upload gethstore/builds # This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads - stage: build if: type = push os: osx - go: 1.13.x + go: 1.15.x env: - azure-osx - azure-ios - cocoapods-ios + - GO111MODULE=on git: submodules: false # avoid cloning ethereum/tests script: - - go run build/ci.go install - - go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go install -dlgo + - go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds # Build the iOS framework and upload it to CocoaPods and Azure - gem uninstall cocoapods -a -x @@ -224,16 +182,48 @@ jobs: # Workaround for https://github.com/golang/go/issues/23749 - export CGO_CFLAGS_ALLOW='-fmodules|-fblocks|-fobjc-arc' - - go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds + - go run build/ci.go xcode -signer IOS_SIGNING_KEY -signify SIGNIFY_KEY -deploy trunk -upload gethstore/builds + + # These builders run the tests + - stage: build + os: linux + arch: amd64 + dist: xenial + go: 1.15.x + env: + - GO111MODULE=on + script: + - go run build/ci.go test -coverage $TEST_PACKAGES + + - stage: build + if: type = pull_request + os: linux + arch: arm64 + dist: xenial + go: 1.15.x + env: + - GO111MODULE=on + script: + - go run build/ci.go test -coverage $TEST_PACKAGES + + - stage: build + os: linux + dist: xenial + go: 1.14.x + env: + - GO111MODULE=on + script: + - go run build/ci.go test -coverage $TEST_PACKAGES # This builder does the Azure archive purges to avoid accumulating junk - stage: build if: type = cron os: linux dist: xenial - go: 1.13.x + go: 1.15.x env: - azure-purge + - GO111MODULE=on git: submodules: false # avoid cloning ethereum/tests script: diff --git a/COPYING b/COPYING index 8d66e8772370..f288702d2fa1 100644 --- a/COPYING +++ 
b/COPYING @@ -1,7 +1,7 @@ GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 - Copyright (C) 2014 The go-ethereum Authors. + Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -616,4 +616,59 @@ above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. \ No newline at end of file +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/Dockerfile b/Dockerfile index 114e7620581e..d86b77661186 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build Geth in a stock Go builder container -FROM golang:1.13-alpine as builder +FROM golang:1.15-alpine as builder RUN apk add --no-cache make gcc musl-dev linux-headers git @@ -12,5 +12,5 @@ FROM alpine:latest RUN apk add --no-cache ca-certificates COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/ -EXPOSE 8545 8546 8547 30303 30303/udp +EXPOSE 8545 8546 30303 30303/udp ENTRYPOINT ["geth"] diff --git a/Dockerfile.alltools b/Dockerfile.alltools index 2f661ba01c6f..715213c5de0d 100644 --- a/Dockerfile.alltools +++ b/Dockerfile.alltools @@ -1,5 +1,5 @@ # Build Geth in a stock Go builder container -FROM golang:1.13-alpine as builder +FROM golang:1.15-alpine as builder RUN apk add --no-cache make gcc musl-dev linux-headers git @@ -12,4 +12,4 @@ FROM alpine:latest RUN apk add --no-cache ca-certificates COPY --from=builder /go-ethereum/build/bin/* /usr/local/bin/ -EXPOSE 8545 8546 8547 30303 30303/udp +EXPOSE 8545 8546 30303 30303/udp diff --git a/Makefile b/Makefile index 474fe61ed52f..504c601d4725 100644 --- a/Makefile +++ b/Makefile @@ -10,33 +10,36 @@ GOBIN = ./build/bin GO ?= latest +GORUN = env GO111MODULE=on go run -geth: - build/env.sh go run build/ci.go install ./cmd/geth +gnbai: + $(GORUN) build/ci.go install ./cmd/geth @echo "Done building." - @echo "Run \"$(GOBIN)/geth\" to launch geth." + @echo "Run \"$(GOBIN)/gnbai\" to launch gnbai." all: - build/env.sh go run build/ci.go install + $(GORUN) build/ci.go install android: - build/env.sh go run build/ci.go aar --local + $(GORUN) build/ci.go aar --local @echo "Done building." @echo "Import \"$(GOBIN)/geth.aar\" to use the library." + @echo "Import \"$(GOBIN)/geth-sources.jar\" to add javadocs" + @echo "For more info see https://stackoverflow.com/questions/20994336/android-studio-how-to-attach-javadoc" ios: - build/env.sh go run build/ci.go xcode --local + $(GORUN) build/ci.go xcode --local @echo "Done building." @echo "Import \"$(GOBIN)/Geth.framework\" to use the library." test: all - build/env.sh go run build/ci.go test + $(GORUN) build/ci.go test lint: ## Run linters. - build/env.sh go run build/ci.go lint + $(GORUN) build/ci.go lint clean: - go clean -cache + env GO111MODULE=on go clean -cache rm -fr build/_workspace/pkg/ $(GOBIN)/* # The devtools target installs tools required for 'go generate'. 
@@ -63,12 +66,12 @@ geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm geth-linux-mips64 get @ls -ld $(GOBIN)/geth-linux-* geth-linux-386: - build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth + $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/gnbai @echo "Linux 386 cross compilation done:" @ls -ld $(GOBIN)/geth-linux-* | grep 386 geth-linux-amd64: - build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth + $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/gnbai @echo "Linux amd64 cross compilation done:" @ls -ld $(GOBIN)/geth-linux-* | grep amd64 @@ -77,42 +80,42 @@ geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-ar @ls -ld $(GOBIN)/geth-linux-* | grep arm geth-linux-arm-5: - build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth + $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/gnbai @echo "Linux ARMv5 cross compilation done:" @ls -ld $(GOBIN)/geth-linux-* | grep arm-5 geth-linux-arm-6: - build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth + $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/gnbai @echo "Linux ARMv6 cross compilation done:" @ls -ld $(GOBIN)/geth-linux-* | grep arm-6 geth-linux-arm-7: - build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth + $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/gnbai @echo "Linux ARMv7 cross compilation done:" @ls -ld $(GOBIN)/geth-linux-* | grep arm-7 geth-linux-arm64: - build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth + $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/gnbai @echo "Linux ARM64 cross compilation done:" @ls -ld $(GOBIN)/geth-linux-* | grep arm64 geth-linux-mips: - build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth + $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/gnbai @echo "Linux MIPS cross compilation done:" @ls -ld $(GOBIN)/geth-linux-* | grep mips geth-linux-mipsle: - build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth + $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/gnbai @echo "Linux MIPSle cross compilation done:" @ls -ld $(GOBIN)/geth-linux-* | grep mipsle geth-linux-mips64: - build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth + $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/gnbai @echo "Linux MIPS64 cross compilation done:" @ls -ld $(GOBIN)/geth-linux-* | grep mips64 geth-linux-mips64le: - build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth + $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/gnbai @echo "Linux MIPS64le cross compilation done:" @ls -ld $(GOBIN)/geth-linux-* | grep mips64le @@ -121,12 +124,12 @@ geth-darwin: geth-darwin-386 geth-darwin-amd64 @ls -ld $(GOBIN)/geth-darwin-* geth-darwin-386: - build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth + $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/gnbai @echo 
"Darwin 386 cross compilation done:" @ls -ld $(GOBIN)/geth-darwin-* | grep 386 geth-darwin-amd64: - build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth + $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/gnbai @echo "Darwin amd64 cross compilation done:" @ls -ld $(GOBIN)/geth-darwin-* | grep amd64 @@ -135,11 +138,11 @@ geth-windows: geth-windows-386 geth-windows-amd64 @ls -ld $(GOBIN)/geth-windows-* geth-windows-386: - build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth + $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/gnbai @echo "Windows 386 cross compilation done:" @ls -ld $(GOBIN)/geth-windows-* | grep 386 geth-windows-amd64: - build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth + $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/gnbai @echo "Windows amd64 cross compilation done:" @ls -ld $(GOBIN)/geth-windows-* | grep amd64 diff --git a/README.md b/README.md index 92a7125b4914..6dac2dd70606 100644 --- a/README.md +++ b/README.md @@ -1,348 +1,194 @@ -## Go Ethereum +## Go Nebula AI -Official Golang implementation of the Ethereum protocol. +Fork from Official golang implementation of the Ethereum protocol. -[![API Reference]( -https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667 -)](https://godoc.org/github.com/ethereum/go-ethereum) -[![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum) -[![Travis](https://travis-ci.org/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.org/ethereum/go-ethereum) -[![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/nthXNEv) - -Automated builds are available for stable releases and the unstable master branch. Binary -archives are published at https://geth.ethereum.org/downloads/. +Automated builds are available for stable releases and the unstable master branch. Binary archives are published +at https://github.com/nebulaai/nbai-node/releases. ## Building the source -For prerequisites and detailed build instructions please read the [Installation Instructions](https://github.com/ethereum/go-ethereum/wiki/Building-Ethereum) on the wiki. +For prerequisites and detailed build instructions please read the +[Installation Instructions](https://github.com/nebulaai/nbai-node/wiki/Building-Nebula-AI) +on the wiki. -Building `geth` requires both a Go (version 1.10 or later) and a C compiler. You can install -them using your favourite package manager. Once the dependencies are installed, run +Building gnbai requires both a Go (version 1.7 or later) and a C compiler. You can install them using your favourite +package manager. Once the dependencies are installed, run -```shell -make geth -``` + make gnbai or, to build the full suite of utilities: -```shell -make all -``` + make all ## Executables -The go-ethereum project comes with several wrappers/executables found in the `cmd` -directory. +The go-gnbai project comes with several wrappers/executables found in the `cmd` directory. 
-| Command | Description | -| :-----------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. | -| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. | -| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. | -| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). | -| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. | -| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). | -| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. | +| Command | Description | +|:----------:|-------------| +| **`gnbai`** | Our main Nebula AI CLI client. It is the entry point into the Nebula AI network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. 
`gnbai --help` and the [CLI Wiki page](https://github.com/nebulaai/nbai-node/wiki/Command-Line-Options) for command line options. | +| `abigen` | Source code generator to convert Nebula AI contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/nebulaai/nbai-node/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. | +| `bootnode` | Stripped down version of our Nebula AI client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. | +| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). | +| `swarm` | Swarm daemon and tools. This is the entrypoint for the Swarm network. `swarm --help` for command line options and subcommands. See [Swarm README](https://github.com/nebulaai/nbai-node/tree/master/swarm) for more information. | +| `puppeth` | a CLI wizard that aids in creating a new Nebula AI network. | -## Running `geth` +## Running gnbai Going through all the possible command line flags is out of scope here (please consult our -[CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options)), -but we've enumerated a few common parameter combos to get you up to speed quickly -on how you can run your own `geth` instance. +[CLI Wiki page](https://github.com/nebulaai/nbai-node/wiki/Command-Line-Options)), but we've enumerated a few common +parameter combos to get you up to speed quickly on how you can run your own gnbai instance. -### Full node on the main Ethereum network +### Full node on the main Nebula AI network -By far the most common scenario is people wanting to simply interact with the Ethereum -network: create accounts; transfer funds; deploy and interact with contracts. For this -particular use-case the user doesn't care about years-old historical data, so we can -fast-sync quickly to the current state of the network. To do so: +By far the most common scenario is people wanting to simply interact with the Nebula AI network: +create accounts; transfer funds; deploy and interact with contracts. For this particular use-case the user doesn't care +about years-old historical data, so we can fast-sync quickly to the current state of the network. To do so: -```shell -$ geth console +``` +$ gnbai console ``` This command will: - * Start `geth` in fast sync mode (default, can be changed with the `--syncmode` flag), - causing it to download more data in exchange for avoiding processing the entire history - of the Ethereum network, which is very CPU intensive. 
- * Start up `geth`'s built-in interactive [JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console), - (via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API) - as well as `geth`'s own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs). - This tool is optional and if you leave it out you can always attach to an already running - `geth` instance with `geth attach`. - -### A Full node on the Ethereum test network - -Transitioning towards developers, if you'd like to play around with creating Ethereum -contracts, you almost certainly would like to do that without any real money involved until -you get the hang of the entire system. In other words, instead of attaching to the main -network, you want to join the **test** network with your node, which is fully equivalent to -the main network, but with play-Ether only. - -```shell -$ geth --testnet console -``` -The `console` subcommand has the exact same meaning as above and they are equally -useful on the testnet too. Please see above for their explanations if you've skipped here. - -Specifying the `--testnet` flag, however, will reconfigure your `geth` instance a bit: - - * Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth` - will nest itself one level deeper into a `testnet` subfolder (`~/.ethereum/testnet` on - Linux). Note, on OSX and Linux this also means that attaching to a running testnet node - requires the use of a custom endpoint since `geth attach` will try to attach to a - production node endpoint by default. E.g. - `geth attach /testnet/geth.ipc`. Windows users are not affected by - this. - * Instead of connecting the main Ethereum network, the client will connect to the test - network, which uses different P2P bootnodes, different network IDs and genesis states. - -*Note: Although there are some internal protective measures to prevent transactions from -crossing over between the main network and test network, you should make sure to always -use separate accounts for play-money and real-money. Unless you manually move -accounts, `geth` will by default correctly separate the two networks and will not make any -accounts available between them.* - -### Full node on the Rinkeby test network - -The above test network is a cross-client one based on the ethash proof-of-work consensus -algorithm. As such, it has certain extra overhead and is more susceptible to reorganization -attacks due to the network's low difficulty/security. Go Ethereum also supports connecting -to a proof-of-authority based test network called [*Rinkeby*](https://www.rinkeby.io) -(operated by members of the community). This network is lighter, more secure, but is only -supported by go-ethereum. - -```shell -$ geth --rinkeby console +* Start gnbai in fast sync mode (default, can be changed with the `--syncmode` flag), causing it to download more data + in exchange for avoiding processing the entire history of the Nebula AI network, which is very CPU intensive. +* Start up gnbai's built-in + interactive [JavaScript console](https://github.com/nebulaai/nbai-node/wiki/JavaScript-Console), + (via the trailing `console` subcommand) through which you can invoke all + official [`web3` methods](https://github.com/nbaiai/wiki/wiki/JavaScript-API) + as well as gnbai's own [management APIs](https://github.com/nebulaai/nbai-node/wiki/Management-APIs). 
This tool is + optional and if you leave it out you can always attach to an already running gnbai instance with `gnbai attach`. + ``` ### Configuration -As an alternative to passing the numerous flags to the `geth` binary, you can also pass a -configuration file via: +As an alternative to passing the numerous flags to the `gnbai` binary, you can also pass a configuration file via: -```shell -$ geth --config /path/to/your_config.toml ``` -To get an idea how the file should look like you can use the `dumpconfig` subcommand to -export your existing configuration: +$ gnbai --config /path/to/your_config.toml -```shell -$ geth --your-favourite-flags dumpconfig ``` -*Note: This works only with `geth` v1.6.0 and above.* +To get an idea how the file should look like you can use the `dumpconfig` subcommand to export your existing configuration: -#### Docker quick start +``` -One of the quickest ways to get Ethereum up and running on your machine is by using -Docker: +$ gnbai --your-favourite-flags dumpconfig -```shell -docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \ - -p 8545:8545 -p 30303:30303 \ - ethereum/client-go ``` -This will start `geth` in fast-sync mode with a DB memory allowance of 1GB just as the -above command does. It will also create a persistent volume in your home directory for -saving your blockchain as well as map the default ports. There is also an `alpine` tag -available for a slim version of the image. +*Note: This works only with gnbai v0.0.1 and above.* -Do not forget `--rpcaddr 0.0.0.0`, if you want to access RPC from other containers -and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not -accessible from the outside. +### Programatically interfacing Gnbai nodes -### Programmatically interfacing `geth` nodes +As a developer, sooner rather than later you'll want to start interacting with gnbai and the Nebula AI +network via your own programs and not manually through the console. To aid this, gnbai has built-in +support for a JSON-RPC based APIs ([standard APIs](https://github.com/nbaiai/wiki/wiki/JSON-RPC) and +[gnbai specific APIs](https://github.com/nebulaai/nbai-node/wiki/Management-APIs)). These can be +exposed via HTTP, WebSockets and IPC (unix sockets on unix based platforms, and named pipes on Windows). -As a developer, sooner rather than later you'll want to start interacting with `geth` and the -Ethereum network via your own programs and not manually through the console. To aid -this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://github.com/ethereum/wiki/wiki/JSON-RPC) -and [`geth` specific APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs)). -These can be exposed via HTTP, WebSockets and IPC (UNIX sockets on UNIX based -platforms, and named pipes on Windows). - -The IPC interface is enabled by default and exposes all the APIs supported by `geth`, -whereas the HTTP and WS interfaces need to manually be enabled and only expose a -subset of APIs due to security reasons. These can be turned on/off and configured as -you'd expect. +The IPC interface is enabled by default and exposes all the APIs supported by gnbai, whereas the HTTP +and WS interfaces need to manually be enabled and only expose a subset of APIs due to security reasons. +These can be turned on/off and configured as you'd expect. 
HTTP based JSON-RPC API options: * `--rpc` Enable the HTTP-RPC server - * `--rpcaddr` HTTP-RPC server listening interface (default: `localhost`) - * `--rpcport` HTTP-RPC server listening port (default: `8545`) - * `--rpcapi` API's offered over the HTTP-RPC interface (default: `eth,net,web3`) + * `--rpcaddr` HTTP-RPC server listening interface (default: "localhost") + * `--rpcport` HTTP-RPC server listening port (default: 8545) + * `--rpcapi` API's offered over the HTTP-RPC interface (default: "eth,net,web3") * `--rpccorsdomain` Comma separated list of domains from which to accept cross origin requests (browser enforced) * `--ws` Enable the WS-RPC server - * `--wsaddr` WS-RPC server listening interface (default: `localhost`) - * `--wsport` WS-RPC server listening port (default: `8546`) - * `--wsapi` API's offered over the WS-RPC interface (default: `eth,net,web3`) + * `--wsaddr` WS-RPC server listening interface (default: "localhost") + * `--wsport` WS-RPC server listening port (default: 8546) + * `--wsapi` API's offered over the WS-RPC interface (default: "eth,net,web3") * `--wsorigins` Origins from which to accept websockets requests * `--ipcdisable` Disable the IPC-RPC server - * `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,shh,txpool,web3`) + * `--ipcapi` API's offered over the IPC-RPC interface (default: "admin,debug,eth,miner,net,personal,shh,txpool,web3") * `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it) -You'll need to use your own programming environments' capabilities (libraries, tools, etc) to -connect via HTTP, WS or IPC to a `geth` node configured with the above flags and you'll -need to speak [JSON-RPC](https://www.jsonrpc.org/specification) on all transports. You -can reuse the same connection for multiple requests! +You'll need to use your own programming environments' capabilities (libraries, tools, etc) to connect +via HTTP, WS or IPC to a gnbai node configured with the above flags and you'll need to speak [JSON-RPC](http://www.jsonrpc.org/specification) +on all transports. You can reuse the same connection for multiple requests! -**Note: Please understand the security implications of opening up an HTTP/WS based -transport before doing so! Hackers on the internet are actively trying to subvert -Ethereum nodes with exposed APIs! Further, all browser tabs can access locally -running web servers, so malicious web pages could try to subvert locally available -APIs!** +**Note: Please understand the security implications of opening up an HTTP/WS based transport before +doing so! Hackers on the internet are actively trying to subvert Nebula AI nodes with exposed APIs! +Further, all browser tabs can access locally running webservers, so malicious webpages could try to +subvert locally available APIs!** ### Operating a private network -Maintaining your own private network is more involved as a lot of configurations taken for -granted in the official networks need to be manually set up. +Maintaining your own private network is more involved as a lot of configurations taken for granted in +the official networks need to be manually set up. #### Defining the private genesis state -First, you'll need to create the genesis state of your networks, which all nodes need to be -aware of and agree upon. This consists of a small JSON file (e.g. call it `genesis.json`): +First, you'll need to create the genesis state of your networks, which all nodes need to be aware of +and agree upon. 
This consists of a small JSON file (e.g. call it `genesis.json`): ```json { "config": { - "chainId": , - "homesteadBlock": 0, - "eip150Block": 0, - "eip155Block": 0, - "eip158Block": 0, - "byzantiumBlock": 0, - "constantinopleBlock": 0, - "petersburgBlock": 0 - }, - "alloc": {}, - "coinbase": "0x0000000000000000000000000000000000000000", - "difficulty": "0x20000", - "extraData": "", - "gasLimit": "0x2fefd8", - "nonce": "0x0000000000000042", - "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp": "0x00" + "chainId": 0, + "homesteadBlock": 0, + "eip155Block": 0, + "eip158Block": 0 + }, + "alloc" : {}, + "coinbase" : "0x0000000000000000000000000000000000000000", + "difficulty" : "0x20000", + "extraData" : "", + "gasLimit" : "0x2fefd8", + "nonce" : "0x0000000000000042", + "mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000", + "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp" : "0x00" } ``` -The above fields should be fine for most purposes, although we'd recommend changing -the `nonce` to some random value so you prevent unknown remote nodes from being able -to connect to you. If you'd like to pre-fund some accounts for easier testing, create -the accounts and populate the `alloc` field with their addresses. +The above fields should be fine for most purposes, although we'd recommend changing the `nonce` to some random value so +you prevent unknown remote nodes from being able to connect to you. If you'd like to pre-fund some accounts for easier +testing, you can populate the `alloc` field with account configs: ```json "alloc": { - "0x0000000000000000000000000000000000000001": { - "balance": "111111111" - }, - "0x0000000000000000000000000000000000000002": { - "balance": "222222222" - } +"0x0000000000000000000000000000000000000001": {"balance": "111111111"}, +"0x0000000000000000000000000000000000000002": { +"balance": "222222222" +} } ``` -With the genesis state defined in the above JSON file, you'll need to initialize **every** -`geth` node with it prior to starting it up to ensure all blockchain parameters are correctly -set: +With the genesis state defined in the above JSON file, you'll need to initialize **every** gnbai node with it prior to +starting it up to ensure all blockchain parameters are correctly set: -```shell -$ geth init path/to/genesis.json ``` - -#### Creating the rendezvous point - -With all nodes that you want to run initialized to the desired genesis state, you'll need to -start a bootstrap node that others can use to find each other in your network and/or over -the internet. The clean way is to configure and run a dedicated bootnode: - -```shell -$ bootnode --genkey=boot.key -$ bootnode --nodekey=boot.key +$ gnbai init path/to/genesis.json ``` -With the bootnode online, it will display an [`enode` URL](https://github.com/ethereum/wiki/wiki/enode-url-format) -that other nodes can use to connect to it and exchange peer information. Make sure to -replace the displayed IP address information (most probably `[::]`) with your externally -accessible IP to get the actual `enode` URL. 
- -*Note: You could also use a full-fledged `geth` node as a bootnode, but it's the less -recommended way.* - #### Starting up your member nodes -With the bootnode operational and externally reachable (you can try -`telnet ` to ensure it's indeed reachable), start every subsequent `geth` -node pointed to the bootnode for peer discovery via the `--bootnodes` flag. It will -probably also be desirable to keep the data directory of your private network separated, so -do also specify a custom `--datadir` flag. +With the bootnode operational and externally reachable (you can try `telnet ` to ensure it's indeed +reachable), start every subsequent gnbai node pointed to the bootnode for peer discovery via the `--bootnodes` flag. It +will probably also be desirable to keep the data directory of your private network separated, so do also specify a +custom `--datadir` flag. -```shell -$ geth --datadir=path/to/custom/data/folder --bootnodes= ``` - -*Note: Since your network will be completely cut off from the main and test networks, you'll -also need to configure a miner to process transactions and create new blocks for you.* - -#### Running a private miner - -Mining on the public Ethereum network is a complex task as it's only feasible using GPUs, -requiring an OpenCL or CUDA enabled `ethminer` instance. For information on such a -setup, please consult the [EtherMining subreddit](https://www.reddit.com/r/EtherMining/) -and the [Genoil miner](https://github.com/Genoil/cpp-ethereum) repository. - -In a private network setting, however a single CPU miner instance is more than enough for -practical purposes as it can produce a stable stream of blocks at the correct intervals -without needing heavy resources (consider running on a single thread, no need for multiple -ones either). To start a `geth` instance for mining, run it with all your usual flags, extended -by: - -```shell -$ geth --mine --miner.threads=1 --etherbase=0x0000000000000000000000000000000000000000 +$ gnbai --datadir=path/to/custom/data/folder --bootnodes= ``` -Which will start mining blocks and transactions on a single CPU thread, crediting all -proceedings to the account specified by `--etherbase`. You can further tune the mining -by changing the default gas limit blocks converge to (`--targetgaslimit`) and the price -transactions are accepted at (`--gasprice`). - -## Contribution - -Thank you for considering to help out with the source code! We welcome contributions -from anyone on the internet, and are grateful for even the smallest of fixes! - -If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request -for the maintainers to review and merge into the main code base. If you wish to submit -more complex changes though, please check up with the core devs first on [our gitter channel](https://gitter.im/ethereum/go-ethereum) -to ensure those changes are in line with the general philosophy of the project and/or get -some early feedback which can make both your efforts much lighter as well as our review -and merge procedures quick and simple. - -Please make sure your contributions adhere to our coding guidelines: - - * Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting) - guidelines (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)). - * Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) - guidelines. - * Pull requests need to be based on and opened against the `master` branch. 
- * Commit messages should be prefixed with the package(s) they modify. - * E.g. "eth, rpc: make trace configs optional" - -Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide) -for more details on configuring your environment, managing project dependencies, and -testing procedures. +*Note: Since your network will be completely cut off from the main and test networks, you'll also need to configure a +miner to process transactions and create new blocks for you.* ## License -The go-ethereum library (i.e. all code outside of the `cmd` directory) is licensed under the -[GNU Lesser General Public License v3.0](https://www.gnu.org/licenses/lgpl-3.0.en.html), -also included in our repository in the `COPYING.LESSER` file. +The go-gnbai library (i.e. all code outside of the `cmd` directory) is licensed under the +[GNU Lesser General Public License v3.0](https://www.gnu.org/licenses/lgpl-3.0.en.html), also included in our repository +in the `COPYING.LESSER` file. -The go-ethereum binaries (i.e. all code inside of the `cmd` directory) is licensed under the -[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also -included in our repository in the `COPYING` file. +The go-gnbai binaries (i.e. all code inside of the `cmd` directory) is licensed under the +[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also included in our repository in +the `COPYING` file. diff --git a/accounts/abi/abi.go b/accounts/abi/abi.go index 603e956b9df4..5ca7c241db95 100644 --- a/accounts/abi/abi.go +++ b/accounts/abi/abi.go @@ -19,10 +19,12 @@ package abi import ( "bytes" "encoding/json" + "errors" "fmt" "io" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" ) // The ABI holds information about a contract's context and available @@ -32,6 +34,12 @@ type ABI struct { Constructor Method Methods map[string]Method Events map[string]Event + + // Additional "special" functions introduced in solidity v0.6.0. + // It's separated from the original default fallback. Each contract + // can only define one fallback and receive function. + Fallback Method // Note it's also used to represent legacy fallback before v0.6.0 + Receive Method } // JSON returns a parsed ABI interface and error if it failed. 
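The hunk above adds dedicated `Fallback` and `Receive` entries to the `ABI` type for the special function kinds introduced in Solidity v0.6.0. As a rough sketch only (not part of the patch), this is how the new bookkeeping could be exercised against the patched `accounts/abi` package; the contract definition below is invented for the example:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// A tiny, hypothetical ABI: one view function plus a payable receive
	// function; the latter is only parsed once this patch is applied.
	const def = `[
		{"type": "function", "name": "get", "stateMutability": "view", "inputs": [], "outputs": [{"type": "uint256"}]},
		{"type": "receive", "stateMutability": "payable"}
	]`

	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// HasFallback/HasReceive are the helpers added further down in this diff.
	fmt.Println("has fallback:", parsed.HasFallback()) // false
	fmt.Println("has receive: ", parsed.HasReceive())  // true
	// Method.ID is now a plain field rather than the old ID() accessor.
	fmt.Printf("get selector: %x\n", parsed.Methods["get"].ID)
}
```

Before this change the decoder's type switch had no arm for `receive`, so such entries were silently skipped; afterwards they are validated (payable only) and recorded on the `Receive` field.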
@@ -42,7 +50,6 @@ func JSON(reader io.Reader) (ABI, error) { if err := dec.Decode(&abi); err != nil { return ABI{}, err } - return abi, nil } @@ -70,50 +77,80 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) { return nil, err } // Pack up the method ID too if not a constructor and return - return append(method.ID(), arguments...), nil + return append(method.ID, arguments...), nil } -// Unpack output in v according to the abi specification -func (abi ABI) Unpack(v interface{}, name string, data []byte) (err error) { +func (abi ABI) getArguments(name string, data []byte) (Arguments, error) { // since there can't be naming collisions with contracts and events, // we need to decide whether we're calling a method or an event + var args Arguments if method, ok := abi.Methods[name]; ok { if len(data)%32 != 0 { - return fmt.Errorf("abi: improperly formatted output: %s - Bytes: [%+v]", string(data), data) + return nil, fmt.Errorf("abi: improperly formatted output: %s - Bytes: [%+v]", string(data), data) } - return method.Outputs.Unpack(v, data) + args = method.Outputs } if event, ok := abi.Events[name]; ok { - return event.Inputs.Unpack(v, data) + args = event.Inputs } - return fmt.Errorf("abi: could not locate named method or event") + if args == nil { + return nil, errors.New("abi: could not locate named method or event") + } + return args, nil } -// UnpackIntoMap unpacks a log into the provided map[string]interface{} -func (abi ABI) UnpackIntoMap(v map[string]interface{}, name string, data []byte) (err error) { - // since there can't be naming collisions with contracts and events, - // we need to decide whether we're calling a method or an event - if method, ok := abi.Methods[name]; ok { - if len(data)%32 != 0 { - return fmt.Errorf("abi: improperly formatted output") - } - return method.Outputs.UnpackIntoMap(v, data) +// Unpack unpacks the output according to the abi specification. +func (abi ABI) Unpack(name string, data []byte) ([]interface{}, error) { + args, err := abi.getArguments(name, data) + if err != nil { + return nil, err } - if event, ok := abi.Events[name]; ok { - return event.Inputs.UnpackIntoMap(v, data) + return args.Unpack(data) +} + +// UnpackIntoInterface unpacks the output in v according to the abi specification. +// It performs an additional copy. Please only use, if you want to unpack into a +// structure that does not strictly conform to the abi structure (e.g. has additional arguments) +func (abi ABI) UnpackIntoInterface(v interface{}, name string, data []byte) error { + args, err := abi.getArguments(name, data) + if err != nil { + return err + } + unpacked, err := args.Unpack(data) + if err != nil { + return err + } + return args.Copy(v, unpacked) +} + +// UnpackIntoMap unpacks a log into the provided map[string]interface{}. +func (abi ABI) UnpackIntoMap(v map[string]interface{}, name string, data []byte) (err error) { + args, err := abi.getArguments(name, data) + if err != nil { + return err } - return fmt.Errorf("abi: could not locate named method or event") + return args.UnpackIntoMap(v, data) } -// UnmarshalJSON implements json.Unmarshaler interface +// UnmarshalJSON implements json.Unmarshaler interface. func (abi *ABI) UnmarshalJSON(data []byte) error { var fields []struct { - Type string - Name string - Constant bool + Type string + Name string + Inputs []Argument + Outputs []Argument + + // Status indicator which can be: "pure", "view", + // "nonpayable" or "payable". 
+ StateMutability string + + // Deprecated Status indicators, but removed in v0.6.0. + Constant bool // True if function is either pure or view + Payable bool // True if function is payable + + // Event relevant indicator represents the event is + // declared as anonymous. Anonymous bool - Inputs []Argument - Outputs []Argument } if err := json.Unmarshal(data, &fields); err != nil { return err @@ -123,51 +160,75 @@ func (abi *ABI) UnmarshalJSON(data []byte) error { for _, field := range fields { switch field.Type { case "constructor": - abi.Constructor = Method{ - Inputs: field.Inputs, + abi.Constructor = NewMethod("", "", Constructor, field.StateMutability, field.Constant, field.Payable, field.Inputs, nil) + case "function": + name := abi.overloadedMethodName(field.Name) + abi.Methods[name] = NewMethod(name, field.Name, Function, field.StateMutability, field.Constant, field.Payable, field.Inputs, field.Outputs) + case "fallback": + // New introduced function type in v0.6.0, check more detail + // here https://solidity.readthedocs.io/en/v0.6.0/contracts.html#fallback-function + if abi.HasFallback() { + return errors.New("only single fallback is allowed") } - // empty defaults to function according to the abi spec - case "function", "": - name := field.Name - _, ok := abi.Methods[name] - for idx := 0; ok; idx++ { - name = fmt.Sprintf("%s%d", field.Name, idx) - _, ok = abi.Methods[name] + abi.Fallback = NewMethod("", "", Fallback, field.StateMutability, field.Constant, field.Payable, nil, nil) + case "receive": + // New introduced function type in v0.6.0, check more detail + // here https://solidity.readthedocs.io/en/v0.6.0/contracts.html#fallback-function + if abi.HasReceive() { + return errors.New("only single receive is allowed") } - abi.Methods[name] = Method{ - Name: name, - RawName: field.Name, - Const: field.Constant, - Inputs: field.Inputs, - Outputs: field.Outputs, + if field.StateMutability != "payable" { + return errors.New("the statemutability of receive can only be payable") } + abi.Receive = NewMethod("", "", Receive, field.StateMutability, field.Constant, field.Payable, nil, nil) case "event": - name := field.Name - _, ok := abi.Events[name] - for idx := 0; ok; idx++ { - name = fmt.Sprintf("%s%d", field.Name, idx) - _, ok = abi.Events[name] - } - abi.Events[name] = Event{ - Name: name, - RawName: field.Name, - Anonymous: field.Anonymous, - Inputs: field.Inputs, - } + name := abi.overloadedEventName(field.Name) + abi.Events[name] = NewEvent(name, field.Name, field.Anonymous, field.Inputs) + default: + return fmt.Errorf("abi: could not recognize type %v of field %v", field.Type, field.Name) } } - return nil } -// MethodById looks up a method by the 4-byte id -// returns nil if none found +// overloadedMethodName returns the next available name for a given function. +// Needed since solidity allows for function overload. +// +// e.g. if the abi contains Methods send, send1 +// overloadedMethodName would return send2 for input send. +func (abi *ABI) overloadedMethodName(rawName string) string { + name := rawName + _, ok := abi.Methods[name] + for idx := 0; ok; idx++ { + name = fmt.Sprintf("%s%d", rawName, idx) + _, ok = abi.Methods[name] + } + return name +} + +// overloadedEventName returns the next available name for a given event. +// Needed since solidity allows for event overload. +// +// e.g. if the abi contains events received, received1 +// overloadedEventName would return received2 for input received. 
+func (abi *ABI) overloadedEventName(rawName string) string { + name := rawName + _, ok := abi.Events[name] + for idx := 0; ok; idx++ { + name = fmt.Sprintf("%s%d", rawName, idx) + _, ok = abi.Events[name] + } + return name +} + +// MethodById looks up a method by the 4-byte id, +// returns nil if none found. func (abi *ABI) MethodById(sigdata []byte) (*Method, error) { if len(sigdata) < 4 { return nil, fmt.Errorf("data too short (%d bytes) for abi method lookup", len(sigdata)) } for _, method := range abi.Methods { - if bytes.Equal(method.ID(), sigdata[:4]) { + if bytes.Equal(method.ID, sigdata[:4]) { return &method, nil } } @@ -178,9 +239,41 @@ func (abi *ABI) MethodById(sigdata []byte) (*Method, error) { // ABI and returns nil if none found. func (abi *ABI) EventByID(topic common.Hash) (*Event, error) { for _, event := range abi.Events { - if bytes.Equal(event.ID().Bytes(), topic.Bytes()) { + if bytes.Equal(event.ID.Bytes(), topic.Bytes()) { return &event, nil } } return nil, fmt.Errorf("no event with id: %#x", topic.Hex()) } + +// HasFallback returns an indicator whether a fallback function is included. +func (abi *ABI) HasFallback() bool { + return abi.Fallback.Type == Fallback +} + +// HasReceive returns an indicator whether a receive function is included. +func (abi *ABI) HasReceive() bool { + return abi.Receive.Type == Receive +} + +// revertSelector is a special function selector for revert reason unpacking. +var revertSelector = crypto.Keccak256([]byte("Error(string)"))[:4] + +// UnpackRevert resolves the abi-encoded revert reason. According to the solidity +// spec https://solidity.readthedocs.io/en/latest/control-structures.html#revert, +// the provided revert reason is abi-encoded as if it were a call to a function +// `Error(string)`. So it's a special tool for it. 
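
Before the body of UnpackRevert below, a minimal usage sketch: the hex input is the abi-encoded Error("revert reason") payload that the TestUnpackRevert case added later in this diff also uses:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Return data of a reverting call: the Error(string) selector followed by
	// the abi-encoded string "revert reason".
	data := common.Hex2Bytes("08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d72657665727420726561736f6e00000000000000000000000000000000000000")
	reason, err := abi.UnpackRevert(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(reason) // revert reason
}
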
+func UnpackRevert(data []byte) (string, error) { + if len(data) < 4 { + return "", errors.New("invalid data for unpacking") + } + if !bytes.Equal(data[:4], revertSelector) { + return "", errors.New("invalid data for unpacking") + } + typ, _ := NewType("string", "", nil) + unpacked, err := (Arguments{{Type: typ}}).Unpack(data[4:]) + if err != nil { + return "", err + } + return unpacked[0].(string), nil +} diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go index 61ab70eb83ad..ad8acdf52229 100644 --- a/accounts/abi/abi_test.go +++ b/accounts/abi/abi_test.go @@ -19,6 +19,7 @@ package abi import ( "bytes" "encoding/hex" + "errors" "fmt" "math/big" "reflect" @@ -26,57 +27,105 @@ import ( "testing" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" ) const jsondata = ` [ - { "type" : "function", "name" : "balance", "constant" : true }, - { "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] } + { "type" : "function", "name" : ""}, + { "type" : "function", "name" : "balance", "stateMutability" : "view" }, + { "type" : "function", "name" : "send", "inputs" : [ { "name" : "amount", "type" : "uint256" } ] }, + { "type" : "function", "name" : "test", "inputs" : [ { "name" : "number", "type" : "uint32" } ] }, + { "type" : "function", "name" : "string", "inputs" : [ { "name" : "inputs", "type" : "string" } ] }, + { "type" : "function", "name" : "bool", "inputs" : [ { "name" : "inputs", "type" : "bool" } ] }, + { "type" : "function", "name" : "address", "inputs" : [ { "name" : "inputs", "type" : "address" } ] }, + { "type" : "function", "name" : "uint64[2]", "inputs" : [ { "name" : "inputs", "type" : "uint64[2]" } ] }, + { "type" : "function", "name" : "uint64[]", "inputs" : [ { "name" : "inputs", "type" : "uint64[]" } ] }, + { "type" : "function", "name" : "int8", "inputs" : [ { "name" : "inputs", "type" : "int8" } ] }, + { "type" : "function", "name" : "foo", "inputs" : [ { "name" : "inputs", "type" : "uint32" } ] }, + { "type" : "function", "name" : "bar", "inputs" : [ { "name" : "inputs", "type" : "uint32" }, { "name" : "string", "type" : "uint16" } ] }, + { "type" : "function", "name" : "slice", "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] }, + { "type" : "function", "name" : "slice256", "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] }, + { "type" : "function", "name" : "sliceAddress", "inputs" : [ { "name" : "inputs", "type" : "address[]" } ] }, + { "type" : "function", "name" : "sliceMultiAddress", "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] }, + { "type" : "function", "name" : "nestedArray", "inputs" : [ { "name" : "a", "type" : "uint256[2][2]" }, { "name" : "b", "type" : "address[]" } ] }, + { "type" : "function", "name" : "nestedArray2", "inputs" : [ { "name" : "a", "type" : "uint8[][2]" } ] }, + { "type" : "function", "name" : "nestedSlice", "inputs" : [ { "name" : "a", "type" : "uint8[][]" } ] }, + { "type" : "function", "name" : "receive", "inputs" : [ { "name" : "memo", "type" : "bytes" }], "outputs" : [], "payable" : true, "stateMutability" : "payable" }, + { "type" : "function", "name" : "fixedArrStr", "stateMutability" : "view", "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] }, + { "type" : "function", "name" : "fixedArrBytes", "stateMutability" : "view", "inputs" : [ { "name" : "bytes", "type" : "bytes" 
}, { "name" : "fixedArr", "type" : "uint256[2]" } ] }, + { "type" : "function", "name" : "mixedArrStr", "stateMutability" : "view", "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type" : "uint256[2]" }, { "name" : "dynArr", "type" : "uint256[]" } ] }, + { "type" : "function", "name" : "doubleFixedArrStr", "stateMutability" : "view", "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type" : "uint256[2]" }, { "name" : "fixedArr2", "type" : "uint256[3]" } ] }, + { "type" : "function", "name" : "multipleMixedArrStr", "stateMutability" : "view", "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type" : "uint256[2]" }, { "name" : "dynArr", "type" : "uint256[]" }, { "name" : "fixedArr2", "type" : "uint256[3]" } ] }, + { "type" : "function", "name" : "overloadedNames", "stateMutability" : "view", "inputs": [ { "components": [ { "internalType": "uint256", "name": "_f", "type": "uint256" }, { "internalType": "uint256", "name": "__f", "type": "uint256"}, { "internalType": "uint256", "name": "f", "type": "uint256"}],"internalType": "struct Overloader.F", "name": "f","type": "tuple"}]} ]` -const jsondata2 = ` -[ - { "type" : "function", "name" : "balance", "constant" : true }, - { "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] }, - { "type" : "function", "name" : "test", "constant" : false, "inputs" : [ { "name" : "number", "type" : "uint32" } ] }, - { "type" : "function", "name" : "string", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "string" } ] }, - { "type" : "function", "name" : "bool", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "bool" } ] }, - { "type" : "function", "name" : "address", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address" } ] }, - { "type" : "function", "name" : "uint64[2]", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[2]" } ] }, - { "type" : "function", "name" : "uint64[]", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[]" } ] }, - { "type" : "function", "name" : "foo", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" } ] }, - { "type" : "function", "name" : "bar", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" }, { "name" : "string", "type" : "uint16" } ] }, - { "type" : "function", "name" : "slice", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] }, - { "type" : "function", "name" : "slice256", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] }, - { "type" : "function", "name" : "sliceAddress", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address[]" } ] }, - { "type" : "function", "name" : "sliceMultiAddress", "constant" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] }, - { "type" : "function", "name" : "nestedArray", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint256[2][2]" }, { "name" : "b", "type" : "address[]" } ] }, - { "type" : "function", "name" : "nestedArray2", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][2]" } ] }, - { "type" : "function", "name" : "nestedSlice", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][]" } ] } -]` +var ( + Uint256, _ = NewType("uint256", "", nil) + Uint32, _ = NewType("uint32", "", nil) + Uint16, _ = NewType("uint16", "", nil) + String, _ = 
NewType("string", "", nil) + Bool, _ = NewType("bool", "", nil) + Bytes, _ = NewType("bytes", "", nil) + Address, _ = NewType("address", "", nil) + Uint64Arr, _ = NewType("uint64[]", "", nil) + AddressArr, _ = NewType("address[]", "", nil) + Int8, _ = NewType("int8", "", nil) + // Special types for testing + Uint32Arr2, _ = NewType("uint32[2]", "", nil) + Uint64Arr2, _ = NewType("uint64[2]", "", nil) + Uint256Arr, _ = NewType("uint256[]", "", nil) + Uint256Arr2, _ = NewType("uint256[2]", "", nil) + Uint256Arr3, _ = NewType("uint256[3]", "", nil) + Uint256ArrNested, _ = NewType("uint256[2][2]", "", nil) + Uint8ArrNested, _ = NewType("uint8[][2]", "", nil) + Uint8SliceNested, _ = NewType("uint8[][]", "", nil) + TupleF, _ = NewType("tuple", "struct Overloader.F", []ArgumentMarshaling{ + {Name: "_f", Type: "uint256"}, + {Name: "__f", Type: "uint256"}, + {Name: "f", Type: "uint256"}}) +) + +var methods = map[string]Method{ + "": NewMethod("", "", Function, "", false, false, nil, nil), + "balance": NewMethod("balance", "balance", Function, "view", false, false, nil, nil), + "send": NewMethod("send", "send", Function, "", false, false, []Argument{{"amount", Uint256, false}}, nil), + "test": NewMethod("test", "test", Function, "", false, false, []Argument{{"number", Uint32, false}}, nil), + "string": NewMethod("string", "string", Function, "", false, false, []Argument{{"inputs", String, false}}, nil), + "bool": NewMethod("bool", "bool", Function, "", false, false, []Argument{{"inputs", Bool, false}}, nil), + "address": NewMethod("address", "address", Function, "", false, false, []Argument{{"inputs", Address, false}}, nil), + "uint64[]": NewMethod("uint64[]", "uint64[]", Function, "", false, false, []Argument{{"inputs", Uint64Arr, false}}, nil), + "uint64[2]": NewMethod("uint64[2]", "uint64[2]", Function, "", false, false, []Argument{{"inputs", Uint64Arr2, false}}, nil), + "int8": NewMethod("int8", "int8", Function, "", false, false, []Argument{{"inputs", Int8, false}}, nil), + "foo": NewMethod("foo", "foo", Function, "", false, false, []Argument{{"inputs", Uint32, false}}, nil), + "bar": NewMethod("bar", "bar", Function, "", false, false, []Argument{{"inputs", Uint32, false}, {"string", Uint16, false}}, nil), + "slice": NewMethod("slice", "slice", Function, "", false, false, []Argument{{"inputs", Uint32Arr2, false}}, nil), + "slice256": NewMethod("slice256", "slice256", Function, "", false, false, []Argument{{"inputs", Uint256Arr2, false}}, nil), + "sliceAddress": NewMethod("sliceAddress", "sliceAddress", Function, "", false, false, []Argument{{"inputs", AddressArr, false}}, nil), + "sliceMultiAddress": NewMethod("sliceMultiAddress", "sliceMultiAddress", Function, "", false, false, []Argument{{"a", AddressArr, false}, {"b", AddressArr, false}}, nil), + "nestedArray": NewMethod("nestedArray", "nestedArray", Function, "", false, false, []Argument{{"a", Uint256ArrNested, false}, {"b", AddressArr, false}}, nil), + "nestedArray2": NewMethod("nestedArray2", "nestedArray2", Function, "", false, false, []Argument{{"a", Uint8ArrNested, false}}, nil), + "nestedSlice": NewMethod("nestedSlice", "nestedSlice", Function, "", false, false, []Argument{{"a", Uint8SliceNested, false}}, nil), + "receive": NewMethod("receive", "receive", Function, "payable", false, true, []Argument{{"memo", Bytes, false}}, []Argument{}), + "fixedArrStr": NewMethod("fixedArrStr", "fixedArrStr", Function, "view", false, false, []Argument{{"str", String, false}, {"fixedArr", Uint256Arr2, false}}, nil), + "fixedArrBytes": 
NewMethod("fixedArrBytes", "fixedArrBytes", Function, "view", false, false, []Argument{{"bytes", Bytes, false}, {"fixedArr", Uint256Arr2, false}}, nil), + "mixedArrStr": NewMethod("mixedArrStr", "mixedArrStr", Function, "view", false, false, []Argument{{"str", String, false}, {"fixedArr", Uint256Arr2, false}, {"dynArr", Uint256Arr, false}}, nil), + "doubleFixedArrStr": NewMethod("doubleFixedArrStr", "doubleFixedArrStr", Function, "view", false, false, []Argument{{"str", String, false}, {"fixedArr1", Uint256Arr2, false}, {"fixedArr2", Uint256Arr3, false}}, nil), + "multipleMixedArrStr": NewMethod("multipleMixedArrStr", "multipleMixedArrStr", Function, "view", false, false, []Argument{{"str", String, false}, {"fixedArr1", Uint256Arr2, false}, {"dynArr", Uint256Arr, false}, {"fixedArr2", Uint256Arr3, false}}, nil), + "overloadedNames": NewMethod("overloadedNames", "overloadedNames", Function, "view", false, false, []Argument{{"f", TupleF, false}}, nil), +} func TestReader(t *testing.T) { - Uint256, _ := NewType("uint256", "", nil) - exp := ABI{ - Methods: map[string]Method{ - "balance": { - "balance", "balance", true, nil, nil, - }, - "send": { - "send", "send", false, []Argument{ - {"amount", Uint256, false}, - }, nil, - }, - }, + abi := ABI{ + Methods: methods, } - abi, err := JSON(strings.NewReader(jsondata)) + exp, err := JSON(strings.NewReader(jsondata)) if err != nil { - t.Error(err) + t.Fatal(err) } - // deep equal fails for some reason for name, expM := range exp.Methods { gotM, exist := abi.Methods[name] if !exist { @@ -98,8 +147,55 @@ func TestReader(t *testing.T) { } } +func TestInvalidABI(t *testing.T) { + json := `[{ "type" : "function", "name" : "", "constant" : fals }]` + _, err := JSON(strings.NewReader(json)) + if err == nil { + t.Fatal("invalid json should produce error") + } + json2 := `[{ "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "typ" : "uint256" } ] }]` + _, err = JSON(strings.NewReader(json2)) + if err == nil { + t.Fatal("invalid json should produce error") + } +} + +// TestConstructor tests a constructor function. 
+// The test is based on the following contract: +// contract TestConstructor { +// constructor(uint256 a, uint256 b) public{} +// } +func TestConstructor(t *testing.T) { + json := `[{ "inputs": [{"internalType": "uint256","name": "a","type": "uint256" },{ "internalType": "uint256","name": "b","type": "uint256"}],"stateMutability": "nonpayable","type": "constructor"}]` + method := NewMethod("", "", Constructor, "nonpayable", false, false, []Argument{{"a", Uint256, false}, {"b", Uint256, false}}, nil) + // Test from JSON + abi, err := JSON(strings.NewReader(json)) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(abi.Constructor, method) { + t.Error("Missing expected constructor") + } + // Test pack/unpack + packed, err := abi.Pack("", big.NewInt(1), big.NewInt(2)) + if err != nil { + t.Error(err) + } + unpacked, err := abi.Constructor.Inputs.Unpack(packed) + if err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(unpacked[0], big.NewInt(1)) { + t.Error("Unable to pack/unpack from constructor") + } + if !reflect.DeepEqual(unpacked[1], big.NewInt(2)) { + t.Error("Unable to pack/unpack from constructor") + } +} + func TestTestNumbers(t *testing.T) { - abi, err := JSON(strings.NewReader(jsondata2)) + abi, err := JSON(strings.NewReader(jsondata)) if err != nil { t.Fatal(err) } @@ -135,60 +231,22 @@ func TestTestNumbers(t *testing.T) { } } -func TestTestString(t *testing.T) { - abi, err := JSON(strings.NewReader(jsondata2)) - if err != nil { - t.Fatal(err) - } - - if _, err := abi.Pack("string", "hello world"); err != nil { - t.Error(err) - } -} - -func TestTestBool(t *testing.T) { - abi, err := JSON(strings.NewReader(jsondata2)) - if err != nil { - t.Fatal(err) - } - - if _, err := abi.Pack("bool", true); err != nil { - t.Error(err) - } -} - -func TestTestSlice(t *testing.T) { - abi, err := JSON(strings.NewReader(jsondata2)) - if err != nil { - t.Fatal(err) - } - slice := make([]uint64, 2) - if _, err := abi.Pack("uint64[2]", slice); err != nil { - t.Error(err) - } - if _, err := abi.Pack("uint64[]", slice); err != nil { - t.Error(err) - } -} - func TestMethodSignature(t *testing.T) { - String, _ := NewType("string", "", nil) - m := Method{"foo", "foo", false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil} + m := NewMethod("foo", "foo", Function, "", false, false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil) exp := "foo(string,string)" - if m.Sig() != exp { - t.Error("signature mismatch", exp, "!=", m.Sig()) + if m.Sig != exp { + t.Error("signature mismatch", exp, "!=", m.Sig) } idexp := crypto.Keccak256([]byte(exp))[:4] - if !bytes.Equal(m.ID(), idexp) { - t.Errorf("expected ids to match %x != %x", m.ID(), idexp) + if !bytes.Equal(m.ID, idexp) { + t.Errorf("expected ids to match %x != %x", m.ID, idexp) } - uintt, _ := NewType("uint256", "", nil) - m = Method{"foo", "foo", false, []Argument{{"bar", uintt, false}}, nil} + m = NewMethod("foo", "foo", Function, "", false, false, []Argument{{"bar", Uint256, false}}, nil) exp = "foo(uint256)" - if m.Sig() != exp { - t.Error("signature mismatch", exp, "!=", m.Sig()) + if m.Sig != exp { + t.Error("signature mismatch", exp, "!=", m.Sig) } // Method with tuple arguments @@ -204,10 +262,10 @@ func TestMethodSignature(t *testing.T) { {Name: "y", Type: "int256"}, }}, }) - m = Method{"foo", "foo", false, []Argument{{"s", s, false}, {"bar", String, false}}, nil} + m = NewMethod("foo", "foo", Function, "", false, false, []Argument{{"s", s, false}, {"bar", String, false}}, nil) exp = 
"foo((int256,int256[],(int256,int256)[],(int256,int256)[2]),string)" - if m.Sig() != exp { - t.Error("signature mismatch", exp, "!=", m.Sig()) + if m.Sig != exp { + t.Error("signature mismatch", exp, "!=", m.Sig) } } @@ -219,12 +277,12 @@ func TestOverloadedMethodSignature(t *testing.T) { } check := func(name string, expect string, method bool) { if method { - if abi.Methods[name].Sig() != expect { - t.Fatalf("The signature of overloaded method mismatch, want %s, have %s", expect, abi.Methods[name].Sig()) + if abi.Methods[name].Sig != expect { + t.Fatalf("The signature of overloaded method mismatch, want %s, have %s", expect, abi.Methods[name].Sig) } } else { - if abi.Events[name].Sig() != expect { - t.Fatalf("The signature of overloaded event mismatch, want %s, have %s", expect, abi.Events[name].Sig()) + if abi.Events[name].Sig != expect { + t.Fatalf("The signature of overloaded event mismatch, want %s, have %s", expect, abi.Events[name].Sig) } } } @@ -235,7 +293,7 @@ func TestOverloadedMethodSignature(t *testing.T) { } func TestMultiPack(t *testing.T) { - abi, err := JSON(strings.NewReader(jsondata2)) + abi, err := JSON(strings.NewReader(jsondata)) if err != nil { t.Fatal(err) } @@ -400,15 +458,7 @@ func TestInputVariableInputLength(t *testing.T) { } func TestInputFixedArrayAndVariableInputLength(t *testing.T) { - const definition = `[ - { "type" : "function", "name" : "fixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] }, - { "type" : "function", "name" : "fixedArrBytes", "constant" : true, "inputs" : [ { "name" : "str", "type" : "bytes" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] }, - { "type" : "function", "name" : "mixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type": "uint256[2]" }, { "name" : "dynArr", "type": "uint256[]" } ] }, - { "type" : "function", "name" : "doubleFixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type": "uint256[2]" }, { "name" : "fixedArr2", "type": "uint256[3]" } ] }, - { "type" : "function", "name" : "multipleMixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type": "uint256[2]" }, { "name" : "dynArr", "type" : "uint256[]" }, { "name" : "fixedArr2", "type" : "uint256[3]" } ] } - ]` - - abi, err := JSON(strings.NewReader(definition)) + abi, err := JSON(strings.NewReader(jsondata)) if err != nil { t.Error(err) } @@ -555,7 +605,7 @@ func TestInputFixedArrayAndVariableInputLength(t *testing.T) { strvalue = common.RightPadBytes([]byte(strin), 32) fixedarrin1value1 = common.LeftPadBytes(fixedarrin1[0].Bytes(), 32) fixedarrin1value2 = common.LeftPadBytes(fixedarrin1[1].Bytes(), 32) - dynarroffset = U256(big.NewInt(int64(256 + ((len(strin)/32)+1)*32))) + dynarroffset = math.U256Bytes(big.NewInt(int64(256 + ((len(strin)/32)+1)*32))) dynarrlength = make([]byte, 32) dynarrlength[31] = byte(len(dynarrin)) dynarrinvalue1 = common.LeftPadBytes(dynarrin[0].Bytes(), 32) @@ -582,7 +632,7 @@ func TestInputFixedArrayAndVariableInputLength(t *testing.T) { } func TestDefaultFunctionParsing(t *testing.T) { - const definition = `[{ "name" : "balance" }]` + const definition = `[{ "name" : "balance", "type" : "function" }]` abi, err := JSON(strings.NewReader(definition)) if err != nil { @@ -602,8 +652,6 @@ func TestBareEvents(t *testing.T) { { "type" : "event", "name" : "tuple", "inputs" : [{ "indexed":false, 
"name":"t", "type":"tuple", "components":[{"name":"a", "type":"uint256"}] }, { "indexed":true, "name":"arg1", "type":"address" }] } ]` - arg0, _ := NewType("uint256", "", nil) - arg1, _ := NewType("address", "", nil) tuple, _ := NewType("tuple", "", []ArgumentMarshaling{{Name: "a", Type: "uint256"}}) expectedEvents := map[string]struct { @@ -613,12 +661,12 @@ func TestBareEvents(t *testing.T) { "balance": {false, nil}, "anon": {true, nil}, "args": {false, []Argument{ - {Name: "arg0", Type: arg0, Indexed: false}, - {Name: "arg1", Type: arg1, Indexed: true}, + {Name: "arg0", Type: Uint256, Indexed: false}, + {Name: "arg1", Type: Address, Indexed: true}, }}, "tuple": {false, []Argument{ {Name: "t", Type: tuple, Indexed: false}, - {Name: "arg1", Type: arg1, Indexed: true}, + {Name: "arg1", Type: Address, Indexed: true}, }}, } @@ -692,7 +740,7 @@ func TestUnpackEvent(t *testing.T) { } var ev ReceivedEvent - err = abi.Unpack(&ev, "received", data) + err = abi.UnpackIntoInterface(&ev, "received", data) if err != nil { t.Error(err) } @@ -701,7 +749,7 @@ func TestUnpackEvent(t *testing.T) { Sender common.Address } var receivedAddrEv ReceivedAddrEvent - err = abi.Unpack(&receivedAddrEv, "receivedAddr", data) + err = abi.UnpackIntoInterface(&receivedAddrEv, "receivedAddr", data) if err != nil { t.Error(err) } @@ -891,45 +939,25 @@ func TestUnpackIntoMapNamingConflict(t *testing.T) { } func TestABI_MethodById(t *testing.T) { - const abiJSON = `[ - {"type":"function","name":"receive","constant":false,"inputs":[{"name":"memo","type":"bytes"}],"outputs":[],"payable":true,"stateMutability":"payable"}, - {"type":"event","name":"received","anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}]}, - {"type":"function","name":"fixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr","type":"uint256[2]"}]}, - {"type":"function","name":"fixedArrBytes","constant":true,"inputs":[{"name":"str","type":"bytes"},{"name":"fixedArr","type":"uint256[2]"}]}, - {"type":"function","name":"mixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr","type":"uint256[2]"},{"name":"dynArr","type":"uint256[]"}]}, - {"type":"function","name":"doubleFixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr1","type":"uint256[2]"},{"name":"fixedArr2","type":"uint256[3]"}]}, - {"type":"function","name":"multipleMixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr1","type":"uint256[2]"},{"name":"dynArr","type":"uint256[]"},{"name":"fixedArr2","type":"uint256[3]"}]}, - {"type":"function","name":"balance","constant":true}, - {"type":"function","name":"send","constant":false,"inputs":[{"name":"amount","type":"uint256"}]}, - {"type":"function","name":"test","constant":false,"inputs":[{"name":"number","type":"uint32"}]}, - {"type":"function","name":"string","constant":false,"inputs":[{"name":"inputs","type":"string"}]}, - {"type":"function","name":"bool","constant":false,"inputs":[{"name":"inputs","type":"bool"}]}, - {"type":"function","name":"address","constant":false,"inputs":[{"name":"inputs","type":"address"}]}, - {"type":"function","name":"uint64[2]","constant":false,"inputs":[{"name":"inputs","type":"uint64[2]"}]}, - {"type":"function","name":"uint64[]","constant":false,"inputs":[{"name":"inputs","type":"uint64[]"}]}, - 
{"type":"function","name":"foo","constant":false,"inputs":[{"name":"inputs","type":"uint32"}]}, - {"type":"function","name":"bar","constant":false,"inputs":[{"name":"inputs","type":"uint32"},{"name":"string","type":"uint16"}]}, - {"type":"function","name":"_slice","constant":false,"inputs":[{"name":"inputs","type":"uint32[2]"}]}, - {"type":"function","name":"__slice256","constant":false,"inputs":[{"name":"inputs","type":"uint256[2]"}]}, - {"type":"function","name":"sliceAddress","constant":false,"inputs":[{"name":"inputs","type":"address[]"}]}, - {"type":"function","name":"sliceMultiAddress","constant":false,"inputs":[{"name":"a","type":"address[]"},{"name":"b","type":"address[]"}]} - ] -` - abi, err := JSON(strings.NewReader(abiJSON)) + abi, err := JSON(strings.NewReader(jsondata)) if err != nil { t.Fatal(err) } for name, m := range abi.Methods { a := fmt.Sprintf("%v", m) - m2, err := abi.MethodById(m.ID()) + m2, err := abi.MethodById(m.ID) if err != nil { t.Fatalf("Failed to look up ABI method: %v", err) } b := fmt.Sprintf("%v", m2) if a != b { - t.Errorf("Method %v (id %x) not 'findable' by id in ABI", name, m.ID()) + t.Errorf("Method %v (id %x) not 'findable' by id in ABI", name, m.ID) } } + // test unsuccessful lookups + if _, err = abi.MethodById(crypto.Keccak256()); err == nil { + t.Error("Expected error: no method with this id") + } // Also test empty if _, err := abi.MethodById([]byte{0x00}); err == nil { t.Errorf("Expected error, too short to decode data") @@ -995,8 +1023,8 @@ func TestABI_EventById(t *testing.T) { t.Errorf("We should find a event for topic %s, test #%d", topicID.Hex(), testnum) } - if event.ID() != topicID { - t.Errorf("Event id %s does not match topic %s, test #%d", event.ID().Hex(), topicID.Hex(), testnum) + if event.ID != topicID { + t.Errorf("Event id %s does not match topic %s, test #%d", event.ID.Hex(), topicID.Hex(), testnum) } unknowntopicID := crypto.Keccak256Hash([]byte("unknownEvent")) @@ -1010,8 +1038,10 @@ func TestABI_EventById(t *testing.T) { } } -func TestDuplicateMethodNames(t *testing.T) { - abiJSON := `[{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"},{"name":"customFallback","type":"string"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]` +// TestDoubleDuplicateMethodNames checks that if transfer0 already exists, there won't be a name +// conflict and that the second transfer method will be renamed transfer1. 
+func TestDoubleDuplicateMethodNames(t *testing.T) { + abiJSON := `[{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"}],"name":"transfer0","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"},{"name":"customFallback","type":"string"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]` contractAbi, err := JSON(strings.NewReader(abiJSON)) if err != nil { t.Fatal(err) @@ -1030,24 +1060,86 @@ func TestDuplicateMethodNames(t *testing.T) { } } -// TestDoubleDuplicateMethodNames checks that if transfer0 already exists, there won't be a name -// conflict and that the second transfer method will be renamed transfer1. -func TestDoubleDuplicateMethodNames(t *testing.T) { - abiJSON := `[{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"}],"name":"transfer0","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"},{"name":"customFallback","type":"string"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]` +// TestDoubleDuplicateEventNames checks that if send0 already exists, there won't be a name +// conflict and that the second send event will be renamed send1. +// The test runs the abi of the following contract. 
+// contract DuplicateEvent { +// event send(uint256 a); +// event send0(); +// event send(); +// } +func TestDoubleDuplicateEventNames(t *testing.T) { + abiJSON := `[{"anonymous": false,"inputs": [{"indexed": false,"internalType": "uint256","name": "a","type": "uint256"}],"name": "send","type": "event"},{"anonymous": false,"inputs": [],"name": "send0","type": "event"},{ "anonymous": false, "inputs": [],"name": "send","type": "event"}]` contractAbi, err := JSON(strings.NewReader(abiJSON)) if err != nil { t.Fatal(err) } - if _, ok := contractAbi.Methods["transfer"]; !ok { - t.Fatalf("Could not find original method") + if _, ok := contractAbi.Events["send"]; !ok { + t.Fatalf("Could not find original event") } - if _, ok := contractAbi.Methods["transfer0"]; !ok { - t.Fatalf("Could not find duplicate method") + if _, ok := contractAbi.Events["send0"]; !ok { + t.Fatalf("Could not find duplicate event") } - if _, ok := contractAbi.Methods["transfer1"]; !ok { - t.Fatalf("Could not find duplicate method") + if _, ok := contractAbi.Events["send1"]; !ok { + t.Fatalf("Could not find duplicate event") } - if _, ok := contractAbi.Methods["transfer2"]; ok { - t.Fatalf("Should not have found extra method") + if _, ok := contractAbi.Events["send2"]; ok { + t.Fatalf("Should not have found extra event") + } +} + +// TestUnnamedEventParam checks that an event with unnamed parameters is +// correctly handled. +// The test runs the abi of the following contract. +// contract TestEvent { +// event send(uint256, uint256); +// } +func TestUnnamedEventParam(t *testing.T) { + abiJSON := `[{ "anonymous": false, "inputs": [{ "indexed": false,"internalType": "uint256", "name": "","type": "uint256"},{"indexed": false,"internalType": "uint256","name": "","type": "uint256"}],"name": "send","type": "event"}]` + contractAbi, err := JSON(strings.NewReader(abiJSON)) + if err != nil { + t.Fatal(err) + } + + event, ok := contractAbi.Events["send"] + if !ok { + t.Fatalf("Could not find event") + } + if event.Inputs[0].Name != "arg0" { + t.Fatalf("Could not find input") + } + if event.Inputs[1].Name != "arg1" { + t.Fatalf("Could not find input") + } +} + +func TestUnpackRevert(t *testing.T) { + t.Parallel() + + var cases = []struct { + input string + expect string + expectErr error + }{ + {"", "", errors.New("invalid data for unpacking")}, + {"08c379a1", "", errors.New("invalid data for unpacking")}, + {"08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d72657665727420726561736f6e00000000000000000000000000000000000000", "revert reason", nil}, + } + for index, c := range cases { + t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) { + got, err := UnpackRevert(common.Hex2Bytes(c.input)) + if c.expectErr != nil { + if err == nil { + t.Fatalf("Expected non-nil error") + } + if err.Error() != c.expectErr.Error() { + t.Fatalf("Expected error mismatch, want %v, got %v", c.expectErr, err) + } + return + } + if c.expect != got { + t.Fatalf("Output mismatch, want %v, got %v", c.expect, got) + } + }) } } diff --git a/accounts/abi/argument.go b/accounts/abi/argument.go index f8ec11b9fa89..e6d52455965d 100644 --- a/accounts/abi/argument.go +++ b/accounts/abi/argument.go @@ -41,7 +41,7 @@ type ArgumentMarshaling struct { Indexed bool } -// UnmarshalJSON implements json.Unmarshaler interface +// UnmarshalJSON implements json.Unmarshaler interface. 
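
TestUnpackRevert and TestUnnamedEventParam above pin down behaviour rather than implementation; in particular, unnamed inputs are expected to receive the synthetic names arg0, arg1, and so on. A short sketch reproducing that expectation outside the test suite, using the event definition from TestUnnamedEventParam:

package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Event with two unnamed uint256 parameters, as in TestUnnamedEventParam.
	const def = `[{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"","type":"uint256"}],"name":"send","type":"event"}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	ev := parsed.Events["send"]
	fmt.Println(ev.Inputs[0].Name, ev.Inputs[1].Name) // arg0 arg1
}
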
func (argument *Argument) UnmarshalJSON(data []byte) error { var arg ArgumentMarshaling err := json.Unmarshal(data, &arg) @@ -59,19 +59,7 @@ func (argument *Argument) UnmarshalJSON(data []byte) error { return nil } -// LengthNonIndexed returns the number of arguments when not counting 'indexed' ones. Only events -// can ever have 'indexed' arguments, it should always be false on arguments for method input/output -func (arguments Arguments) LengthNonIndexed() int { - out := 0 - for _, arg := range arguments { - if !arg.Indexed { - out++ - } - } - return out -} - -// NonIndexed returns the arguments with indexed arguments filtered out +// NonIndexed returns the arguments with indexed arguments filtered out. func (arguments Arguments) NonIndexed() Arguments { var ret []Argument for _, arg := range arguments { @@ -82,216 +70,127 @@ func (arguments Arguments) NonIndexed() Arguments { return ret } -// isTuple returns true for non-atomic constructs, like (uint,uint) or uint[] +// isTuple returns true for non-atomic constructs, like (uint,uint) or uint[]. func (arguments Arguments) isTuple() bool { return len(arguments) > 1 } -// Unpack performs the operation hexdata -> Go format -func (arguments Arguments) Unpack(v interface{}, data []byte) error { +// Unpack performs the operation hexdata -> Go format. +func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) { if len(data) == 0 { if len(arguments) != 0 { - return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected") - } else { - return nil // Nothing to unmarshal, return + return nil, fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected") } + // Nothing to unmarshal, return default variables + nonIndexedArgs := arguments.NonIndexed() + defaultVars := make([]interface{}, len(nonIndexedArgs)) + for index, arg := range nonIndexedArgs { + defaultVars[index] = reflect.New(arg.Type.GetType()) + } + return defaultVars, nil } - // make sure the passed value is arguments pointer - if reflect.Ptr != reflect.ValueOf(v).Kind() { - return fmt.Errorf("abi: Unpack(non-pointer %T)", v) - } - marshalledValues, err := arguments.UnpackValues(data) - if err != nil { - return err - } - if arguments.isTuple() { - return arguments.unpackTuple(v, marshalledValues) - } - return arguments.unpackAtomic(v, marshalledValues[0]) + return arguments.UnpackValues(data) } -// UnpackIntoMap performs the operation hexdata -> mapping of argument name to argument value +// UnpackIntoMap performs the operation hexdata -> mapping of argument name to argument value. func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error { + // Make sure map is not nil + if v == nil { + return fmt.Errorf("abi: cannot unpack into a nil map") + } if len(data) == 0 { if len(arguments) != 0 { return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected") - } else { - return nil // Nothing to unmarshal, return } + return nil // Nothing to unmarshal, return } marshalledValues, err := arguments.UnpackValues(data) if err != nil { return err } - return arguments.unpackIntoMap(v, marshalledValues) -} - -// unpack sets the unmarshalled value to go format. -// Note the dst here must be settable. 
-func unpack(t *Type, dst interface{}, src interface{}) error { - var ( - dstVal = reflect.ValueOf(dst).Elem() - srcVal = reflect.ValueOf(src) - ) - tuple, typ := false, t - for { - if typ.T == SliceTy || typ.T == ArrayTy { - typ = typ.Elem - continue - } - tuple = typ.T == TupleTy - break - } - if !tuple { - return set(dstVal, srcVal) - } - - // Dereferences interface or pointer wrapper - dstVal = indirectInterfaceOrPtr(dstVal) - - switch t.T { - case TupleTy: - if dstVal.Kind() != reflect.Struct { - return fmt.Errorf("abi: invalid dst value for unpack, want struct, got %s", dstVal.Kind()) - } - fieldmap, err := mapArgNamesToStructFields(t.TupleRawNames, dstVal) - if err != nil { - return err - } - for i, elem := range t.TupleElems { - fname := fieldmap[t.TupleRawNames[i]] - field := dstVal.FieldByName(fname) - if !field.IsValid() { - return fmt.Errorf("abi: field %s can't found in the given value", t.TupleRawNames[i]) - } - if err := unpack(elem, field.Addr().Interface(), srcVal.Field(i).Interface()); err != nil { - return err - } - } - return nil - case SliceTy: - if dstVal.Kind() != reflect.Slice { - return fmt.Errorf("abi: invalid dst value for unpack, want slice, got %s", dstVal.Kind()) - } - slice := reflect.MakeSlice(dstVal.Type(), srcVal.Len(), srcVal.Len()) - for i := 0; i < slice.Len(); i++ { - if err := unpack(t.Elem, slice.Index(i).Addr().Interface(), srcVal.Index(i).Interface()); err != nil { - return err - } - } - dstVal.Set(slice) - case ArrayTy: - if dstVal.Kind() != reflect.Array { - return fmt.Errorf("abi: invalid dst value for unpack, want array, got %s", dstVal.Kind()) - } - array := reflect.New(dstVal.Type()).Elem() - for i := 0; i < array.Len(); i++ { - if err := unpack(t.Elem, array.Index(i).Addr().Interface(), srcVal.Index(i).Interface()); err != nil { - return err - } - } - dstVal.Set(array) + for i, arg := range arguments.NonIndexed() { + v[arg.Name] = marshalledValues[i] } return nil } -// unpackIntoMap unpacks marshalledValues into the provided map[string]interface{} -func (arguments Arguments) unpackIntoMap(v map[string]interface{}, marshalledValues []interface{}) error { - // Make sure map is not nil - if v == nil { - return fmt.Errorf("abi: cannot unpack into a nil map") +// Copy performs the operation go format -> provided struct. 
+func (arguments Arguments) Copy(v interface{}, values []interface{}) error { + // make sure the passed value is arguments pointer + if reflect.Ptr != reflect.ValueOf(v).Kind() { + return fmt.Errorf("abi: Unpack(non-pointer %T)", v) } - - for i, arg := range arguments.NonIndexed() { - v[arg.Name] = marshalledValues[i] + if len(values) == 0 { + if len(arguments) != 0 { + return fmt.Errorf("abi: attempting to copy no values while %d arguments are expected", len(arguments)) + } + return nil // Nothing to copy, return } - return nil + if arguments.isTuple() { + return arguments.copyTuple(v, values) + } + return arguments.copyAtomic(v, values[0]) } // unpackAtomic unpacks ( hexdata -> go ) a single value -func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues interface{}) error { - if arguments.LengthNonIndexed() == 0 { - return nil - } - argument := arguments.NonIndexed()[0] - elem := reflect.ValueOf(v).Elem() +func (arguments Arguments) copyAtomic(v interface{}, marshalledValues interface{}) error { + dst := reflect.ValueOf(v).Elem() + src := reflect.ValueOf(marshalledValues) - if elem.Kind() == reflect.Struct && argument.Type.T != TupleTy { - fieldmap, err := mapArgNamesToStructFields([]string{argument.Name}, elem) - if err != nil { - return err - } - field := elem.FieldByName(fieldmap[argument.Name]) - if !field.IsValid() { - return fmt.Errorf("abi: field %s can't be found in the given value", argument.Name) - } - return unpack(&argument.Type, field.Addr().Interface(), marshalledValues) + if dst.Kind() == reflect.Struct && src.Kind() != reflect.Struct { + return set(dst.Field(0), src) } - return unpack(&argument.Type, elem.Addr().Interface(), marshalledValues) + return set(dst, src) } -// unpackTuple unpacks ( hexdata -> go ) a batch of values. -func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error { - var ( - value = reflect.ValueOf(v).Elem() - typ = value.Type() - kind = value.Kind() - ) - if err := requireUnpackKind(value, typ, kind, arguments); err != nil { - return err - } +// copyTuple copies a batch of values from marshalledValues to v. 
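
Copy is the second half of the new two-step flow: Unpack produces a []interface{}, and Copy (reached through ABI.UnpackIntoInterface) writes those values into a caller-supplied struct. A sketch with an illustrative event, where the log data is produced by packing the same arguments:

package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	const def = `[{"type":"event","name":"received","anonymous":false,"inputs":[
		{"indexed":false,"name":"sender","type":"address"},
		{"indexed":false,"name":"amount","type":"uint256"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// Build some log data by packing the event arguments, then unpack it
	// straight into a struct via UnpackIntoInterface (Unpack + Copy).
	sender := common.HexToAddress("0x0000000000000000000000000000000000000001")
	data, err := parsed.Events["received"].Inputs.Pack(sender, big.NewInt(42))
	if err != nil {
		panic(err)
	}
	var ev struct {
		Sender common.Address
		Amount *big.Int
	}
	if err := parsed.UnpackIntoInterface(&ev, "received", data); err != nil {
		panic(err)
	}
	fmt.Println(ev.Sender.Hex(), ev.Amount) // 0x...01 42
}
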
+func (arguments Arguments) copyTuple(v interface{}, marshalledValues []interface{}) error { + value := reflect.ValueOf(v).Elem() + nonIndexedArgs := arguments.NonIndexed() - // If the interface is a struct, get of abi->struct_field mapping - var abi2struct map[string]string - if kind == reflect.Struct { - var ( - argNames []string - err error - ) - for _, arg := range arguments.NonIndexed() { - argNames = append(argNames, arg.Name) + switch value.Kind() { + case reflect.Struct: + argNames := make([]string, len(nonIndexedArgs)) + for i, arg := range nonIndexedArgs { + argNames[i] = arg.Name } - abi2struct, err = mapArgNamesToStructFields(argNames, value) + var err error + abi2struct, err := mapArgNamesToStructFields(argNames, value) if err != nil { return err } - } - for i, arg := range arguments.NonIndexed() { - switch kind { - case reflect.Struct: + for i, arg := range nonIndexedArgs { field := value.FieldByName(abi2struct[arg.Name]) if !field.IsValid() { return fmt.Errorf("abi: field %s can't be found in the given value", arg.Name) } - if err := unpack(&arg.Type, field.Addr().Interface(), marshalledValues[i]); err != nil { + if err := set(field, reflect.ValueOf(marshalledValues[i])); err != nil { return err } - case reflect.Slice, reflect.Array: - if value.Len() < i { - return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(arguments), value.Len()) - } - v := value.Index(i) - if err := requireAssignable(v, reflect.ValueOf(marshalledValues[i])); err != nil { - return err - } - if err := unpack(&arg.Type, v.Addr().Interface(), marshalledValues[i]); err != nil { + } + case reflect.Slice, reflect.Array: + if value.Len() < len(marshalledValues) { + return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(arguments), value.Len()) + } + for i := range nonIndexedArgs { + if err := set(value.Index(i), reflect.ValueOf(marshalledValues[i])); err != nil { return err } - default: - return fmt.Errorf("abi:[2] cannot unmarshal tuple in to %v", typ) } + default: + return fmt.Errorf("abi:[2] cannot unmarshal tuple in to %v", value.Type()) } return nil - } // UnpackValues can be used to unpack ABI-encoded hexdata according to the ABI-specification, // without supplying a struct to unpack into. Instead, this method returns a list containing the // values. An atomic argument will be a list with one element. func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) { - retval := make([]interface{}, 0, arguments.LengthNonIndexed()) + nonIndexedArgs := arguments.NonIndexed() + retval := make([]interface{}, 0, len(nonIndexedArgs)) virtualArgs := 0 - for index, arg := range arguments.NonIndexed() { + for index, arg := range nonIndexedArgs { marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data) if arg.Type.T == ArrayTy && !isDynamicType(arg.Type) { // If we have a static array, like [3]uint256, these are coded as @@ -318,18 +217,18 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) { return retval, nil } -// PackValues performs the operation Go format -> Hexdata -// It is the semantic opposite of UnpackValues +// PackValues performs the operation Go format -> Hexdata. +// It is the semantic opposite of UnpackValues. func (arguments Arguments) PackValues(args []interface{}) ([]byte, error) { return arguments.Pack(args...) } -// Pack performs the operation Go format -> Hexdata +// Pack performs the operation Go format -> Hexdata. 
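
UnpackValues and PackValues remain the schema-free counterparts, converting between hexdata and a plain []interface{}. A round-trip sketch over a single uint256 argument, using an illustrative ABI:

package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	const def = `[{"type":"function","name":"send","inputs":[{"name":"amount","type":"uint256"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	args := parsed.Methods["send"].Inputs

	// Go values -> hexdata -> Go values again.
	packed, err := args.PackValues([]interface{}{big.NewInt(7)})
	if err != nil {
		panic(err)
	}
	values, err := args.UnpackValues(packed)
	if err != nil {
		panic(err)
	}
	fmt.Println(values[0].(*big.Int)) // 7
}
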
func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) { // Make sure arguments match up and pack them abiArgs := arguments if len(args) != len(abiArgs) { - return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(abiArgs)) + return nil, fmt.Errorf("argument count mismatch: got %d for %d", len(args), len(abiArgs)) } // variable input is the output appended at the end of packed // output. This is used for strings and bytes types input. diff --git a/accounts/abi/bind/auth.go b/accounts/abi/bind/auth.go index e51f0bd8ead5..1190772676c5 100644 --- a/accounts/abi/bind/auth.go +++ b/accounts/abi/bind/auth.go @@ -21,6 +21,7 @@ import ( "errors" "io" "io/ioutil" + "math/big" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/external" @@ -28,11 +29,21 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" ) +// ErrNoChainID is returned whenever the user failed to specify a chain id. +var ErrNoChainID = errors.New("no chain id specified") + +// ErrNotAuthorized is returned when an account is not properly unlocked. +var ErrNotAuthorized = errors.New("not authorized to sign this account") + // NewTransactor is a utility method to easily create a transaction signer from // an encrypted json key stream and the associated passphrase. +// +// Deprecated: Use NewTransactorWithChainID instead. func NewTransactor(keyin io.Reader, passphrase string) (*TransactOpts, error) { + log.Warn("WARNING: NewTransactor has been deprecated in favour of NewTransactorWithChainID") json, err := ioutil.ReadAll(keyin) if err != nil { return nil, err @@ -45,13 +56,17 @@ func NewTransactor(keyin io.Reader, passphrase string) (*TransactOpts, error) { } // NewKeyStoreTransactor is a utility method to easily create a transaction signer from -// an decrypted key from a keystore +// an decrypted key from a keystore. +// +// Deprecated: Use NewKeyStoreTransactorWithChainID instead. func NewKeyStoreTransactor(keystore *keystore.KeyStore, account accounts.Account) (*TransactOpts, error) { + log.Warn("WARNING: NewKeyStoreTransactor has been deprecated in favour of NewTransactorWithChainID") + signer := types.HomesteadSigner{} return &TransactOpts{ From: account.Address, - Signer: func(signer types.Signer, address common.Address, tx *types.Transaction) (*types.Transaction, error) { + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { if address != account.Address { - return nil, errors.New("not authorized to sign this account") + return nil, ErrNotAuthorized } signature, err := keystore.SignHash(account, signer.Hash(tx).Bytes()) if err != nil { @@ -64,13 +79,17 @@ func NewKeyStoreTransactor(keystore *keystore.KeyStore, account accounts.Account // NewKeyedTransactor is a utility method to easily create a transaction signer // from a single private key. +// +// Deprecated: Use NewKeyedTransactorWithChainID instead. 
func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts { + log.Warn("WARNING: NewKeyedTransactor has been deprecated in favour of NewKeyedTransactorWithChainID") keyAddr := crypto.PubkeyToAddress(key.PublicKey) + signer := types.HomesteadSigner{} return &TransactOpts{ From: keyAddr, - Signer: func(signer types.Signer, address common.Address, tx *types.Transaction) (*types.Transaction, error) { + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { if address != keyAddr { - return nil, errors.New("not authorized to sign this account") + return nil, ErrNotAuthorized } signature, err := crypto.Sign(signer.Hash(tx).Bytes(), key) if err != nil { @@ -81,14 +100,73 @@ func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts { } } +// NewTransactorWithChainID is a utility method to easily create a transaction signer from +// an encrypted json key stream and the associated passphrase. +func NewTransactorWithChainID(keyin io.Reader, passphrase string, chainID *big.Int) (*TransactOpts, error) { + json, err := ioutil.ReadAll(keyin) + if err != nil { + return nil, err + } + key, err := keystore.DecryptKey(json, passphrase) + if err != nil { + return nil, err + } + return NewKeyedTransactorWithChainID(key.PrivateKey, chainID) +} + +// NewKeyStoreTransactorWithChainID is a utility method to easily create a transaction signer from +// an decrypted key from a keystore. +func NewKeyStoreTransactorWithChainID(keystore *keystore.KeyStore, account accounts.Account, chainID *big.Int) (*TransactOpts, error) { + if chainID == nil { + return nil, ErrNoChainID + } + signer := types.NewEIP155Signer(chainID) + return &TransactOpts{ + From: account.Address, + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { + if address != account.Address { + return nil, ErrNotAuthorized + } + signature, err := keystore.SignHash(account, signer.Hash(tx).Bytes()) + if err != nil { + return nil, err + } + return tx.WithSignature(signer, signature) + }, + }, nil +} + +// NewKeyedTransactorWithChainID is a utility method to easily create a transaction signer +// from a single private key. +func NewKeyedTransactorWithChainID(key *ecdsa.PrivateKey, chainID *big.Int) (*TransactOpts, error) { + keyAddr := crypto.PubkeyToAddress(key.PublicKey) + if chainID == nil { + return nil, ErrNoChainID + } + signer := types.NewEIP155Signer(chainID) + return &TransactOpts{ + From: keyAddr, + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { + if address != keyAddr { + return nil, ErrNotAuthorized + } + signature, err := crypto.Sign(signer.Hash(tx).Bytes(), key) + if err != nil { + return nil, err + } + return tx.WithSignature(signer, signature) + }, + }, nil +} + // NewClefTransactor is a utility method to easily create a transaction signer // with a clef backend. 
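
The *WithChainID constructors above bind the signer to an explicit EIP-155 chain id, which is what the deprecation warnings push callers towards. A sketch of the keyed variant; 1337 matches the chain id the simulated backend is documented to use later in this diff:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	// EIP-155 signer bound to chain id 1337; a nil chain id returns ErrNoChainID.
	auth, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
	if err != nil {
		panic(err)
	}
	fmt.Println(auth.From.Hex())
}
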
func NewClefTransactor(clef *external.ExternalSigner, account accounts.Account) *TransactOpts { return &TransactOpts{ From: account.Address, - Signer: func(signer types.Signer, address common.Address, transaction *types.Transaction) (*types.Transaction, error) { + Signer: func(address common.Address, transaction *types.Transaction) (*types.Transaction, error) { if address != account.Address { - return nil, errors.New("not authorized to sign this account") + return nil, ErrNotAuthorized } return clef.SignTx(account, transaction, nil) // Clef enforces its own chain id }, diff --git a/accounts/abi/bind/backend.go b/accounts/abi/bind/backend.go index ca60cc1b4320..eed6a44bbcf0 100644 --- a/accounts/abi/bind/backend.go +++ b/accounts/abi/bind/backend.go @@ -41,7 +41,7 @@ var ( ErrNoCodeAfterDeploy = errors.New("no contract code after deployment") ) -// ContractCaller defines the methods needed to allow operating with contract on a read +// ContractCaller defines the methods needed to allow operating with a contract on a read // only basis. type ContractCaller interface { // CodeAt returns the code of the given account. This is needed to differentiate @@ -62,8 +62,8 @@ type PendingContractCaller interface { PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error) } -// ContractTransactor defines the methods needed to allow operating with contract -// on a write only basis. Beside the transacting method, the remainder are helpers +// ContractTransactor defines the methods needed to allow operating with a contract +// on a write only basis. Besides the transacting method, the remainder are helpers // used when the user does not provide some needed values, but rather leaves it up // to the transactor to decide. type ContractTransactor interface { diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index e30572e0a111..6e87e037f05b 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -25,8 +25,10 @@ import ( "time" "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" @@ -38,27 +40,32 @@ import ( "github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" ) -// This nil assignment ensures compile time that SimulatedBackend implements bind.ContractBackend. +// This nil assignment ensures at compile time that SimulatedBackend implements bind.ContractBackend. var _ bind.ContractBackend = (*SimulatedBackend)(nil) var ( - errBlockNumberUnsupported = errors.New("simulatedBackend cannot access blocks other than the latest block") - errGasEstimationFailed = errors.New("gas required exceeds allowance or always failing transaction") + errBlockNumberUnsupported = errors.New("simulatedBackend cannot access blocks other than the latest block") + errBlockDoesNotExist = errors.New("block does not exist in blockchain") + errTransactionDoesNotExist = errors.New("transaction does not exist") ) // SimulatedBackend implements bind.ContractBackend, simulating a blockchain in -// the background. 
Its main purpose is to allow easily testing contract bindings. +// the background. Its main purpose is to allow for easy testing of contract bindings. +// Simulated backend implements the following interfaces: +// ChainReader, ChainStateReader, ContractBackend, ContractCaller, ContractFilterer, ContractTransactor, +// DeployBackend, GasEstimator, GasPricer, LogFilterer, PendingContractCaller, TransactionReader, and TransactionSender type SimulatedBackend struct { database ethdb.Database // In memory database to store our testing data blockchain *core.BlockChain // Ethereum blockchain to handle the consensus mu sync.Mutex pendingBlock *types.Block // Currently pending block that will be imported on request - pendingState *state.StateDB // Currently pending state that will be the active on on request + pendingState *state.StateDB // Currently pending state that will be the active on request events *filters.EventSystem // Event system for filtering log events live @@ -67,16 +74,17 @@ type SimulatedBackend struct { // NewSimulatedBackendWithDatabase creates a new binding backend based on the given database // and uses a simulated blockchain for testing purposes. +// A simulated backend always uses chainID 1337. func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend { genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc} genesis.MustCommit(database) - blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil) + blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil, nil) backend := &SimulatedBackend{ database: database, blockchain: blockchain, config: genesis.Config, - events: filters.NewEventSystem(new(event.TypeMux), &filterBackend{database, blockchain}, false), + events: filters.NewEventSystem(&filterBackend{database, blockchain}, false), } backend.rollback() return backend @@ -84,6 +92,7 @@ func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.Genesis // NewSimulatedBackend creates a new binding backend using a simulated blockchain // for testing purposes. +// A simulated backend always uses chainID 1337. func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend { return NewSimulatedBackendWithDatabase(rawdb.NewMemoryDatabase(), alloc, gasLimit) } @@ -116,10 +125,22 @@ func (b *SimulatedBackend) Rollback() { func (b *SimulatedBackend) rollback() { blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(int, *core.BlockGen) {}) - statedb, _ := b.blockchain.State() + stateDB, _ := b.blockchain.State() b.pendingBlock = blocks[0] - b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database()) + b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil) +} + +// stateByBlockNumber retrieves a state by a given blocknumber. +func (b *SimulatedBackend) stateByBlockNumber(ctx context.Context, blockNumber *big.Int) (*state.StateDB, error) { + if blockNumber == nil || blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) == 0 { + return b.blockchain.State() + } + block, err := b.blockByNumberNoLock(ctx, blockNumber) + if err != nil { + return nil, err + } + return b.blockchain.StateAt(block.Root()) } // CodeAt returns the code associated with a certain account in the blockchain. 
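A minimal usage sketch (editor's illustration, not part of the patch) of how the chain-ID-aware transactor constructors and the simulated backend above fit together. Import paths are the ones used in this patch; the funding amount and the 8,000,000 gas limit are arbitrary, and 1337 is the chain ID the simulated backend is documented to use:

```go
package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Generate a throwaway key and build a chain-ID-aware transactor; the
	// simulated backend always runs with chain ID 1337.
	key, _ := crypto.GenerateKey()
	auth, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
	if err != nil {
		panic(err)
	}

	// Fund the account in the genesis allocation of the simulated chain.
	sim := backends.NewSimulatedBackend(
		core.GenesisAlloc{auth.From: {Balance: big.NewInt(params.Ether)}},
		8000000, // block gas limit, illustrative
	)
	defer sim.Close()

	// A nil block number means "latest"; older block numbers are now resolved
	// through stateByBlockNumber instead of being rejected outright.
	bal, err := sim.BalanceAt(context.Background(), auth.From, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("genesis balance:", bal)
}
```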
@@ -127,11 +148,12 @@ func (b *SimulatedBackend) CodeAt(ctx context.Context, contract common.Address, b.mu.Lock() defer b.mu.Unlock() - if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 { - return nil, errBlockNumberUnsupported + stateDB, err := b.stateByBlockNumber(ctx, blockNumber) + if err != nil { + return nil, err } - statedb, _ := b.blockchain.State() - return statedb.GetCode(contract), nil + + return stateDB.GetCode(contract), nil } // BalanceAt returns the wei balance of a certain account in the blockchain. @@ -139,11 +161,12 @@ func (b *SimulatedBackend) BalanceAt(ctx context.Context, contract common.Addres b.mu.Lock() defer b.mu.Unlock() - if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 { - return nil, errBlockNumberUnsupported + stateDB, err := b.stateByBlockNumber(ctx, blockNumber) + if err != nil { + return nil, err } - statedb, _ := b.blockchain.State() - return statedb.GetBalance(contract), nil + + return stateDB.GetBalance(contract), nil } // NonceAt returns the nonce of a certain account in the blockchain. @@ -151,11 +174,12 @@ func (b *SimulatedBackend) NonceAt(ctx context.Context, contract common.Address, b.mu.Lock() defer b.mu.Unlock() - if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 { - return 0, errBlockNumberUnsupported + stateDB, err := b.stateByBlockNumber(ctx, blockNumber) + if err != nil { + return 0, err } - statedb, _ := b.blockchain.State() - return statedb.GetNonce(contract), nil + + return stateDB.GetNonce(contract), nil } // StorageAt returns the value of key in the storage of an account in the blockchain. @@ -163,16 +187,20 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres b.mu.Lock() defer b.mu.Unlock() - if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 { - return nil, errBlockNumberUnsupported + stateDB, err := b.stateByBlockNumber(ctx, blockNumber) + if err != nil { + return nil, err } - statedb, _ := b.blockchain.State() - val := statedb.GetState(contract, key) + + val := stateDB.GetState(contract, key) return val[:], nil } // TransactionReceipt returns the receipt of a transaction. func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { + b.mu.Lock() + defer b.mu.Unlock() + receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash, b.config) return receipt, nil } @@ -196,6 +224,121 @@ func (b *SimulatedBackend) TransactionByHash(ctx context.Context, txHash common. return nil, false, ethereum.NotFound } +// BlockByHash retrieves a block based on the block hash. +func (b *SimulatedBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + b.mu.Lock() + defer b.mu.Unlock() + + if hash == b.pendingBlock.Hash() { + return b.pendingBlock, nil + } + + block := b.blockchain.GetBlockByHash(hash) + if block != nil { + return block, nil + } + + return nil, errBlockDoesNotExist +} + +// BlockByNumber retrieves a block from the database by number, caching it +// (associated with its hash) if found. +func (b *SimulatedBackend) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { + b.mu.Lock() + defer b.mu.Unlock() + + return b.blockByNumberNoLock(ctx, number) +} + +// blockByNumberNoLock retrieves a block from the database by number, caching it +// (associated with its hash) if found without Lock. 
+func (b *SimulatedBackend) blockByNumberNoLock(ctx context.Context, number *big.Int) (*types.Block, error) { + if number == nil || number.Cmp(b.pendingBlock.Number()) == 0 { + return b.blockchain.CurrentBlock(), nil + } + + block := b.blockchain.GetBlockByNumber(uint64(number.Int64())) + if block == nil { + return nil, errBlockDoesNotExist + } + + return block, nil +} + +// HeaderByHash returns a block header from the current canonical chain. +func (b *SimulatedBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { + b.mu.Lock() + defer b.mu.Unlock() + + if hash == b.pendingBlock.Hash() { + return b.pendingBlock.Header(), nil + } + + header := b.blockchain.GetHeaderByHash(hash) + if header == nil { + return nil, errBlockDoesNotExist + } + + return header, nil +} + +// HeaderByNumber returns a block header from the current canonical chain. If number is +// nil, the latest known header is returned. +func (b *SimulatedBackend) HeaderByNumber(ctx context.Context, block *big.Int) (*types.Header, error) { + b.mu.Lock() + defer b.mu.Unlock() + + if block == nil || block.Cmp(b.pendingBlock.Number()) == 0 { + return b.blockchain.CurrentHeader(), nil + } + + return b.blockchain.GetHeaderByNumber(uint64(block.Int64())), nil +} + +// TransactionCount returns the number of transactions in a given block. +func (b *SimulatedBackend) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) { + b.mu.Lock() + defer b.mu.Unlock() + + if blockHash == b.pendingBlock.Hash() { + return uint(b.pendingBlock.Transactions().Len()), nil + } + + block := b.blockchain.GetBlockByHash(blockHash) + if block == nil { + return uint(0), errBlockDoesNotExist + } + + return uint(block.Transactions().Len()), nil +} + +// TransactionInBlock returns the transaction for a specific block at a specific index. +func (b *SimulatedBackend) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) { + b.mu.Lock() + defer b.mu.Unlock() + + if blockHash == b.pendingBlock.Hash() { + transactions := b.pendingBlock.Transactions() + if uint(len(transactions)) < index+1 { + return nil, errTransactionDoesNotExist + } + + return transactions[index], nil + } + + block := b.blockchain.GetBlockByHash(blockHash) + if block == nil { + return nil, errBlockDoesNotExist + } + + transactions := block.Transactions() + if uint(len(transactions)) < index+1 { + return nil, errTransactionDoesNotExist + } + + return transactions[index], nil +} + // PendingCodeAt returns the code associated with an account in the pending state. func (b *SimulatedBackend) PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) { b.mu.Lock() @@ -204,6 +347,36 @@ func (b *SimulatedBackend) PendingCodeAt(ctx context.Context, contract common.Ad return b.pendingState.GetCode(contract), nil } +func newRevertError(result *core.ExecutionResult) *revertError { + reason, errUnpack := abi.UnpackRevert(result.Revert()) + err := errors.New("execution reverted") + if errUnpack == nil { + err = fmt.Errorf("execution reverted: %v", reason) + } + return &revertError{ + error: err, + reason: hexutil.Encode(result.Revert()), + } +} + +// revertError is an API error that encompasses an EVM revert with JSON error +// code and a binary data blob. +type revertError struct { + error + reason string // revert reason hex encoded +} + +// ErrorCode returns the JSON error code for a revert. 
+// See: https://github.com/ethereum/wiki/wiki/JSON-RPC-Error-Codes-Improvement-Proposal +func (e *revertError) ErrorCode() int { + return 3 +} + +// ErrorData returns the hex encoded revert reason. +func (e *revertError) ErrorData() interface{} { + return e.reason +} + // CallContract executes a contract call. func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { b.mu.Lock() @@ -212,12 +385,19 @@ func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallM if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 { return nil, errBlockNumberUnsupported } - state, err := b.blockchain.State() + stateDB, err := b.blockchain.State() if err != nil { return nil, err } - rval, _, _, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), state) - return rval, err + res, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), stateDB) + if err != nil { + return nil, err + } + // If the result contains a revert reason, try to unpack and return it. + if len(res.Revert()) > 0 { + return nil, newRevertError(res) + } + return res.Return(), res.Err } // PendingCallContract executes a contract call on the pending state. @@ -226,8 +406,15 @@ func (b *SimulatedBackend) PendingCallContract(ctx context.Context, call ethereu defer b.mu.Unlock() defer b.pendingState.RevertToSnapshot(b.pendingState.Snapshot()) - rval, _, _, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState) - return rval, err + res, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState) + if err != nil { + return nil, err + } + // If the result contains a revert reason, try to unpack and return it. + if len(res.Revert()) > 0 { + return nil, newRevertError(res) + } + return res.Return(), res.Err } // PendingNonceAt implements PendingStateReader.PendingNonceAt, retrieving @@ -262,25 +449,57 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs } else { hi = b.pendingBlock.GasLimit() } + // Recap the highest gas allowance with account's balance. 
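+	// For example, with a balance of 2.2 ether, a call value of 1000 wei and a
+	// gas price of 2e14 wei, the allowance is (2.2e18-1000)/2e14 = 10999, so hi
+	// is capped at 10999 (the EstimateWithSuperhighPrice test case below exercises this).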
+ if call.GasPrice != nil && call.GasPrice.BitLen() != 0 { + balance := b.pendingState.GetBalance(call.From) // from can't be nil + available := new(big.Int).Set(balance) + if call.Value != nil { + if call.Value.Cmp(available) >= 0 { + return 0, errors.New("insufficient funds for transfer") + } + available.Sub(available, call.Value) + } + allowance := new(big.Int).Div(available, call.GasPrice) + if allowance.IsUint64() && hi > allowance.Uint64() { + transfer := call.Value + if transfer == nil { + transfer = new(big.Int) + } + log.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance, + "sent", transfer, "gasprice", call.GasPrice, "fundable", allowance) + hi = allowance.Uint64() + } + } cap = hi // Create a helper to check if a gas allowance results in an executable transaction - executable := func(gas uint64) bool { + executable := func(gas uint64) (bool, *core.ExecutionResult, error) { call.Gas = gas snapshot := b.pendingState.Snapshot() - _, _, failed, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState) + res, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState) b.pendingState.RevertToSnapshot(snapshot) - if err != nil || failed { - return false + if err != nil { + if errors.Is(err, core.ErrIntrinsicGas) { + return true, nil, nil // Special case, raise gas limit + } + return true, nil, err // Bail out } - return true + return res.Failed(), res, nil } // Execute the binary search and hone in on an executable gas limit for lo+1 < hi { mid := (hi + lo) / 2 - if !executable(mid) { + failed, _, err := executable(mid) + + // If the error is not nil(consensus error), it means the provided message + // call or transaction will never be accepted no matter how much gas it is + // assigned. Return the error directly, don't struggle any more + if err != nil { + return 0, err + } + if failed { lo = mid } else { hi = mid @@ -288,8 +507,19 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs } // Reject the transaction as invalid if it still fails at the highest allowance if hi == cap { - if !executable(hi) { - return 0, errGasEstimationFailed + failed, result, err := executable(hi) + if err != nil { + return 0, err + } + if failed { + if result != nil && result.Err != vm.ErrOutOfGas { + if len(result.Revert()) > 0 { + return 0, newRevertError(result) + } + return 0, result.Err + } + // Otherwise, the specified gas cap is too low + return 0, fmt.Errorf("gas required exceeds allowance (%d)", cap) } } return hi, nil @@ -297,7 +527,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs // callContract implements common code between normal and pending contract calls. // state is modified during execution, make sure to copy it if necessary. -func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, statedb *state.StateDB) ([]byte, uint64, bool, error) { +func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, stateDB *state.StateDB) (*core.ExecutionResult, error) { // Ensure message is initialized properly. if call.GasPrice == nil { call.GasPrice = big.NewInt(1) @@ -309,18 +539,19 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM call.Value = new(big.Int) } // Set infinite balance to the fake caller account. - from := statedb.GetOrNewStateObject(call.From) + from := stateDB.GetOrNewStateObject(call.From) from.SetBalance(math.MaxBig256) // Execute the call. 
- msg := callmsg{call} + msg := callMsg{call} - evmContext := core.NewEVMContext(msg, block.Header(), b.blockchain, nil) + txContext := core.NewEVMTxContext(msg) + evmContext := core.NewEVMBlockContext(block.Header(), b.blockchain, nil) // Create a new environment which holds all relevant information // about the transaction and calling mechanisms. - vmenv := vm.NewEVM(evmContext, statedb, b.config, vm.Config{}) - gaspool := new(core.GasPool).AddGas(math.MaxUint64) + vmEnv := vm.NewEVM(evmContext, txContext, stateDB, b.config, vm.Config{}) + gasPool := new(core.GasPool).AddGas(math.MaxUint64) - return core.NewStateTransition(vmenv, msg, gaspool).TransitionDb() + return core.NewStateTransition(vmEnv, msg, gasPool).TransitionDb() } // SendTransaction updates the pending block to include the given transaction. @@ -344,10 +575,10 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa } block.AddTxWithChain(b.blockchain, tx) }) - statedb, _ := b.blockchain.State() + stateDB, _ := b.blockchain.State() b.pendingBlock = blocks[0] - b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database()) + b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil) return nil } @@ -361,7 +592,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.Filter // Block filter requested, construct a single-shot filter filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain}, *query.BlockHash, query.Addresses, query.Topics) } else { - // Initialize unset filter boundaried to run from genesis to chain head + // Initialize unset filter boundaries to run from genesis to chain head from := int64(0) if query.FromBlock != nil { from = query.FromBlock.Int64() @@ -379,8 +610,8 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.Filter return nil, err } res := make([]types.Log, len(logs)) - for i, log := range logs { - res[i] = *log + for i, nLog := range logs { + res[i] = *nLog } return res, nil } @@ -401,9 +632,9 @@ func (b *SimulatedBackend) SubscribeFilterLogs(ctx context.Context, query ethere for { select { case logs := <-sink: - for _, log := range logs { + for _, nlog := range logs { select { - case ch <- *log: + case ch <- *nlog: case err := <-sub.Err(): return err case <-quit: @@ -419,20 +650,50 @@ func (b *SimulatedBackend) SubscribeFilterLogs(ctx context.Context, query ethere }), nil } +// SubscribeNewHead returns an event subscription for a new header. +func (b *SimulatedBackend) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) { + // subscribe to a new head + sink := make(chan *types.Header) + sub := b.events.SubscribeNewHeads(sink) + + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case head := <-sink: + select { + case ch <- head: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + // AdjustTime adds a time shift to the simulated clock. +// It can only be called on empty blocks. 
func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error { b.mu.Lock() defer b.mu.Unlock() + + if len(b.pendingBlock.Transactions()) != 0 { + return errors.New("Could not adjust time on non-empty block") + } + blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) { - for _, tx := range b.pendingBlock.Transactions() { - block.AddTx(tx) - } block.OffsetTime(int64(adjustment.Seconds())) }) - statedb, _ := b.blockchain.State() + stateDB, _ := b.blockchain.State() b.pendingBlock = blocks[0] - b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database()) + b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil) return nil } @@ -442,19 +703,19 @@ func (b *SimulatedBackend) Blockchain() *core.BlockChain { return b.blockchain } -// callmsg implements core.Message to allow passing it as a transaction simulator. -type callmsg struct { +// callMsg implements core.Message to allow passing it as a transaction simulator. +type callMsg struct { ethereum.CallMsg } -func (m callmsg) From() common.Address { return m.CallMsg.From } -func (m callmsg) Nonce() uint64 { return 0 } -func (m callmsg) CheckNonce() bool { return false } -func (m callmsg) To() *common.Address { return m.CallMsg.To } -func (m callmsg) GasPrice() *big.Int { return m.CallMsg.GasPrice } -func (m callmsg) Gas() uint64 { return m.CallMsg.Gas } -func (m callmsg) Value() *big.Int { return m.CallMsg.Value } -func (m callmsg) Data() []byte { return m.CallMsg.Data } +func (m callMsg) From() common.Address { return m.CallMsg.From } +func (m callMsg) Nonce() uint64 { return 0 } +func (m callMsg) CheckNonce() bool { return false } +func (m callMsg) To() *common.Address { return m.CallMsg.To } +func (m callMsg) GasPrice() *big.Int { return m.CallMsg.GasPrice } +func (m callMsg) Gas() uint64 { return m.CallMsg.Gas } +func (m callMsg) Value() *big.Int { return m.CallMsg.Value } +func (m callMsg) Data() []byte { return m.CallMsg.Data } // filterBackend implements filters.Backend to support filtering for logs without // taking bloom-bits acceleration structures into account. 
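A rough sketch (editor's illustration under the same assumptions as the earlier example) of the new SubscribeNewHead together with the stricter AdjustTime: the clock can only be shifted while the pending block is empty, and committing afterwards delivers the time-shifted header to subscribers. The 30-second offset and the timeout are illustrative values, not taken from the patch:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	sim := backends.NewSimulatedBackend(core.GenesisAlloc{}, 8000000)
	defer sim.Close()

	// Subscribe before committing so the next block's header is delivered.
	heads := make(chan *types.Header)
	sub, err := sim.SubscribeNewHead(context.Background(), heads)
	if err != nil {
		panic(err)
	}
	defer sub.Unsubscribe()

	// AdjustTime errors if the pending block already contains transactions;
	// commit (or roll back) pending transactions first in real tests.
	if err := sim.AdjustTime(30 * time.Second); err != nil {
		panic(err)
	}
	sim.Commit() // mine the time-shifted pending block

	select {
	case head := <-heads:
		fmt.Println("new head", head.Number, "at timestamp", head.Time)
	case err := <-sub.Err():
		fmt.Println("subscription error:", err)
	case <-time.After(5 * time.Second):
		fmt.Println("timed out waiting for a new head")
	}
}
```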
@@ -502,22 +763,34 @@ func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*ty } func (fb *filterBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { - return event.NewSubscription(func(quit <-chan struct{}) error { - <-quit - return nil - }) + return nullSubscription() } + func (fb *filterBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { return fb.bc.SubscribeChainEvent(ch) } + func (fb *filterBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { return fb.bc.SubscribeRemovedLogsEvent(ch) } + func (fb *filterBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { return fb.bc.SubscribeLogsEvent(ch) } +func (fb *filterBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { + return nullSubscription() +} + func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 } + func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) { panic("not supported") } + +func nullSubscription() event.Subscription { + return event.NewSubscription(func(quit <-chan struct{}) error { + <-quit + return nil + }) +} diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go index 785b25d10fe2..64ddf8bb2c3c 100644 --- a/accounts/abi/bind/backends/simulated_test.go +++ b/accounts/abi/bind/backends/simulated_test.go @@ -14,30 +14,36 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package backends_test +package backends import ( + "bytes" "context" + "errors" "math/big" + "reflect" + "strings" "testing" + "time" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" ) func TestSimulatedBackend(t *testing.T) { var gasLimit uint64 = 8000029 key, _ := crypto.GenerateKey() // nolint: gosec - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) genAlloc := make(core.GenesisAlloc) genAlloc[auth.From] = core.GenesisAccount{Balance: big.NewInt(9223372036854775807)} - sim := backends.NewSimulatedBackend(genAlloc, gasLimit) + sim := NewSimulatedBackend(genAlloc, gasLimit) defer sim.Close() // should return an error if the tx is not found @@ -79,5 +85,1032 @@ func TestSimulatedBackend(t *testing.T) { if isPending { t.Fatal("transaction should not have pending status") } +} + +var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + +// the following is based on this contract: +// contract T { +// event received(address sender, uint amount, bytes memo); +// event receivedAddr(address sender); +// +// function receive(bytes calldata memo) external payable returns (string memory res) { +// emit received(msg.sender, msg.value, memo); +// emit receivedAddr(msg.sender); +// return "hello world"; +// } +// } +const abiJSON = `[ { "constant": false, "inputs": [ { "name": "memo", "type": "bytes" } ], "name": "receive", "outputs": [ { "name": "res", "type": "string" } ], "payable": true, "stateMutability": "payable", 
"type": "function" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" }, { "indexed": false, "name": "amount", "type": "uint256" }, { "indexed": false, "name": "memo", "type": "bytes" } ], "name": "received", "type": "event" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" } ], "name": "receivedAddr", "type": "event" } ]` +const abiBin = `0x608060405234801561001057600080fd5b506102a0806100206000396000f3fe60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029` +const deployedCode = `60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029` + +// expected return value contains "hello world" +var expectedReturn = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + +func simTestBackend(testAddr common.Address) *SimulatedBackend { + return NewSimulatedBackend( + core.GenesisAlloc{ + testAddr: {Balance: big.NewInt(10000000000)}, + }, 10000000, + ) +} + +func TestNewSimulatedBackend(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + expectedBal := big.NewInt(10000000000) + sim := simTestBackend(testAddr) + defer sim.Close() + + if sim.config != params.AllEthashProtocolChanges { + t.Errorf("expected sim config to equal params.AllEthashProtocolChanges, got %v", sim.config) + } + + if sim.blockchain.Config() != params.AllEthashProtocolChanges { + t.Errorf("expected sim blockchain config to equal params.AllEthashProtocolChanges, got %v", sim.config) + } + + stateDB, _ := sim.blockchain.State() + bal := stateDB.GetBalance(testAddr) + if bal.Cmp(expectedBal) != 0 { + t.Errorf("expected balance for test address not received. expected: %v actual: %v", expectedBal, bal) + } +} + +func TestSimulatedBackend_AdjustTime(t *testing.T) { + sim := NewSimulatedBackend( + core.GenesisAlloc{}, 10000000, + ) + defer sim.Close() + + prevTime := sim.pendingBlock.Time() + if err := sim.AdjustTime(time.Second); err != nil { + t.Error(err) + } + newTime := sim.pendingBlock.Time() + + if newTime-prevTime != uint64(time.Second.Seconds()) { + t.Errorf("adjusted time not equal to a second. prev: %v, new: %v", prevTime, newTime) + } +} + +func TestNewSimulatedBackend_AdjustTimeFail(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + sim := simTestBackend(testAddr) + // Create tx and send + tx := types.NewTransaction(0, testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) + if err != nil { + t.Errorf("could not sign tx: %v", err) + } + sim.SendTransaction(context.Background(), signedTx) + // AdjustTime should fail on non-empty block + if err := sim.AdjustTime(time.Second); err == nil { + t.Error("Expected adjust time to error on non-empty block") + } + sim.Commit() + + prevTime := sim.pendingBlock.Time() + if err := sim.AdjustTime(time.Minute); err != nil { + t.Error(err) + } + newTime := sim.pendingBlock.Time() + if newTime-prevTime != uint64(time.Minute.Seconds()) { + t.Errorf("adjusted time not equal to a minute. prev: %v, new: %v", prevTime, newTime) + } + // Put a transaction after adjusting time + tx2 := types.NewTransaction(1, testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + signedTx2, err := types.SignTx(tx2, types.HomesteadSigner{}, testKey) + if err != nil { + t.Errorf("could not sign tx: %v", err) + } + sim.SendTransaction(context.Background(), signedTx2) + sim.Commit() + newTime = sim.pendingBlock.Time() + if newTime-prevTime >= uint64(time.Minute.Seconds()) { + t.Errorf("time adjusted, but shouldn't be: prev: %v, new: %v", prevTime, newTime) + } +} +func TestSimulatedBackend_BalanceAt(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + expectedBal := big.NewInt(10000000000) + sim := simTestBackend(testAddr) + defer sim.Close() + bgCtx := context.Background() + + bal, err := sim.BalanceAt(bgCtx, testAddr, nil) + if err != nil { + t.Error(err) + } + + if bal.Cmp(expectedBal) != 0 { + t.Errorf("expected balance for test address not received. 
expected: %v actual: %v", expectedBal, bal) + } +} + +func TestSimulatedBackend_BlockByHash(t *testing.T) { + sim := NewSimulatedBackend( + core.GenesisAlloc{}, 10000000, + ) + defer sim.Close() + bgCtx := context.Background() + + block, err := sim.BlockByNumber(bgCtx, nil) + if err != nil { + t.Errorf("could not get recent block: %v", err) + } + blockByHash, err := sim.BlockByHash(bgCtx, block.Hash()) + if err != nil { + t.Errorf("could not get recent block: %v", err) + } + + if block.Hash() != blockByHash.Hash() { + t.Errorf("did not get expected block") + } +} + +func TestSimulatedBackend_BlockByNumber(t *testing.T) { + sim := NewSimulatedBackend( + core.GenesisAlloc{}, 10000000, + ) + defer sim.Close() + bgCtx := context.Background() + + block, err := sim.BlockByNumber(bgCtx, nil) + if err != nil { + t.Errorf("could not get recent block: %v", err) + } + if block.NumberU64() != 0 { + t.Errorf("did not get most recent block, instead got block number %v", block.NumberU64()) + } + + // create one block + sim.Commit() + + block, err = sim.BlockByNumber(bgCtx, nil) + if err != nil { + t.Errorf("could not get recent block: %v", err) + } + if block.NumberU64() != 1 { + t.Errorf("did not get most recent block, instead got block number %v", block.NumberU64()) + } + + blockByNumber, err := sim.BlockByNumber(bgCtx, big.NewInt(1)) + if err != nil { + t.Errorf("could not get block by number: %v", err) + } + if blockByNumber.Hash() != block.Hash() { + t.Errorf("did not get the same block with height of 1 as before") + } +} + +func TestSimulatedBackend_NonceAt(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + + sim := simTestBackend(testAddr) + defer sim.Close() + bgCtx := context.Background() + + nonce, err := sim.NonceAt(bgCtx, testAddr, big.NewInt(0)) + if err != nil { + t.Errorf("could not get nonce for test addr: %v", err) + } + + if nonce != uint64(0) { + t.Errorf("received incorrect nonce. expected 0, got %v", nonce) + } + + // create a signed transaction to send + tx := types.NewTransaction(nonce, testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) + if err != nil { + t.Errorf("could not sign tx: %v", err) + } + + // send tx to simulated backend + err = sim.SendTransaction(bgCtx, signedTx) + if err != nil { + t.Errorf("could not add tx to pending block: %v", err) + } + sim.Commit() + + newNonce, err := sim.NonceAt(bgCtx, testAddr, big.NewInt(1)) + if err != nil { + t.Errorf("could not get nonce for test addr: %v", err) + } + + if newNonce != nonce+uint64(1) { + t.Errorf("received incorrect nonce. expected 1, got %v", nonce) + } + // create some more blocks + sim.Commit() + // Check that we can get data for an older block/state + newNonce, err = sim.NonceAt(bgCtx, testAddr, big.NewInt(1)) + if err != nil { + t.Fatalf("could not get nonce for test addr: %v", err) + } + if newNonce != nonce+uint64(1) { + t.Fatalf("received incorrect nonce. 
expected 1, got %v", nonce) + } +} + +func TestSimulatedBackend_SendTransaction(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + + sim := simTestBackend(testAddr) + defer sim.Close() + bgCtx := context.Background() + + // create a signed transaction to send + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) + if err != nil { + t.Errorf("could not sign tx: %v", err) + } + + // send tx to simulated backend + err = sim.SendTransaction(bgCtx, signedTx) + if err != nil { + t.Errorf("could not add tx to pending block: %v", err) + } + sim.Commit() + + block, err := sim.BlockByNumber(bgCtx, big.NewInt(1)) + if err != nil { + t.Errorf("could not get block at height 1: %v", err) + } + + if signedTx.Hash() != block.Transactions()[0].Hash() { + t.Errorf("did not commit sent transaction. expected hash %v got hash %v", block.Transactions()[0].Hash(), signedTx.Hash()) + } +} + +func TestSimulatedBackend_TransactionByHash(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + + sim := NewSimulatedBackend( + core.GenesisAlloc{ + testAddr: {Balance: big.NewInt(10000000000)}, + }, 10000000, + ) + defer sim.Close() + bgCtx := context.Background() + + // create a signed transaction to send + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) + if err != nil { + t.Errorf("could not sign tx: %v", err) + } + + // send tx to simulated backend + err = sim.SendTransaction(bgCtx, signedTx) + if err != nil { + t.Errorf("could not add tx to pending block: %v", err) + } + + // ensure tx is committed pending + receivedTx, pending, err := sim.TransactionByHash(bgCtx, signedTx.Hash()) + if err != nil { + t.Errorf("could not get transaction by hash %v: %v", signedTx.Hash(), err) + } + if !pending { + t.Errorf("expected transaction to be in pending state") + } + if receivedTx.Hash() != signedTx.Hash() { + t.Errorf("did not received committed transaction. expected hash %v got hash %v", signedTx.Hash(), receivedTx.Hash()) + } + + sim.Commit() + + // ensure tx is not and committed pending + receivedTx, pending, err = sim.TransactionByHash(bgCtx, signedTx.Hash()) + if err != nil { + t.Errorf("could not get transaction by hash %v: %v", signedTx.Hash(), err) + } + if pending { + t.Errorf("expected transaction to not be in pending state") + } + if receivedTx.Hash() != signedTx.Hash() { + t.Errorf("did not received committed transaction. 
expected hash %v got hash %v", signedTx.Hash(), receivedTx.Hash()) + } +} + +func TestSimulatedBackend_EstimateGas(t *testing.T) { + /* + pragma solidity ^0.6.4; + contract GasEstimation { + function PureRevert() public { revert(); } + function Revert() public { revert("revert reason");} + function OOG() public { for (uint i = 0; ; i++) {}} + function Assert() public { assert(false);} + function Valid() public {} + }*/ + const contractAbi = "[{\"inputs\":[],\"name\":\"Assert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"OOG\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PureRevert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Revert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Valid\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]" + const contractBin = "0x60806040523480156100115760006000fd5b50610017565b61016e806100266000396000f3fe60806040523480156100115760006000fd5b506004361061005c5760003560e01c806350f6fe3414610062578063aa8b1d301461006c578063b9b046f914610076578063d8b9839114610080578063e09fface1461008a5761005c565b60006000fd5b61006a610094565b005b6100746100ad565b005b61007e6100b5565b005b6100886100c2565b005b610092610135565b005b6000600090505b5b808060010191505061009b565b505b565b60006000fd5b565b600015156100bf57fe5b5b565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600d8152602001807f72657665727420726561736f6e0000000000000000000000000000000000000081526020015060200191505060405180910390fd5b565b5b56fea2646970667358221220345bbcbb1a5ecf22b53a78eaebf95f8ee0eceff6d10d4b9643495084d2ec934a64736f6c63430006040033" + + key, _ := crypto.GenerateKey() + addr := crypto.PubkeyToAddress(key.PublicKey) + opts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + + sim := NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(params.Ether)}}, 10000000) + defer sim.Close() + + parsed, _ := abi.JSON(strings.NewReader(contractAbi)) + contractAddr, _, _, _ := bind.DeployContract(opts, parsed, common.FromHex(contractBin), sim) + sim.Commit() + + var cases = []struct { + name string + message ethereum.CallMsg + expect uint64 + expectError error + expectData interface{} + }{ + {"plain transfer(valid)", ethereum.CallMsg{ + From: addr, + To: &addr, + Gas: 0, + GasPrice: big.NewInt(0), + Value: big.NewInt(1), + Data: nil, + }, params.TxGas, nil, nil}, + + {"plain transfer(invalid)", ethereum.CallMsg{ + From: addr, + To: &contractAddr, + Gas: 0, + GasPrice: big.NewInt(0), + Value: big.NewInt(1), + Data: nil, + }, 0, errors.New("execution reverted"), nil}, + + {"Revert", ethereum.CallMsg{ + From: addr, + To: &contractAddr, + Gas: 0, + GasPrice: big.NewInt(0), + Value: nil, + Data: common.Hex2Bytes("d8b98391"), + }, 0, errors.New("execution reverted: revert reason"), "0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d72657665727420726561736f6e00000000000000000000000000000000000000"}, + + {"PureRevert", ethereum.CallMsg{ + From: addr, + To: &contractAddr, + Gas: 0, + GasPrice: big.NewInt(0), + Value: nil, + Data: common.Hex2Bytes("aa8b1d30"), + }, 0, errors.New("execution reverted"), nil}, + + {"OOG", ethereum.CallMsg{ + From: addr, + To: &contractAddr, + Gas: 100000, + GasPrice: big.NewInt(0), + Value: nil, + Data: 
common.Hex2Bytes("50f6fe34"), + }, 0, errors.New("gas required exceeds allowance (100000)"), nil}, + + {"Assert", ethereum.CallMsg{ + From: addr, + To: &contractAddr, + Gas: 100000, + GasPrice: big.NewInt(0), + Value: nil, + Data: common.Hex2Bytes("b9b046f9"), + }, 0, errors.New("invalid opcode: opcode 0xfe not defined"), nil}, + + {"Valid", ethereum.CallMsg{ + From: addr, + To: &contractAddr, + Gas: 100000, + GasPrice: big.NewInt(0), + Value: nil, + Data: common.Hex2Bytes("e09fface"), + }, 21275, nil, nil}, + } + for _, c := range cases { + got, err := sim.EstimateGas(context.Background(), c.message) + if c.expectError != nil { + if err == nil { + t.Fatalf("Expect error, got nil") + } + if c.expectError.Error() != err.Error() { + t.Fatalf("Expect error, want %v, got %v", c.expectError, err) + } + if c.expectData != nil { + if err, ok := err.(*revertError); !ok { + t.Fatalf("Expect revert error, got %T", err) + } else if !reflect.DeepEqual(err.ErrorData(), c.expectData) { + t.Fatalf("Error data mismatch, want %v, got %v", c.expectData, err.ErrorData()) + } + } + continue + } + if got != c.expect { + t.Fatalf("Gas estimation mismatch, want %d, got %d", c.expect, got) + } + } +} + +func TestSimulatedBackend_EstimateGasWithPrice(t *testing.T) { + key, _ := crypto.GenerateKey() + addr := crypto.PubkeyToAddress(key.PublicKey) + + sim := NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(params.Ether*2 + 2e17)}}, 10000000) + defer sim.Close() + + recipient := common.HexToAddress("deadbeef") + var cases = []struct { + name string + message ethereum.CallMsg + expect uint64 + expectError error + }{ + {"EstimateWithoutPrice", ethereum.CallMsg{ + From: addr, + To: &recipient, + Gas: 0, + GasPrice: big.NewInt(0), + Value: big.NewInt(1000), + Data: nil, + }, 21000, nil}, + + {"EstimateWithPrice", ethereum.CallMsg{ + From: addr, + To: &recipient, + Gas: 0, + GasPrice: big.NewInt(1000), + Value: big.NewInt(1000), + Data: nil, + }, 21000, nil}, + + {"EstimateWithVeryHighPrice", ethereum.CallMsg{ + From: addr, + To: &recipient, + Gas: 0, + GasPrice: big.NewInt(1e14), // gascost = 2.1ether + Value: big.NewInt(1e17), // the remaining balance for fee is 2.1ether + Data: nil, + }, 21000, nil}, + + {"EstimateWithSuperhighPrice", ethereum.CallMsg{ + From: addr, + To: &recipient, + Gas: 0, + GasPrice: big.NewInt(2e14), // gascost = 4.2ether + Value: big.NewInt(1000), + Data: nil, + }, 21000, errors.New("gas required exceeds allowance (10999)")}, // 10999=(2.2ether-1000wei)/(2e14) + } + for _, c := range cases { + got, err := sim.EstimateGas(context.Background(), c.message) + if c.expectError != nil { + if err == nil { + t.Fatalf("Expect error, got nil") + } + if c.expectError.Error() != err.Error() { + t.Fatalf("Expect error, want %v, got %v", c.expectError, err) + } + continue + } + if got != c.expect { + t.Fatalf("Gas estimation mismatch, want %d, got %d", c.expect, got) + } + } +} + +func TestSimulatedBackend_HeaderByHash(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + + sim := simTestBackend(testAddr) + defer sim.Close() + bgCtx := context.Background() + + header, err := sim.HeaderByNumber(bgCtx, nil) + if err != nil { + t.Errorf("could not get recent block: %v", err) + } + headerByHash, err := sim.HeaderByHash(bgCtx, header.Hash()) + if err != nil { + t.Errorf("could not get recent block: %v", err) + } + + if header.Hash() != headerByHash.Hash() { + t.Errorf("did not get expected block") + } +} + +func TestSimulatedBackend_HeaderByNumber(t *testing.T) { + testAddr := 
crypto.PubkeyToAddress(testKey.PublicKey) + + sim := simTestBackend(testAddr) + defer sim.Close() + bgCtx := context.Background() + + latestBlockHeader, err := sim.HeaderByNumber(bgCtx, nil) + if err != nil { + t.Errorf("could not get header for tip of chain: %v", err) + } + if latestBlockHeader == nil { + t.Errorf("received a nil block header") + } + if latestBlockHeader.Number.Uint64() != uint64(0) { + t.Errorf("expected block header number 0, instead got %v", latestBlockHeader.Number.Uint64()) + } + + sim.Commit() + + latestBlockHeader, err = sim.HeaderByNumber(bgCtx, nil) + if err != nil { + t.Errorf("could not get header for blockheight of 1: %v", err) + } + + blockHeader, err := sim.HeaderByNumber(bgCtx, big.NewInt(1)) + if err != nil { + t.Errorf("could not get header for blockheight of 1: %v", err) + } + + if blockHeader.Hash() != latestBlockHeader.Hash() { + t.Errorf("block header and latest block header are not the same") + } + if blockHeader.Number.Int64() != int64(1) { + t.Errorf("did not get blockheader for block 1. instead got block %v", blockHeader.Number.Int64()) + } + + block, err := sim.BlockByNumber(bgCtx, big.NewInt(1)) + if err != nil { + t.Errorf("could not get block for blockheight of 1: %v", err) + } + + if block.Hash() != blockHeader.Hash() { + t.Errorf("block hash and block header hash do not match. expected %v, got %v", block.Hash(), blockHeader.Hash()) + } +} + +func TestSimulatedBackend_TransactionCount(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + + sim := simTestBackend(testAddr) + defer sim.Close() + bgCtx := context.Background() + currentBlock, err := sim.BlockByNumber(bgCtx, nil) + if err != nil || currentBlock == nil { + t.Error("could not get current block") + } + + count, err := sim.TransactionCount(bgCtx, currentBlock.Hash()) + if err != nil { + t.Error("could not get current block's transaction count") + } + + if count != 0 { + t.Errorf("expected transaction count of %v does not match actual count of %v", 0, count) + } + + // create a signed transaction to send + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) + if err != nil { + t.Errorf("could not sign tx: %v", err) + } + + // send tx to simulated backend + err = sim.SendTransaction(bgCtx, signedTx) + if err != nil { + t.Errorf("could not add tx to pending block: %v", err) + } + + sim.Commit() + + lastBlock, err := sim.BlockByNumber(bgCtx, nil) + if err != nil { + t.Errorf("could not get header for tip of chain: %v", err) + } + + count, err = sim.TransactionCount(bgCtx, lastBlock.Hash()) + if err != nil { + t.Error("could not get current block's transaction count") + } + + if count != 1 { + t.Errorf("expected transaction count of %v does not match actual count of %v", 1, count) + } +} + +func TestSimulatedBackend_TransactionInBlock(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + + sim := simTestBackend(testAddr) + defer sim.Close() + bgCtx := context.Background() + + transaction, err := sim.TransactionInBlock(bgCtx, sim.pendingBlock.Hash(), uint(0)) + if err == nil && err != errTransactionDoesNotExist { + t.Errorf("expected a transaction does not exist error to be received but received %v", err) + } + if transaction != nil { + t.Errorf("expected transaction to be nil but received %v", transaction) + } + + // expect pending nonce to be 0 since account has not been used + pendingNonce, err := sim.PendingNonceAt(bgCtx, testAddr) 
+ if err != nil { + t.Errorf("did not get the pending nonce: %v", err) + } + + if pendingNonce != uint64(0) { + t.Errorf("expected pending nonce of 0 got %v", pendingNonce) + } + + // create a signed transaction to send + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) + if err != nil { + t.Errorf("could not sign tx: %v", err) + } + + // send tx to simulated backend + err = sim.SendTransaction(bgCtx, signedTx) + if err != nil { + t.Errorf("could not add tx to pending block: %v", err) + } + + sim.Commit() + + lastBlock, err := sim.BlockByNumber(bgCtx, nil) + if err != nil { + t.Errorf("could not get header for tip of chain: %v", err) + } + + transaction, err = sim.TransactionInBlock(bgCtx, lastBlock.Hash(), uint(1)) + if err == nil && err != errTransactionDoesNotExist { + t.Errorf("expected a transaction does not exist error to be received but received %v", err) + } + if transaction != nil { + t.Errorf("expected transaction to be nil but received %v", transaction) + } + + transaction, err = sim.TransactionInBlock(bgCtx, lastBlock.Hash(), uint(0)) + if err != nil { + t.Errorf("could not get transaction in the lastest block with hash %v: %v", lastBlock.Hash().String(), err) + } + + if signedTx.Hash().String() != transaction.Hash().String() { + t.Errorf("received transaction that did not match the sent transaction. expected hash %v, got hash %v", signedTx.Hash().String(), transaction.Hash().String()) + } +} + +func TestSimulatedBackend_PendingNonceAt(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + + sim := simTestBackend(testAddr) + defer sim.Close() + bgCtx := context.Background() + + // expect pending nonce to be 0 since account has not been used + pendingNonce, err := sim.PendingNonceAt(bgCtx, testAddr) + if err != nil { + t.Errorf("did not get the pending nonce: %v", err) + } + + if pendingNonce != uint64(0) { + t.Errorf("expected pending nonce of 0 got %v", pendingNonce) + } + + // create a signed transaction to send + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) + if err != nil { + t.Errorf("could not sign tx: %v", err) + } + + // send tx to simulated backend + err = sim.SendTransaction(bgCtx, signedTx) + if err != nil { + t.Errorf("could not add tx to pending block: %v", err) + } + + // expect pending nonce to be 1 since account has submitted one transaction + pendingNonce, err = sim.PendingNonceAt(bgCtx, testAddr) + if err != nil { + t.Errorf("did not get the pending nonce: %v", err) + } + + if pendingNonce != uint64(1) { + t.Errorf("expected pending nonce of 1 got %v", pendingNonce) + } + + // make a new transaction with a nonce of 1 + tx = types.NewTransaction(uint64(1), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + signedTx, err = types.SignTx(tx, types.HomesteadSigner{}, testKey) + if err != nil { + t.Errorf("could not sign tx: %v", err) + } + err = sim.SendTransaction(bgCtx, signedTx) + if err != nil { + t.Errorf("could not send tx: %v", err) + } + + // expect pending nonce to be 2 since account now has two transactions + pendingNonce, err = sim.PendingNonceAt(bgCtx, testAddr) + if err != nil { + t.Errorf("did not get the pending nonce: %v", err) + } + + if pendingNonce != uint64(2) { + t.Errorf("expected pending nonce of 2 got %v", pendingNonce) + } +} + +func 
TestSimulatedBackend_TransactionReceipt(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + + sim := simTestBackend(testAddr) + defer sim.Close() + bgCtx := context.Background() + + // create a signed transaction to send + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) + if err != nil { + t.Errorf("could not sign tx: %v", err) + } + + // send tx to simulated backend + err = sim.SendTransaction(bgCtx, signedTx) + if err != nil { + t.Errorf("could not add tx to pending block: %v", err) + } + sim.Commit() + + receipt, err := sim.TransactionReceipt(bgCtx, signedTx.Hash()) + if err != nil { + t.Errorf("could not get transaction receipt: %v", err) + } + + if receipt.ContractAddress != testAddr && receipt.TxHash != signedTx.Hash() { + t.Errorf("received receipt is not correct: %v", receipt) + } +} + +func TestSimulatedBackend_SuggestGasPrice(t *testing.T) { + sim := NewSimulatedBackend( + core.GenesisAlloc{}, + 10000000, + ) + defer sim.Close() + bgCtx := context.Background() + gasPrice, err := sim.SuggestGasPrice(bgCtx) + if err != nil { + t.Errorf("could not get gas price: %v", err) + } + if gasPrice.Uint64() != uint64(1) { + t.Errorf("gas price was not expected value of 1. actual: %v", gasPrice.Uint64()) + } +} + +func TestSimulatedBackend_PendingCodeAt(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + sim := simTestBackend(testAddr) + defer sim.Close() + bgCtx := context.Background() + code, err := sim.CodeAt(bgCtx, testAddr, nil) + if err != nil { + t.Errorf("could not get code at test addr: %v", err) + } + if len(code) != 0 { + t.Errorf("got code for account that does not have contract code") + } + + parsed, err := abi.JSON(strings.NewReader(abiJSON)) + if err != nil { + t.Errorf("could not get code at test addr: %v", err) + } + auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) + contractAddr, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(abiBin), sim) + if err != nil { + t.Errorf("could not deploy contract: %v tx: %v contract: %v", err, tx, contract) + } + + code, err = sim.PendingCodeAt(bgCtx, contractAddr) + if err != nil { + t.Errorf("could not get code at test addr: %v", err) + } + if len(code) == 0 { + t.Errorf("did not get code for account that has contract code") + } + // ensure code received equals code deployed + if !bytes.Equal(code, common.FromHex(deployedCode)) { + t.Errorf("code received did not match expected deployed code:\n expected %v\n actual %v", common.FromHex(deployedCode), code) + } +} + +func TestSimulatedBackend_CodeAt(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + sim := simTestBackend(testAddr) + defer sim.Close() + bgCtx := context.Background() + code, err := sim.CodeAt(bgCtx, testAddr, nil) + if err != nil { + t.Errorf("could not get code at test addr: %v", err) + } + if len(code) != 0 { + t.Errorf("got code for account that does not have contract code") + } + + parsed, err := abi.JSON(strings.NewReader(abiJSON)) + if err != nil { + t.Errorf("could not get code at test addr: %v", err) + } + auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) + contractAddr, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(abiBin), sim) + if err != nil { + t.Errorf("could not deploy contract: %v tx: %v contract: %v", err, tx, contract) + } + + sim.Commit() + code, err = sim.CodeAt(bgCtx, 
contractAddr, nil) + if err != nil { + t.Errorf("could not get code at test addr: %v", err) + } + if len(code) == 0 { + t.Errorf("did not get code for account that has contract code") + } + // ensure code received equals code deployed + if !bytes.Equal(code, common.FromHex(deployedCode)) { + t.Errorf("code received did not match expected deployed code:\n expected %v\n actual %v", common.FromHex(deployedCode), code) + } +} + +// When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt: +// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]} +func TestSimulatedBackend_PendingAndCallContract(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + sim := simTestBackend(testAddr) + defer sim.Close() + bgCtx := context.Background() + + parsed, err := abi.JSON(strings.NewReader(abiJSON)) + if err != nil { + t.Errorf("could not get code at test addr: %v", err) + } + contractAuth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) + addr, _, _, err := bind.DeployContract(contractAuth, parsed, common.FromHex(abiBin), sim) + if err != nil { + t.Errorf("could not deploy contract: %v", err) + } + + input, err := parsed.Pack("receive", []byte("X")) + if err != nil { + t.Errorf("could not pack receive function on contract: %v", err) + } + + // make sure you can call the contract in pending state + res, err := sim.PendingCallContract(bgCtx, ethereum.CallMsg{ + From: testAddr, + To: &addr, + Data: input, + }) + if err != nil { + t.Errorf("could not call receive method on contract: %v", err) + } + if len(res) == 0 { + t.Errorf("result of contract call was empty: %v", res) + } + + // while comparing against the byte array is more exact, also compare against the human readable string for readability + if !bytes.Equal(res, expectedReturn) || !strings.Contains(string(res), "hello world") { + t.Errorf("response from calling contract was expected to be 'hello world' instead received %v", string(res)) + } + + sim.Commit() + + // make sure you can call the contract + res, err = sim.CallContract(bgCtx, ethereum.CallMsg{ + From: testAddr, + To: &addr, + Data: input, + }, nil) + if err != nil { + t.Errorf("could not call receive method on contract: %v", err) + } + if len(res) == 0 { + t.Errorf("result of contract call was empty: %v", res) + } + + if !bytes.Equal(res, expectedReturn) || !strings.Contains(string(res), "hello world") { + t.Errorf("response from calling contract was expected to be 'hello world' instead received %v", string(res)) + } +} + +// This test 
is based on the following contract: +/* +contract Reverter { + function revertString() public pure{ + require(false, "some error"); + } + function revertNoString() public pure { + require(false, ""); + } + function revertASM() public pure { + assembly { + revert(0x0, 0x0) + } + } + function noRevert() public pure { + assembly { + // Assembles something that looks like require(false, "some error") but is not reverted + mstore(0x0, 0x08c379a000000000000000000000000000000000000000000000000000000000) + mstore(0x4, 0x0000000000000000000000000000000000000000000000000000000000000020) + mstore(0x24, 0x000000000000000000000000000000000000000000000000000000000000000a) + mstore(0x44, 0x736f6d65206572726f7200000000000000000000000000000000000000000000) + return(0x0, 0x64) + } + } +}*/ +func TestSimulatedBackend_CallContractRevert(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + sim := simTestBackend(testAddr) + defer sim.Close() + bgCtx := context.Background() + + reverterABI := `[{"inputs": [],"name": "noRevert","outputs": [],"stateMutability": "pure","type": "function"},{"inputs": [],"name": "revertASM","outputs": [],"stateMutability": "pure","type": "function"},{"inputs": [],"name": "revertNoString","outputs": [],"stateMutability": "pure","type": "function"},{"inputs": [],"name": "revertString","outputs": [],"stateMutability": "pure","type": "function"}]` + reverterBin := "608060405234801561001057600080fd5b506101d3806100206000396000f3fe608060405234801561001057600080fd5b506004361061004c5760003560e01c80634b409e01146100515780639b340e361461005b5780639bd6103714610065578063b7246fc11461006f575b600080fd5b610059610079565b005b6100636100ca565b005b61006d6100cf565b005b610077610145565b005b60006100c8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526000815260200160200191505060405180910390fd5b565b600080fd5b6000610143576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600a8152602001807f736f6d65206572726f720000000000000000000000000000000000000000000081525060200191505060405180910390fd5b565b7f08c379a0000000000000000000000000000000000000000000000000000000006000526020600452600a6024527f736f6d65206572726f720000000000000000000000000000000000000000000060445260646000f3fea2646970667358221220cdd8af0609ec4996b7360c7c780bad5c735740c64b1fffc3445aa12d37f07cb164736f6c63430006070033" + + parsed, err := abi.JSON(strings.NewReader(reverterABI)) + if err != nil { + t.Errorf("could not get code at test addr: %v", err) + } + contractAuth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) + addr, _, _, err := bind.DeployContract(contractAuth, parsed, common.FromHex(reverterBin), sim) + if err != nil { + t.Errorf("could not deploy contract: %v", err) + } + + inputs := make(map[string]interface{}, 3) + inputs["revertASM"] = nil + inputs["revertNoString"] = "" + inputs["revertString"] = "some error" + + call := make([]func([]byte) ([]byte, error), 2) + call[0] = func(input []byte) ([]byte, error) { + return sim.PendingCallContract(bgCtx, ethereum.CallMsg{ + From: testAddr, + To: &addr, + Data: input, + }) + } + call[1] = func(input []byte) ([]byte, error) { + return sim.CallContract(bgCtx, ethereum.CallMsg{ + From: testAddr, + To: &addr, + Data: input, + }, nil) + } + + // Run pending calls then commit + for _, cl := range call { + for key, val := range inputs { + input, err := parsed.Pack(key) + if err != nil { + t.Errorf("could not pack %v function on contract: %v", key, err) + } + + res, err := 
cl(input) + if err == nil { + t.Errorf("call to %v was not reverted", key) + } + if res != nil { + t.Errorf("result from %v was not nil: %v", key, res) + } + if val != nil { + rerr, ok := err.(*revertError) + if !ok { + t.Errorf("expect revert error") + } + if rerr.Error() != "execution reverted: "+val.(string) { + t.Errorf("error was malformed: got %v want %v", rerr.Error(), val) + } + } else { + // revert(0x0,0x0) + if err.Error() != "execution reverted" { + t.Errorf("error was malformed: got %v want %v", err, "execution reverted") + } + } + } + input, err := parsed.Pack("noRevert") + if err != nil { + t.Errorf("could not pack noRevert function on contract: %v", err) + } + res, err := cl(input) + if err != nil { + t.Error("call to noRevert was reverted") + } + if res == nil { + t.Errorf("result from noRevert was nil") + } + sim.Commit() + } } diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go index 499b4bda07d4..f5a6fe22fc24 100644 --- a/accounts/abi/bind/base.go +++ b/accounts/abi/bind/base.go @@ -32,7 +32,7 @@ import ( // SignerFn is a signer function callback when a contract requires a method to // sign the transaction before submission. -type SignerFn func(types.Signer, common.Address, *types.Transaction) (*types.Transaction, error) +type SignerFn func(common.Address, *types.Transaction) (*types.Transaction, error) // CallOpts is the collection of options to fine tune a contract call request. type CallOpts struct { @@ -49,7 +49,7 @@ type TransactOpts struct { Nonce *big.Int // Nonce to use for the transaction execution (nil = use pending state) Signer SignerFn // Method to use for signing the transaction (mandatory) - Value *big.Int // Funds to transfer along along the transaction (nil = 0 = no funds) + Value *big.Int // Funds to transfer along the transaction (nil = 0 = no funds) GasPrice *big.Int // Gas price to use for the transaction execution (nil = gas price oracle) GasLimit uint64 // Gas limit to set for the transaction execution (0 = estimate) @@ -117,11 +117,14 @@ func DeployContract(opts *TransactOpts, abi abi.ABI, bytecode []byte, backend Co // sets the output to result. The result type might be a single field for simple // returns, a slice of interfaces for anonymous returns and a struct for named // returns. -func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string, params ...interface{}) error { +func (c *BoundContract) Call(opts *CallOpts, results *[]interface{}, method string, params ...interface{}) error { // Don't crash on a lazy user if opts == nil { opts = new(CallOpts) } + if results == nil { + results = new([]interface{}) + } // Pack the input, call and unpack the results input, err := c.abi.Pack(method, params...) if err != nil { @@ -149,7 +152,10 @@ func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string, } } else { output, err = c.caller.CallContract(ctx, msg, opts.BlockNumber) - if err == nil && len(output) == 0 { + if err != nil { + return err + } + if len(output) == 0 { // Make sure we have a contract to operate on, and bail out otherwise. 
if code, err = c.caller.CodeAt(ctx, c.address, opts.BlockNumber); err != nil { return err @@ -158,10 +164,14 @@ func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string, } } } - if err != nil { + + if len(*results) == 0 { + res, err := c.abi.Unpack(method, output) + *results = res return err } - return c.abi.Unpack(result, method, output) + res := *results + return c.abi.UnpackIntoInterface(res[0], method, output) } // Transact invokes the (paid) contract method with params as input values. @@ -171,12 +181,24 @@ func (c *BoundContract) Transact(opts *TransactOpts, method string, params ...in if err != nil { return nil, err } + // todo(rjl493456442) check the method is payable or not, + // reject invalid transaction at the first place return c.transact(opts, &c.address, input) } +// RawTransact initiates a transaction with the given raw calldata as the input. +// It's usually used to initiate transactions for invoking **Fallback** function. +func (c *BoundContract) RawTransact(opts *TransactOpts, calldata []byte) (*types.Transaction, error) { + // todo(rjl493456442) check the method is payable or not, + // reject invalid transaction at the first place + return c.transact(opts, &c.address, calldata) +} + // Transfer initiates a plain transaction to move funds to the contract, calling // its default method if one is available. func (c *BoundContract) Transfer(opts *TransactOpts) (*types.Transaction, error) { + // todo(rjl493456442) check the payable fallback or receive is defined + // or not, reject invalid transaction at the first place return c.transact(opts, &c.address, nil) } @@ -234,7 +256,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i if opts.Signer == nil { return nil, errors.New("no signer to authorize the transaction with") } - signedTx, err := opts.Signer(types.HomesteadSigner{}, opts.From, rawTx) + signedTx, err := opts.Signer(opts.From, rawTx) if err != nil { return nil, err } @@ -252,9 +274,9 @@ func (c *BoundContract) FilterLogs(opts *FilterOpts, name string, query ...[]int opts = new(FilterOpts) } // Append the event selector to the query parameters and construct the topic set - query = append([][]interface{}{{c.abi.Events[name].ID()}}, query...) + query = append([][]interface{}{{c.abi.Events[name].ID}}, query...) - topics, err := makeTopics(query...) + topics, err := abi.MakeTopics(query...) if err != nil { return nil, nil, err } @@ -301,9 +323,9 @@ func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]inter opts = new(WatchOpts) } // Append the event selector to the query parameters and construct the topic set - query = append([][]interface{}{{c.abi.Events[name].ID()}}, query...) + query = append([][]interface{}{{c.abi.Events[name].ID}}, query...) - topics, err := makeTopics(query...) + topics, err := abi.MakeTopics(query...) if err != nil { return nil, nil, err } @@ -327,7 +349,7 @@ func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]inter // UnpackLog unpacks a retrieved log into the provided output structure. 
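A minimal sketch of caller-side usage against the reworked Call signature and the simplified SignerFn; the ABI JSON, contract address and method names below are hypothetical, and chain ID 1337 simply mirrors the simulated backend used by these tests.

// Sketch only: the ABI, address and method names ("count", "increment") are
// hypothetical and not part of this change.
package sketch

import (
	"crypto/ecdsa"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

const counterABI = `[{"inputs":[],"name":"count","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"increment","outputs":[],"stateMutability":"nonpayable","type":"function"}]`

// readCount uses the new results-slice form of Call: outputs are collected
// into a generic []interface{} (passing nil is tolerated), and a uint256
// output arrives as *big.Int.
func readCount(caller bind.ContractCaller, at common.Address) (*big.Int, error) {
	parsed, err := abi.JSON(strings.NewReader(counterABI))
	if err != nil {
		return nil, err
	}
	// Only a caller is needed for read-only access; transactor and filterer
	// may be nil, as in the tests above.
	bc := bind.NewBoundContract(at, parsed, caller, nil, nil)

	var out []interface{}
	if err := bc.Call(&bind.CallOpts{}, &out, "count"); err != nil {
		return nil, err
	}
	return out[0].(*big.Int), nil
}

// bumpCount pairs the simplified SignerFn with a chain-ID aware transactor;
// chain ID 1337 matches the simulated backend used by the tests above.
func bumpCount(key *ecdsa.PrivateKey, backend bind.ContractBackend, at common.Address) (*types.Transaction, error) {
	parsed, err := abi.JSON(strings.NewReader(counterABI))
	if err != nil {
		return nil, err
	}
	opts, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
	if err != nil {
		return nil, err
	}
	bc := bind.NewBoundContract(at, parsed, backend, backend, backend)
	return bc.Transact(opts, "increment")
}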
func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) error { if len(log.Data) > 0 { - if err := c.abi.Unpack(out, event, log.Data); err != nil { + if err := c.abi.UnpackIntoInterface(out, event, log.Data); err != nil { return err } } @@ -337,7 +359,7 @@ func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) indexed = append(indexed, arg) } } - return parseTopics(out, indexed, log.Topics[1:]) + return abi.ParseTopics(out, indexed, log.Topics[1:]) } // UnpackLogIntoMap unpacks a retrieved log into the provided map. @@ -353,7 +375,7 @@ func (c *BoundContract) UnpackLogIntoMap(out map[string]interface{}, event strin indexed = append(indexed, arg) } } - return parseTopicsIntoMap(out, indexed, log.Topics[1:]) + return abi.ParseTopicsIntoMap(out, indexed, log.Topics[1:]) } // ensureContext is a helper method to ensure a context is not nil, even if the diff --git a/accounts/abi/bind/base_test.go b/accounts/abi/bind/base_test.go index 3ae685e00f0a..c4740f68b750 100644 --- a/accounts/abi/bind/base_test.go +++ b/accounts/abi/bind/base_test.go @@ -17,9 +17,9 @@ package bind_test import ( - "bytes" "context" "math/big" + "reflect" "strings" "testing" @@ -34,8 +34,10 @@ import ( ) type mockCaller struct { - codeAtBlockNumber *big.Int - callContractBlockNumber *big.Int + codeAtBlockNumber *big.Int + callContractBlockNumber *big.Int + pendingCodeAtCalled bool + pendingCallContractCalled bool } func (mc *mockCaller) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { @@ -47,6 +49,16 @@ func (mc *mockCaller) CallContract(ctx context.Context, call ethereum.CallMsg, b mc.callContractBlockNumber = blockNumber return nil, nil } + +func (mc *mockCaller) PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) { + mc.pendingCodeAtCalled = true + return nil, nil +} + +func (mc *mockCaller) PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error) { + mc.pendingCallContractCalled = true + return nil, nil +} func TestPassingBlockNumber(t *testing.T) { mc := &mockCaller{} @@ -59,11 +71,10 @@ func TestPassingBlockNumber(t *testing.T) { }, }, }, mc, nil, nil) - var ret string blockNumber := big.NewInt(42) - bc.Call(&bind.CallOpts{BlockNumber: blockNumber}, &ret, "something") + bc.Call(&bind.CallOpts{BlockNumber: blockNumber}, nil, "something") if mc.callContractBlockNumber != blockNumber { t.Fatalf("CallContract() was not passed the block number") @@ -73,7 +84,7 @@ func TestPassingBlockNumber(t *testing.T) { t.Fatalf("CodeAt() was not passed the block number") } - bc.Call(&bind.CallOpts{}, &ret, "something") + bc.Call(&bind.CallOpts{}, nil, "something") if mc.callContractBlockNumber != nil { t.Fatalf("CallContract() was passed a block number when it should not have been") @@ -82,57 +93,39 @@ func TestPassingBlockNumber(t *testing.T) { if mc.codeAtBlockNumber != nil { t.Fatalf("CodeAt() was passed a block number when it should not have been") } + + bc.Call(&bind.CallOpts{BlockNumber: blockNumber, Pending: true}, nil, "something") + + if !mc.pendingCallContractCalled { + t.Fatalf("CallContract() was not passed the block number") + } + + if !mc.pendingCodeAtCalled { + t.Fatalf("CodeAt() was not passed the block number") + } } const hexData = 
"0x000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158" func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) { hash := crypto.Keccak256Hash([]byte("testName")) - mockLog := types.Log{ - Address: common.HexToAddress("0x0"), - Topics: []common.Hash{ - common.HexToHash("0x0"), - hash, - }, - Data: hexutil.MustDecode(hexData), - BlockNumber: uint64(26), - TxHash: common.HexToHash("0x0"), - TxIndex: 111, - BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}), - Index: 7, - Removed: false, + topics := []common.Hash{ + common.HexToHash("0x0"), + hash, } + mockLog := newMockLog(topics, common.HexToHash("0x0")) abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"string"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]` parsedAbi, _ := abi.JSON(strings.NewReader(abiString)) bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil) - receivedMap := make(map[string]interface{}) expectedReceivedMap := map[string]interface{}{ "name": hash, "sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"), "amount": big.NewInt(1), "memo": []byte{88}, } - if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil { - t.Error(err) - } - - if len(receivedMap) != 4 { - t.Fatal("unpacked map expected to have length 4") - } - if receivedMap["name"] != expectedReceivedMap["name"] { - t.Error("unpacked map does not match expected map") - } - if receivedMap["sender"] != expectedReceivedMap["sender"] { - t.Error("unpacked map does not match expected map") - } - if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 { - t.Error("unpacked map does not match expected map") - } - if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) { - t.Error("unpacked map does not match expected map") - } + unpackAndCheck(t, bc, expectedReceivedMap, mockLog) } func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) { @@ -141,51 +134,23 @@ func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) { t.Fatal(err) } hash := crypto.Keccak256Hash(sliceBytes) - mockLog := types.Log{ - Address: common.HexToAddress("0x0"), - Topics: []common.Hash{ - common.HexToHash("0x0"), - hash, - }, - Data: hexutil.MustDecode(hexData), - BlockNumber: uint64(26), - TxHash: common.HexToHash("0x0"), - TxIndex: 111, - BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}), - Index: 7, - Removed: false, + topics := []common.Hash{ + common.HexToHash("0x0"), + hash, } + mockLog := newMockLog(topics, common.HexToHash("0x0")) abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"names","type":"string[]"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]` parsedAbi, _ := abi.JSON(strings.NewReader(abiString)) bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil) - receivedMap := make(map[string]interface{}) expectedReceivedMap := map[string]interface{}{ "names": hash, "sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"), "amount": big.NewInt(1), "memo": []byte{88}, } - if err := 
bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil { - t.Error(err) - } - - if len(receivedMap) != 4 { - t.Fatal("unpacked map expected to have length 4") - } - if receivedMap["names"] != expectedReceivedMap["names"] { - t.Error("unpacked map does not match expected map") - } - if receivedMap["sender"] != expectedReceivedMap["sender"] { - t.Error("unpacked map does not match expected map") - } - if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 { - t.Error("unpacked map does not match expected map") - } - if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) { - t.Error("unpacked map does not match expected map") - } + unpackAndCheck(t, bc, expectedReceivedMap, mockLog) } func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) { @@ -194,51 +159,23 @@ func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) { t.Fatal(err) } hash := crypto.Keccak256Hash(arrBytes) - mockLog := types.Log{ - Address: common.HexToAddress("0x0"), - Topics: []common.Hash{ - common.HexToHash("0x0"), - hash, - }, - Data: hexutil.MustDecode(hexData), - BlockNumber: uint64(26), - TxHash: common.HexToHash("0x0"), - TxIndex: 111, - BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}), - Index: 7, - Removed: false, + topics := []common.Hash{ + common.HexToHash("0x0"), + hash, } + mockLog := newMockLog(topics, common.HexToHash("0x0")) abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"addresses","type":"address[2]"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]` parsedAbi, _ := abi.JSON(strings.NewReader(abiString)) bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil) - receivedMap := make(map[string]interface{}) expectedReceivedMap := map[string]interface{}{ "addresses": hash, "sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"), "amount": big.NewInt(1), "memo": []byte{88}, } - if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil { - t.Error(err) - } - - if len(receivedMap) != 4 { - t.Fatal("unpacked map expected to have length 4") - } - if receivedMap["addresses"] != expectedReceivedMap["addresses"] { - t.Error("unpacked map does not match expected map") - } - if receivedMap["sender"] != expectedReceivedMap["sender"] { - t.Error("unpacked map does not match expected map") - } - if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 { - t.Error("unpacked map does not match expected map") - } - if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) { - t.Error("unpacked map does not match expected map") - } + unpackAndCheck(t, bc, expectedReceivedMap, mockLog) } func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) { @@ -249,99 +186,72 @@ func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) { functionTyBytes := append(addrBytes, functionSelector...) 
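	// The Solidity "function" type is a fixed 24-byte value, conventionally a
	// 20-byte address followed by a 4-byte selector. Being a value type it is
	// stored in the topic as-is, so the expected map below holds the 24-byte
	// value directly, unlike the indexed string/slice/array cases above where
	// only the keccak256 hash of the original data can be recovered.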
var functionTy [24]byte copy(functionTy[:], functionTyBytes[0:24]) - mockLog := types.Log{ - Address: common.HexToAddress("0x0"), - Topics: []common.Hash{ - common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"), - common.BytesToHash(functionTyBytes), - }, - Data: hexutil.MustDecode(hexData), - BlockNumber: uint64(26), - TxHash: common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"), - TxIndex: 111, - BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}), - Index: 7, - Removed: false, + topics := []common.Hash{ + common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"), + common.BytesToHash(functionTyBytes), } - + mockLog := newMockLog(topics, common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42")) abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"function","type":"function"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]` parsedAbi, _ := abi.JSON(strings.NewReader(abiString)) bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil) - receivedMap := make(map[string]interface{}) expectedReceivedMap := map[string]interface{}{ "function": functionTy, "sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"), "amount": big.NewInt(1), "memo": []byte{88}, } - if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil { - t.Error(err) - } - - if len(receivedMap) != 4 { - t.Fatal("unpacked map expected to have length 4") - } - if receivedMap["function"] != expectedReceivedMap["function"] { - t.Error("unpacked map does not match expected map") - } - if receivedMap["sender"] != expectedReceivedMap["sender"] { - t.Error("unpacked map does not match expected map") - } - if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 { - t.Error("unpacked map does not match expected map") - } - if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) { - t.Error("unpacked map does not match expected map") - } + unpackAndCheck(t, bc, expectedReceivedMap, mockLog) } func TestUnpackIndexedBytesTyLogIntoMap(t *testing.T) { - byts := []byte{1, 2, 3, 4, 5} - hash := crypto.Keccak256Hash(byts) - mockLog := types.Log{ - Address: common.HexToAddress("0x0"), - Topics: []common.Hash{ - common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"), - hash, - }, - Data: hexutil.MustDecode(hexData), - BlockNumber: uint64(26), - TxHash: common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"), - TxIndex: 111, - BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}), - Index: 7, - Removed: false, + bytes := []byte{1, 2, 3, 4, 5} + hash := crypto.Keccak256Hash(bytes) + topics := []common.Hash{ + common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"), + hash, } + mockLog := newMockLog(topics, common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42")) abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"content","type":"bytes"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]` parsedAbi, _ := abi.JSON(strings.NewReader(abiString)) bc := 
bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil) - receivedMap := make(map[string]interface{}) expectedReceivedMap := map[string]interface{}{ "content": hash, "sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"), "amount": big.NewInt(1), "memo": []byte{88}, } - if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil { + unpackAndCheck(t, bc, expectedReceivedMap, mockLog) +} + +func unpackAndCheck(t *testing.T, bc *bind.BoundContract, expected map[string]interface{}, mockLog types.Log) { + received := make(map[string]interface{}) + if err := bc.UnpackLogIntoMap(received, "received", mockLog); err != nil { t.Error(err) } - if len(receivedMap) != 4 { - t.Fatal("unpacked map expected to have length 4") - } - if receivedMap["content"] != expectedReceivedMap["content"] { - t.Error("unpacked map does not match expected map") - } - if receivedMap["sender"] != expectedReceivedMap["sender"] { - t.Error("unpacked map does not match expected map") + if len(received) != len(expected) { + t.Fatalf("unpacked map length %v not equal expected length of %v", len(received), len(expected)) } - if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 { - t.Error("unpacked map does not match expected map") + for name, elem := range expected { + if !reflect.DeepEqual(elem, received[name]) { + t.Errorf("field %v does not match expected, want %v, got %v", name, elem, received[name]) + } } - if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) { - t.Error("unpacked map does not match expected map") +} + +func newMockLog(topics []common.Hash, txHash common.Hash) types.Log { + return types.Log{ + Address: common.HexToAddress("0x0"), + Topics: topics, + Data: hexutil.MustDecode(hexData), + BlockNumber: uint64(26), + TxHash: txHash, + TxIndex: 111, + BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}), + Index: 7, + Removed: false, } } diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go index 13ac286b4552..0e98709b1455 100644 --- a/accounts/abi/bind/bind.go +++ b/accounts/abi/bind/bind.go @@ -52,7 +52,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] // contracts is the map of each individual contract requested binding contracts = make(map[string]*tmplContract) - // structs is the map of all reclared structs shared by passed contracts. + // structs is the map of all redeclared structs shared by passed contracts. structs = make(map[string]*tmplStruct) // isLib is the map used to flag each encountered library as such @@ -77,11 +77,13 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] calls = make(map[string]*tmplMethod) transacts = make(map[string]*tmplMethod) events = make(map[string]*tmplEvent) + fallback *tmplMethod + receive *tmplMethod - // identifiers are used to detect duplicated identifier of function - // and event. For all calls, transacts and events, abigen will generate + // identifiers are used to detect duplicated identifiers of functions + // and events. For all calls, transacts and events, abigen will generate // corresponding bindings. However we have to ensure there is no - // identifier coliision in the bindings of these categories. + // identifier collisions in the bindings of these categories. 
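	// For instance, Solidity methods "send" and "Send" both normalize to the
	// Go identifier Send; rather than silently overwriting one binding with
	// the other, such duplicates are rejected (an alias can be supplied to
	// rename one of them).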
callIdentifiers = make(map[string]bool) transactIdentifiers = make(map[string]bool) eventIdentifiers = make(map[string]bool) @@ -92,7 +94,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] normalizedName := methodNormalizer[lang](alias(aliases, original.Name)) // Ensure there is no duplicated identifier var identifiers = callIdentifiers - if !original.Const { + if !original.IsConstant() { identifiers = transactIdentifiers } if identifiers[normalizedName] { @@ -121,7 +123,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] } } // Append the methods to the call or transact lists - if original.Const { + if original.IsConstant() { calls[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original.Outputs)} } else { transacts[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original.Outputs)} @@ -156,7 +158,13 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] // Append the event to the accumulator list events[original.Name] = &tmplEvent{Original: original, Normalized: normalized} } - + // Add two special fallback functions if they exist + if evmABI.HasFallback() { + fallback = &tmplMethod{Original: evmABI.Fallback} + } + if evmABI.HasReceive() { + receive = &tmplMethod{Original: evmABI.Receive} + } // There is no easy way to pass arbitrary java objects to the Go side. if len(structs) > 0 && lang == LangJava { return "", errors.New("java binding for tuple arguments is not supported yet") @@ -169,6 +177,8 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] Constructor: evmABI.Constructor, Calls: calls, Transacts: transacts, + Fallback: fallback, + Receive: receive, Events: events, Libraries: make(map[string]string), } @@ -210,8 +220,6 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] "bindtype": bindType[lang], "bindtopictype": bindTopicType[lang], "namedtype": namedType[lang], - "formatmethod": formatMethod, - "formatevent": formatEvent, "capitalise": capitalise, "decapitalise": decapitalise, } @@ -238,7 +246,7 @@ var bindType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) stri LangJava: bindTypeJava, } -// bindBasicTypeGo converts basic solidity types(except array, slice and tuple) to Go one. +// bindBasicTypeGo converts basic solidity types(except array, slice and tuple) to Go ones. func bindBasicTypeGo(kind abi.Type) string { switch kind.T { case abi.AddressTy: @@ -278,7 +286,7 @@ func bindTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { } } -// bindBasicTypeJava converts basic solidity types(except array, slice and tuple) to Java one. +// bindBasicTypeJava converts basic solidity types(except array, slice and tuple) to Java ones. func bindBasicTypeJava(kind abi.Type) string { switch kind.T { case abi.AddressTy: @@ -322,7 +330,7 @@ func bindBasicTypeJava(kind abi.Type) string { } // pluralizeJavaType explicitly converts multidimensional types to predefined -// type in go side. +// types in go side. func pluralizeJavaType(typ string) string { switch typ { case "boolean": @@ -361,7 +369,7 @@ var bindTopicType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) } // bindTopicTypeGo converts a Solidity topic type to a Go one. It is almost the same -// funcionality as for simple types, but dynamic types get converted to hashes. 
+// functionality as for simple types, but dynamic types get converted to hashes. func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { bound := bindTypeGo(kind, structs) @@ -378,7 +386,7 @@ func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { } // bindTopicTypeJava converts a Solidity topic type to a Java one. It is almost the same -// funcionality as for simple types, but dynamic types get converted to hashes. +// functionality as for simple types, but dynamic types get converted to hashes. func bindTopicTypeJava(kind abi.Type, structs map[string]*tmplStruct) string { bound := bindTypeJava(kind, structs) @@ -386,7 +394,7 @@ func bindTopicTypeJava(kind abi.Type, structs map[string]*tmplStruct) string { // parameters that are not value types i.e. arrays and structs are not // stored directly but instead a keccak256-hash of an encoding is stored. // - // We only convert stringS and bytes to hash, still need to deal with + // We only convert strings and bytes to hash, still need to deal with // array(both fixed-size and dynamic-size) and struct. if bound == "String" || bound == "byte[]" { bound = "Hash" @@ -407,7 +415,7 @@ var bindStructType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { switch kind.T { case abi.TupleTy: - // We compose raw struct name and canonical parameter expression + // We compose a raw struct name and a canonical parameter expression // together here. The reason is before solidity v0.5.11, kind.TupleRawName // is empty, so we use canonical parameter expression to distinguish // different struct definition. From the consideration of backward @@ -446,7 +454,7 @@ func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string { switch kind.T { case abi.TupleTy: - // We compose raw struct name and canonical parameter expression + // We compose a raw struct name and a canonical parameter expression // together here. The reason is before solidity v0.5.11, kind.TupleRawName // is empty, so we use canonical parameter expression to distinguish // different struct definition. From the consideration of backward @@ -478,7 +486,7 @@ func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string { } // namedType is a set of functions that transform language specific types to -// named versions that my be used inside method names. +// named versions that may be used inside method names. var namedType = map[Lang]func(string, abi.Type) string{ LangGo: func(string, abi.Type) string { panic("this shouldn't be needed") }, LangJava: namedTypeJava, @@ -520,16 +528,14 @@ func alias(aliases map[string]string, n string) string { } // methodNormalizer is a name transformer that modifies Solidity method names to -// conform to target language naming concentions. +// conform to target language naming conventions. var methodNormalizer = map[Lang]func(string) string{ LangGo: abi.ToCamelCase, LangJava: decapitalise, } // capitalise makes a camel-case string which starts with an upper case character. -func capitalise(input string) string { - return abi.ToCamelCase(input) -} +var capitalise = abi.ToCamelCase // decapitalise makes a camel-case string which starts with a lower case character. 
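// For example, assuming abi.ToCamelCase splits on underscores as documented,
// a Solidity method "transfer_from" becomes TransferFrom in the Go bindings
// (via capitalise) and transferFrom in the Java bindings (via decapitalise).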
func decapitalise(input string) string { @@ -578,63 +584,3 @@ func hasStruct(t abi.Type) bool { return false } } - -// resolveArgName converts a raw argument representation into a user friendly format. -func resolveArgName(arg abi.Argument, structs map[string]*tmplStruct) string { - var ( - prefix string - embedded string - typ = &arg.Type - ) -loop: - for { - switch typ.T { - case abi.SliceTy: - prefix += "[]" - case abi.ArrayTy: - prefix += fmt.Sprintf("[%d]", typ.Size) - default: - embedded = typ.TupleRawName + typ.String() - break loop - } - typ = typ.Elem - } - if s, exist := structs[embedded]; exist { - return prefix + s.Name - } else { - return arg.Type.String() - } -} - -// formatMethod transforms raw method representation into a user friendly one. -func formatMethod(method abi.Method, structs map[string]*tmplStruct) string { - inputs := make([]string, len(method.Inputs)) - for i, input := range method.Inputs { - inputs[i] = fmt.Sprintf("%v %v", resolveArgName(input, structs), input.Name) - } - outputs := make([]string, len(method.Outputs)) - for i, output := range method.Outputs { - outputs[i] = resolveArgName(output, structs) - if len(output.Name) > 0 { - outputs[i] += fmt.Sprintf(" %v", output.Name) - } - } - constant := "" - if method.Const { - constant = "constant " - } - return fmt.Sprintf("function %v(%v) %sreturns(%v)", method.RawName, strings.Join(inputs, ", "), constant, strings.Join(outputs, ", ")) -} - -// formatEvent transforms raw event representation into a user friendly one. -func formatEvent(event abi.Event, structs map[string]*tmplStruct) string { - inputs := make([]string, len(event.Inputs)) - for i, input := range event.Inputs { - if input.Indexed { - inputs[i] = fmt.Sprintf("%v indexed %v", resolveArgName(input, structs), input.Name) - } else { - inputs[i] = fmt.Sprintf("%v %v", resolveArgName(input, structs), input.Name) - } - } - return fmt.Sprintf("event %v(%v)", event.RawName, strings.Join(inputs, ", ")) -} diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go index 7594af775e8b..1a8a17e45e7d 100644 --- a/accounts/abi/bind/bind_test.go +++ b/accounts/abi/bind/bind_test.go @@ -199,7 +199,8 @@ var bindTests = []struct { {"type":"event","name":"indexed","inputs":[{"name":"addr","type":"address","indexed":true},{"name":"num","type":"int256","indexed":true}]}, {"type":"event","name":"mixed","inputs":[{"name":"addr","type":"address","indexed":true},{"name":"num","type":"int256"}]}, {"type":"event","name":"anonymous","anonymous":true,"inputs":[]}, - {"type":"event","name":"dynamic","inputs":[{"name":"idxStr","type":"string","indexed":true},{"name":"idxDat","type":"bytes","indexed":true},{"name":"str","type":"string"},{"name":"dat","type":"bytes"}]} + {"type":"event","name":"dynamic","inputs":[{"name":"idxStr","type":"string","indexed":true},{"name":"idxDat","type":"bytes","indexed":true},{"name":"str","type":"string"},{"name":"dat","type":"bytes"}]}, + {"type":"event","name":"unnamed","inputs":[{"name":"","type":"uint256","indexed": true},{"name":"","type":"uint256","indexed":true}]} ] `}, ` @@ -249,6 +250,12 @@ var bindTests = []struct { fmt.Println(event.Addr) // Make sure the reconstructed indexed fields are present fmt.Println(res, str, dat, hash, err) + + oit, err := e.FilterUnnamed(nil, []*big.Int{}, []*big.Int{}) + + arg0 := oit.Event.Arg0 // Make sure unnamed arguments are handled correctly + arg1 := oit.Event.Arg1 // Make sure unnamed arguments are handled correctly + fmt.Println(arg0, arg1) } // Run a tiny reflection test to ensure 
disallowed methods don't appear if _, ok := reflect.TypeOf(&EventChecker{}).MethodByName("FilterAnonymous"); ok { @@ -289,7 +296,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -344,7 +351,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -390,7 +397,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -448,7 +455,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -496,7 +503,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -591,7 +598,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -641,7 +648,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -716,7 +723,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -810,7 +817,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -1000,7 +1007,7 @@ var bindTests = []struct { ` // 
Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -1135,7 +1142,7 @@ var bindTests = []struct { ` key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -1277,7 +1284,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -1343,7 +1350,7 @@ var bindTests = []struct { ` // Initialize test accounts key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -1384,7 +1391,7 @@ var bindTests = []struct { if n != 3 { t.Fatalf("Invalid bar0 event") } - case <-time.NewTimer(100 * time.Millisecond).C: + case <-time.NewTimer(3 * time.Second).C: t.Fatalf("Wait bar0 event timeout") } @@ -1395,7 +1402,7 @@ var bindTests = []struct { if n != 1 { t.Fatalf("Invalid bar event") } - case <-time.NewTimer(100 * time.Millisecond).C: + case <-time.NewTimer(3 * time.Second).C: t.Fatalf("Wait bar event timeout") } close(stopCh) @@ -1437,7 +1444,7 @@ var bindTests = []struct { sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 10000000) defer sim.Close() - transactOpts := bind.NewKeyedTransactor(key) + transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) _, _, _, err := DeployIdentifierCollision(transactOpts, sim) if err != nil { t.Fatalf("failed to deploy contract: %v", err) @@ -1499,7 +1506,7 @@ var bindTests = []struct { sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 10000000) defer sim.Close() - transactOpts := bind.NewKeyedTransactor(key) + transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) _, _, c1, err := DeployContractOne(transactOpts, sim) if err != nil { t.Fatal("Failed to deploy contract") @@ -1530,6 +1537,154 @@ var bindTests = []struct { nil, []string{"ContractOne", "ContractTwo", "ExternalLib"}, }, + // Test the existence of the free retrieval calls + { + `PureAndView`, + `pragma solidity >=0.6.0; + contract PureAndView { + function PureFunc() public pure returns (uint) { + return 42; + } + function ViewFunc() public view returns (uint) { + return block.number; + } + } + `, + []string{`608060405234801561001057600080fd5b5060b68061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060325760003560e01c806376b5686a146037578063bb38c66c146053575b600080fd5b603d606f565b6040518082815260200191505060405180910390f35b60596077565b6040518082815260200191505060405180910390f35b600043905090565b6000602a90509056fea2646970667358221220d158c2ab7fdfce366a7998ec79ab84edd43b9815630bbaede2c760ea77f29f7f64736f6c63430006000033`}, + []string{`[{"inputs": [],"name": 
"PureFunc","outputs": [{"internalType": "uint256","name": "","type": "uint256"}],"stateMutability": "pure","type": "function"},{"inputs": [],"name": "ViewFunc","outputs": [{"internalType": "uint256","name": "","type": "uint256"}],"stateMutability": "view","type": "function"}]`}, + ` + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + `, + ` + // Generate a new random account and a funded simulator + key, _ := crypto.GenerateKey() + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + defer sim.Close() + + // Deploy a tester contract and execute a structured call on it + _, _, pav, err := DeployPureAndView(auth, sim) + if err != nil { + t.Fatalf("Failed to deploy PureAndView contract: %v", err) + } + sim.Commit() + + // This test the existence of the free retreiver call for view and pure functions + if num, err := pav.PureFunc(nil); err != nil { + t.Fatalf("Failed to call anonymous field retriever: %v", err) + } else if num.Cmp(big.NewInt(42)) != 0 { + t.Fatalf("Retrieved value mismatch: have %v, want %v", num, 42) + } + if num, err := pav.ViewFunc(nil); err != nil { + t.Fatalf("Failed to call anonymous field retriever: %v", err) + } else if num.Cmp(big.NewInt(1)) != 0 { + t.Fatalf("Retrieved value mismatch: have %v, want %v", num, 1) + } + `, + nil, + nil, + nil, + nil, + }, + // Test fallback separation introduced in v0.6.0 + { + `NewFallbacks`, + ` + pragma solidity >=0.6.0 <0.7.0; + + contract NewFallbacks { + event Fallback(bytes data); + fallback() external { + bytes memory data; + assembly { + calldatacopy(data, 0, calldatasize()) + } + emit Fallback(data); + } + + event Received(address addr, uint value); + receive() external payable { + emit Received(msg.sender, msg.value); + } + } + `, + []string{"60806040523480156100115760006000fd5b50610017565b61016e806100266000396000f3fe60806040526004361061000d575b36610081575b7f88a5966d370b9919b20f3e2c13ff65706f196a4e32cc2c12bf57088f885258743334604051808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a15b005b34801561008e5760006000fd5b505b606036600082377f9043988963722edecc2099c75b0af0ff76af14ffca42ed6bce059a20a2a9f986816040518080602001828103825283818151815260200191508051906020019080838360005b838110156100fa5780820151818401525b6020810190506100de565b50505050905090810190601f1680156101275780820380516001836020036101000a031916815260200191505b509250505060405180910390a1505b00fea26469706673582212205643ca37f40c2b352dc541f42e9e6720de065de756324b7fcc9fb1d67eda4a7d64736f6c63430006040033"}, + []string{`[{"anonymous":false,"inputs":[{"indexed":false,"internalType":"bytes","name":"data","type":"bytes"}],"name":"Fallback","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"addr","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Received","type":"event"},{"stateMutability":"nonpayable","type":"fallback"},{"stateMutability":"payable","type":"receive"}]`}, + ` + "bytes" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + `, + ` + 
key, _ := crypto.GenerateKey() + addr := crypto.PubkeyToAddress(key.PublicKey) + + sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 1000000) + defer sim.Close() + + opts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + _, _, c, err := DeployNewFallbacks(opts, sim) + if err != nil { + t.Fatalf("Failed to deploy contract: %v", err) + } + sim.Commit() + + // Test receive function + opts.Value = big.NewInt(100) + c.Receive(opts) + sim.Commit() + + var gotEvent bool + iter, _ := c.FilterReceived(nil) + defer iter.Close() + for iter.Next() { + if iter.Event.Addr != addr { + t.Fatal("Msg.sender mismatch") + } + if iter.Event.Value.Uint64() != 100 { + t.Fatal("Msg.value mismatch") + } + gotEvent = true + break + } + if !gotEvent { + t.Fatal("Expect to receive event emitted by receive") + } + + // Test fallback function + opts.Value = nil + calldata := []byte{0x01, 0x02, 0x03} + c.Fallback(opts, calldata) + sim.Commit() + + iter2, _ := c.FilterFallback(nil) + defer iter2.Close() + for iter2.Next() { + if !bytes.Equal(iter2.Event.Data, calldata) { + t.Fatal("calldata mismatch") + } + gotEvent = true + break + } + if !gotEvent { + t.Fatal("Expect to receive event emitted by fallback") + } + `, + nil, + nil, + nil, + nil, + }, } // Tests that packages generated by the binder can be successfully compiled and @@ -1541,11 +1696,11 @@ func TestGolangBindings(t *testing.T) { t.Skip("go sdk not found for testing") } // Create a temporary workspace for the test suite - ws, err := ioutil.TempDir("", "") + ws, err := ioutil.TempDir("", "binding-test") if err != nil { t.Fatalf("failed to create temporary workspace: %v", err) } - defer os.RemoveAll(ws) + //defer os.RemoveAll(ws) pkg := filepath.Join(ws, "bindtest") if err = os.MkdirAll(pkg, 0700); err != nil { @@ -1665,13 +1820,10 @@ package bindtest; import org.ethereum.geth.*; import java.util.*; - - public class Test { // ABI is the input ABI used to generate the binding from. 
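	// The ABI below exposes setters for every basic Solidity type (bool,
	// fixed-size ints/uints, address, bytes, bytesN, string) together with
	// their fixed-size array and dynamic list forms, exercising the Java type
	// mapping (bindTypeJava / pluralizeJavaType) end to end.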
public final static String ABI = "[{\"constant\":false,\"inputs\":[{\"name\":\"u16\",\"type\":\"uint16\"}],\"name\":\"setUint16\",\"outputs\":[{\"name\":\"\",\"type\":\"uint16\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b_a\",\"type\":\"bool[2]\"}],\"name\":\"setBoolArray\",\"outputs\":[{\"name\":\"\",\"type\":\"bool[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"a_a\",\"type\":\"address[2]\"}],\"name\":\"setAddressArray\",\"outputs\":[{\"name\":\"\",\"type\":\"address[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"bs_l\",\"type\":\"bytes[]\"}],\"name\":\"setBytesList\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u8\",\"type\":\"uint8\"}],\"name\":\"setUint8\",\"outputs\":[{\"name\":\"\",\"type\":\"uint8\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u32\",\"type\":\"uint32\"}],\"name\":\"setUint32\",\"outputs\":[{\"name\":\"\",\"type\":\"uint32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b\",\"type\":\"bool\"}],\"name\":\"setBool\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i256_l\",\"type\":\"int256[]\"}],\"name\":\"setInt256List\",\"outputs\":[{\"name\":\"\",\"type\":\"int256[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u256_a\",\"type\":\"uint256[2]\"}],\"name\":\"setUint256Array\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b_l\",\"type\":\"bool[]\"}],\"name\":\"setBoolList\",\"outputs\":[{\"name\":\"\",\"type\":\"bool[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"bs_a\",\"type\":\"bytes[2]\"}],\"name\":\"setBytesArray\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"a_l\",\"type\":\"address[]\"}],\"name\":\"setAddressList\",\"outputs\":[{\"name\":\"\",\"type\":\"address[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i256_a\",\"type\":\"int256[2]\"}],\"name\":\"setInt256Array\",\"outputs\":[{\"name\":\"\",\"type\":\"int256[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"s_a\",\"type\":\"string[2]\"}],\"name\":\"setStringArray\",\"outputs\":[{\"name\":\"\",\"type\":\"string[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"s\",\"type\":\"string\"}],\"name\":\"setString\",\"outputs\":[{\"name\":\"\",\"type\":\"string\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u64\",\"type\":\"uint64\"}],\"name\":\"setUint64\",\"outputs\":[{\"name\":\
"\",\"type\":\"uint64\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i16\",\"type\":\"int16\"}],\"name\":\"setInt16\",\"outputs\":[{\"name\":\"\",\"type\":\"int16\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i8\",\"type\":\"int8\"}],\"name\":\"setInt8\",\"outputs\":[{\"name\":\"\",\"type\":\"int8\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u256_l\",\"type\":\"uint256[]\"}],\"name\":\"setUint256List\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i256\",\"type\":\"int256\"}],\"name\":\"setInt256\",\"outputs\":[{\"name\":\"\",\"type\":\"int256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i32\",\"type\":\"int32\"}],\"name\":\"setInt32\",\"outputs\":[{\"name\":\"\",\"type\":\"int32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b32\",\"type\":\"bytes32\"}],\"name\":\"setBytes32\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"s_l\",\"type\":\"string[]\"}],\"name\":\"setStringList\",\"outputs\":[{\"name\":\"\",\"type\":\"string[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u256\",\"type\":\"uint256\"}],\"name\":\"setUint256\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"bs\",\"type\":\"bytes\"}],\"name\":\"setBytes\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"a\",\"type\":\"address\"}],\"name\":\"setAddress\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i64\",\"type\":\"int64\"}],\"name\":\"setInt64\",\"outputs\":[{\"name\":\"\",\"type\":\"int64\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b1\",\"type\":\"bytes1\"}],\"name\":\"setBytes1\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes1\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"; - // BYTECODE is the compiled bytecode used for deploying new contracts. 
public final static String BYTECODE = "0x608060405234801561001057600080fd5b5061265a806100206000396000f3fe608060405234801561001057600080fd5b50600436106101e1576000357c0100000000000000000000000000000000000000000000000000000000900480637fcaf66611610116578063c2b12a73116100b4578063da359dc81161008e578063da359dc814610666578063e30081a014610696578063e673eb32146106c6578063fba1a1c3146106f6576101e1565b8063c2b12a73146105d6578063c577796114610606578063d2282dc514610636576101e1565b80639a19a953116100f05780639a19a95314610516578063a0709e1914610546578063a53b1c1e14610576578063b7d5df31146105a6576101e1565b80637fcaf66614610486578063822cba69146104b657806386114cea146104e6576101e1565b806322722302116101835780635119655d1161015d5780635119655d146103c65780635be6b37e146103f65780636aa482fc146104265780637173b69514610456576101e1565b806322722302146103365780632766a755146103665780634d5ee6da14610396576101e1565b806316c105e2116101bf57806316c105e2146102765780631774e646146102a65780631c9352e2146102d65780631e26fd3314610306576101e1565b80630477988a146101e6578063118a971814610216578063151f547114610246575b600080fd5b61020060048036036101fb9190810190611599565b610726565b60405161020d9190611f01565b60405180910390f35b610230600480360361022b919081019061118d565b61072d565b60405161023d9190611ca6565b60405180910390f35b610260600480360361025b9190810190611123565b61073a565b60405161026d9190611c69565b60405180910390f35b610290600480360361028b9190810190611238565b610747565b60405161029d9190611d05565b60405180910390f35b6102c060048036036102bb919081019061163d565b61074e565b6040516102cd9190611f6d565b60405180910390f35b6102f060048036036102eb91908101906115eb565b610755565b6040516102fd9190611f37565b60405180910390f35b610320600480360361031b91908101906113cf565b61075c565b60405161032d9190611de5565b60405180910390f35b610350600480360361034b91908101906112a2565b610763565b60405161035d9190611d42565b60405180910390f35b610380600480360361037b9190810190611365565b61076a565b60405161038d9190611da8565b60405180910390f35b6103b060048036036103ab91908101906111b6565b610777565b6040516103bd9190611cc1565b60405180910390f35b6103e060048036036103db91908101906111f7565b61077e565b6040516103ed9190611ce3565b60405180910390f35b610410600480360361040b919081019061114c565b61078b565b60405161041d9190611c84565b60405180910390f35b610440600480360361043b9190810190611279565b610792565b60405161044d9190611d27565b60405180910390f35b610470600480360361046b91908101906112e3565b61079f565b60405161047d9190611d64565b60405180910390f35b6104a0600480360361049b9190810190611558565b6107ac565b6040516104ad9190611edf565b60405180910390f35b6104d060048036036104cb9190810190611614565b6107b3565b6040516104dd9190611f52565b60405180910390f35b61050060048036036104fb919081019061148b565b6107ba565b60405161050d9190611e58565b60405180910390f35b610530600480360361052b919081019061152f565b6107c1565b60405161053d9190611ec4565b60405180910390f35b610560600480360361055b919081019061138e565b6107c8565b60405161056d9190611dc3565b60405180910390f35b610590600480360361058b91908101906114b4565b6107cf565b60405161059d9190611e73565b60405180910390f35b6105c060048036036105bb91908101906114dd565b6107d6565b6040516105cd9190611e8e565b60405180910390f35b6105f060048036036105eb9190810190611421565b6107dd565b6040516105fd9190611e1b565b60405180910390f35b610620600480360361061b9190810190611324565b6107e4565b60405161062d9190611d86565b60405180910390f35b610650600480360361064b91908101906115c2565b6107eb565b60405161065d9190611f1c565b60405180910390f35b610680600480360361067b919081019061144a565b6107f2565b60405161068d9190611e36565b60405180910390f35b6106b060048036036106ab91908101906110fa565b6107f9565b6040516106bd9190611c4e56
5b60405180910390f35b6106e060048036036106db9190810190611506565b610800565b6040516106ed9190611ea9565b60405180910390f35b610710600480360361070b91908101906113f8565b610807565b60405161071d9190611e00565b60405180910390f35b6000919050565b61073561080e565b919050565b610742610830565b919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b610772610852565b919050565b6060919050565b610786610874565b919050565b6060919050565b61079a61089b565b919050565b6107a76108bd565b919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108835790505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108cc5790505090565b60006108f082356124f2565b905092915050565b600082601f830112151561090b57600080fd5b600261091e61091982611fb5565b611f88565b9150818385602084028201111561093457600080fd5b60005b83811015610964578161094a88826108e4565b845260208401935060208301925050600181019050610937565b5050505092915050565b600082601f830112151561098157600080fd5b813561099461098f82611fd7565b611f88565b915081818352602084019350602081019050838560208402820111156109b957600080fd5b60005b838110156109e957816109cf88826108e4565b8452602084019350602083019250506001810190506109bc565b5050505092915050565b600082601f8301121515610a0657600080fd5b6002610a19610a1482611fff565b611f88565b91508183856020840282011115610a2f57600080fd5b60005b83811015610a5f5781610a458882610e9e565b845260208401935060208301925050600181019050610a32565b5050505092915050565b600082601f8301121515610a7c57600080fd5b8135610a8f610a8a82612021565b611f88565b91508181835260208401935060208101905083856020840282011115610ab457600080fd5b60005b83811015610ae45781610aca8882610e9e565b845260208401935060208301925050600181019050610ab7565b5050505092915050565b600082601f8301121515610b0157600080fd5b6002610b14610b0f82612049565b611f88565b9150818360005b83811015610b4b5781358601610b318882610eda565b845260208401935060208301925050600181019050610b1b565b5050505092915050565b600082601f8301121515610b6857600080fd5b8135610b7b610b768261206b565b611f88565b9150818183526020840193506020810190508360005b83811015610bc15781358601610ba78882610eda565b845260208401935060208301925050600181019050610b91565b5050505092915050565b600082601f8301121515610bde57600080fd5b6002610bf1610bec82612093565b611f88565b91508183856020840282011115610c0757600080fd5b60005b83811015610c375781610c1d8882610f9a565b845260208401935060208301925050600181019050610c0a565b5050505092915050565b600082601f8301121515610c5457600080fd5b8135610c67610c62826120b5565b611f88565b91508181835260208401935060208101905083856020840282011115610c8c57600080fd5b60005b83811015610cbc5781610ca28882610f9a565b845260208401935060208301925050600181019050610c8f565b5050505092915050565b600082601f8301121515610cd957600080fd5b6002610cec610ce7826120dd565b611f88565b9150818360005b83811015610d235781358601610d098882610fea565b845260208401935060208301925050600181019050610cf3565b5050505092915050565b600082601f8301121515610d4057600080fd5b8135610d53610d4e826120ff565b611f88565b9150818183526020840193506020810190508360005b83811015610d995781358601610d7f8882610fea565b845260208401935060208301925050600181019050610d69565b5050505092915050565b600082601f83011215156
10db657600080fd5b6002610dc9610dc482612127565b611f88565b91508183856020840282011115610ddf57600080fd5b60005b83811015610e0f5781610df588826110aa565b845260208401935060208301925050600181019050610de2565b5050505092915050565b600082601f8301121515610e2c57600080fd5b8135610e3f610e3a82612149565b611f88565b91508181835260208401935060208101905083856020840282011115610e6457600080fd5b60005b83811015610e945781610e7a88826110aa565b845260208401935060208301925050600181019050610e67565b5050505092915050565b6000610eaa8235612504565b905092915050565b6000610ebe8235612510565b905092915050565b6000610ed2823561253c565b905092915050565b600082601f8301121515610eed57600080fd5b8135610f00610efb82612171565b611f88565b91508082526020830160208301858383011115610f1c57600080fd5b610f278382846125cd565b50505092915050565b600082601f8301121515610f4357600080fd5b8135610f56610f518261219d565b611f88565b91508082526020830160208301858383011115610f7257600080fd5b610f7d8382846125cd565b50505092915050565b6000610f928235612546565b905092915050565b6000610fa68235612553565b905092915050565b6000610fba823561255d565b905092915050565b6000610fce823561256a565b905092915050565b6000610fe28235612577565b905092915050565b600082601f8301121515610ffd57600080fd5b813561101061100b826121c9565b611f88565b9150808252602083016020830185838301111561102c57600080fd5b6110378382846125cd565b50505092915050565b600082601f830112151561105357600080fd5b8135611066611061826121f5565b611f88565b9150808252602083016020830185838301111561108257600080fd5b61108d8382846125cd565b50505092915050565b60006110a28235612584565b905092915050565b60006110b68235612592565b905092915050565b60006110ca823561259c565b905092915050565b60006110de82356125ac565b905092915050565b60006110f282356125c0565b905092915050565b60006020828403121561110c57600080fd5b600061111a848285016108e4565b91505092915050565b60006040828403121561113557600080fd5b6000611143848285016108f8565b91505092915050565b60006020828403121561115e57600080fd5b600082013567ffffffffffffffff81111561117857600080fd5b6111848482850161096e565b91505092915050565b60006040828403121561119f57600080fd5b60006111ad848285016109f3565b91505092915050565b6000602082840312156111c857600080fd5b600082013567ffffffffffffffff8111156111e257600080fd5b6111ee84828501610a69565b91505092915050565b60006020828403121561120957600080fd5b600082013567ffffffffffffffff81111561122357600080fd5b61122f84828501610aee565b91505092915050565b60006020828403121561124a57600080fd5b600082013567ffffffffffffffff81111561126457600080fd5b61127084828501610b55565b91505092915050565b60006040828403121561128b57600080fd5b600061129984828501610bcb565b91505092915050565b6000602082840312156112b457600080fd5b600082013567ffffffffffffffff8111156112ce57600080fd5b6112da84828501610c41565b91505092915050565b6000602082840312156112f557600080fd5b600082013567ffffffffffffffff81111561130f57600080fd5b61131b84828501610cc6565b91505092915050565b60006020828403121561133657600080fd5b600082013567ffffffffffffffff81111561135057600080fd5b61135c84828501610d2d565b91505092915050565b60006040828403121561137757600080fd5b600061138584828501610da3565b91505092915050565b6000602082840312156113a057600080fd5b600082013567ffffffffffffffff8111156113ba57600080fd5b6113c684828501610e19565b91505092915050565b6000602082840312156113e157600080fd5b60006113ef84828501610e9e565b91505092915050565b60006020828403121561140a57600080fd5b600061141884828501610eb2565b91505092915050565b60006020828403121561143357600080fd5b600061144184828501610ec6565b91505092915050565b60006020828403121561145c57600080fd5b600082013567ffffffffffffffff81111561147657600080fd5b61148284828501610f30565b91505092915050565b60006020828403121561149d57600080fd5b6000
6114ab84828501610f86565b91505092915050565b6000602082840312156114c657600080fd5b60006114d484828501610f9a565b91505092915050565b6000602082840312156114ef57600080fd5b60006114fd84828501610fae565b91505092915050565b60006020828403121561151857600080fd5b600061152684828501610fc2565b91505092915050565b60006020828403121561154157600080fd5b600061154f84828501610fd6565b91505092915050565b60006020828403121561156a57600080fd5b600082013567ffffffffffffffff81111561158457600080fd5b61159084828501611040565b91505092915050565b6000602082840312156115ab57600080fd5b60006115b984828501611096565b91505092915050565b6000602082840312156115d457600080fd5b60006115e2848285016110aa565b91505092915050565b6000602082840312156115fd57600080fd5b600061160b848285016110be565b91505092915050565b60006020828403121561162657600080fd5b6000611634848285016110d2565b91505092915050565b60006020828403121561164f57600080fd5b600061165d848285016110e6565b91505092915050565b61166f816123f7565b82525050565b61167e816122ab565b61168782612221565b60005b828110156116b95761169d858351611666565b6116a68261235b565b915060208501945060018101905061168a565b5050505050565b60006116cb826122b6565b8084526020840193506116dd8361222b565b60005b8281101561170f576116f3868351611666565b6116fc82612368565b91506020860195506001810190506116e0565b50849250505092915050565b611724816122c1565b61172d82612238565b60005b8281101561175f57611743858351611ab3565b61174c82612375565b9150602085019450600181019050611730565b5050505050565b6000611771826122cc565b80845260208401935061178383612242565b60005b828110156117b557611799868351611ab3565b6117a282612382565b9150602086019550600181019050611786565b50849250505092915050565b60006117cc826122d7565b836020820285016117dc8561224f565b60005b848110156118155783830388526117f7838351611b16565b92506118028261238f565b91506020880197506001810190506117df565b508196508694505050505092915050565b6000611831826122e2565b8084526020840193508360208202850161184a85612259565b60005b84811015611883578383038852611865838351611b16565b92506118708261239c565b915060208801975060018101905061184d565b508196508694505050505092915050565b61189d816122ed565b6118a682612266565b60005b828110156118d8576118bc858351611b5b565b6118c5826123a9565b91506020850194506001810190506118a9565b5050505050565b60006118ea826122f8565b8084526020840193506118fc83612270565b60005b8281101561192e57611912868351611b5b565b61191b826123b6565b91506020860195506001810190506118ff565b50849250505092915050565b600061194582612303565b836020820285016119558561227d565b60005b8481101561198e578383038852611970838351611bcd565b925061197b826123c3565b9150602088019750600181019050611958565b508196508694505050505092915050565b60006119aa8261230e565b808452602084019350836020820285016119c385612287565b60005b848110156119fc5783830388526119de838351611bcd565b92506119e9826123d0565b91506020880197506001810190506119c6565b508196508694505050505092915050565b611a1681612319565b611a1f82612294565b60005b82811015611a5157611a35858351611c12565b611a3e826123dd565b9150602085019450600181019050611a22565b5050505050565b6000611a6382612324565b808452602084019350611a758361229e565b60005b82811015611aa757611a8b868351611c12565b611a94826123ea565b9150602086019550600181019050611a78565b50849250505092915050565b611abc81612409565b82525050565b611acb81612415565b82525050565b611ada81612441565b82525050565b6000611aeb8261233a565b808452611aff8160208601602086016125dc565b611b088161260f565b602085010191505092915050565b6000611b218261232f565b808452611b358160208601602086016125dc565b611b3e8161260f565b602085010191505092915050565b611b558161244b565b82525050565b611b6481612458565b82525050565b611b7381612462565b82525050565b611b828161246f565b82525050565b611b918161247c565
b82525050565b6000611ba282612350565b808452611bb68160208601602086016125dc565b611bbf8161260f565b602085010191505092915050565b6000611bd882612345565b808452611bec8160208601602086016125dc565b611bf58161260f565b602085010191505092915050565b611c0c81612489565b82525050565b611c1b816124b7565b82525050565b611c2a816124c1565b82525050565b611c39816124d1565b82525050565b611c48816124e5565b82525050565b6000602082019050611c636000830184611666565b92915050565b6000604082019050611c7e6000830184611675565b92915050565b60006020820190508181036000830152611c9e81846116c0565b905092915050565b6000604082019050611cbb600083018461171b565b92915050565b60006020820190508181036000830152611cdb8184611766565b905092915050565b60006020820190508181036000830152611cfd81846117c1565b905092915050565b60006020820190508181036000830152611d1f8184611826565b905092915050565b6000604082019050611d3c6000830184611894565b92915050565b60006020820190508181036000830152611d5c81846118df565b905092915050565b60006020820190508181036000830152611d7e818461193a565b905092915050565b60006020820190508181036000830152611da0818461199f565b905092915050565b6000604082019050611dbd6000830184611a0d565b92915050565b60006020820190508181036000830152611ddd8184611a58565b905092915050565b6000602082019050611dfa6000830184611ab3565b92915050565b6000602082019050611e156000830184611ac2565b92915050565b6000602082019050611e306000830184611ad1565b92915050565b60006020820190508181036000830152611e508184611ae0565b905092915050565b6000602082019050611e6d6000830184611b4c565b92915050565b6000602082019050611e886000830184611b5b565b92915050565b6000602082019050611ea36000830184611b6a565b92915050565b6000602082019050611ebe6000830184611b79565b92915050565b6000602082019050611ed96000830184611b88565b92915050565b60006020820190508181036000830152611ef98184611b97565b905092915050565b6000602082019050611f166000830184611c03565b92915050565b6000602082019050611f316000830184611c12565b92915050565b6000602082019050611f4c6000830184611c21565b92915050565b6000602082019050611f676000830184611c30565b92915050565b6000602082019050611f826000830184611c3f565b92915050565b6000604051905081810181811067ffffffffffffffff82111715611fab57600080fd5b8060405250919050565b600067ffffffffffffffff821115611fcc57600080fd5b602082029050919050565b600067ffffffffffffffff821115611fee57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561201657600080fd5b602082029050919050565b600067ffffffffffffffff82111561203857600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561206057600080fd5b602082029050919050565b600067ffffffffffffffff82111561208257600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120aa57600080fd5b602082029050919050565b600067ffffffffffffffff8211156120cc57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120f457600080fd5b602082029050919050565b600067ffffffffffffffff82111561211657600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561213e57600080fd5b602082029050919050565b600067ffffffffffffffff82111561216057600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561218857600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121b457600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121e057600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff82111561220c57600080fd5b601f19601f8301169050602081019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050
919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b600061240282612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b60006124fd82612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b82818337600083830152505050565b60005b838110156125fa5780820151818401526020810190506125df565b83811115612609576000848401525b50505050565b6000601f19601f830116905091905056fea265627a7a723058206fe37171cf1b10ebd291cfdca61d67e7fc3c208795e999c833c42a14d86cf00d6c6578706572696d656e74616cf50037"; @@ -1679,8 +1831,6 @@ public class Test { public static Test deploy(TransactOpts auth, EthereumClient client) throws Exception { Interfaces args = Geth.newInterfaces(0); String bytecode = BYTECODE; - - return new Test(Geth.deployContract(auth, ABI, Geth.decodeFromHex(bytecode), client, args)); } @@ -1691,7 +1841,6 @@ public class Test { this.Contract = deployment; } - // Ethereum address where this contract is located at. public final Address Address; @@ -1706,9 +1855,6 @@ public class Test { this(Geth.bindContract(address, ABI, client)); } - - - // setAddress is a paid mutator transaction binding the contract method 0xe30081a0. 
// // Solidity: function setAddress(address a) returns(address) @@ -1988,9 +2134,7 @@ public class Test { return this.Contract.transact(opts, "setUint8" , args); } - } - `, }, } @@ -1999,7 +2143,22 @@ public class Test { if err != nil { t.Fatalf("test %d: failed to generate binding: %v", i, err) } - if binding != c.expected { + // Remove empty lines + removeEmptys := func(input string) string { + lines := strings.Split(input, "\n") + var index int + for _, line := range lines { + if strings.TrimSpace(line) != "" { + lines[index] = line + index += 1 + } + } + lines = lines[:index] + return strings.Join(lines, "\n") + } + binding = removeEmptys(binding) + expect := removeEmptys(c.expected) + if binding != expect { t.Fatalf("test %d: generated binding mismatch, has %s, want %s", i, binding, c.expected) } } diff --git a/accounts/abi/bind/template.go b/accounts/abi/bind/template.go index c96dd1b9955d..8dac11f79f5d 100644 --- a/accounts/abi/bind/template.go +++ b/accounts/abi/bind/template.go @@ -30,11 +30,13 @@ type tmplData struct { type tmplContract struct { Type string // Type name of the main contract binding InputABI string // JSON ABI used as the input to generate the binding from - InputBin string // Optional EVM bytecode used to denetare deploy code from + InputBin string // Optional EVM bytecode used to generate deploy code from FuncSigs map[string]string // Optional map: string signature -> 4-byte signature Constructor abi.Method // Contract constructor for deploy parametrization Calls map[string]*tmplMethod // Contract calls that only read state data Transacts map[string]*tmplMethod // Contract calls that write state data + Fallback *tmplMethod // Additional special fallback function + Receive *tmplMethod // Additional special receive function Events map[string]*tmplEvent // Contract events accessors Libraries map[string]string // Same as tmplData, but filtered to only keep what the contract needs Library bool // Indicator whether the contract is a library @@ -48,7 +50,8 @@ type tmplMethod struct { Structured bool // Whether the returns should be accumulated into a struct } -// tmplEvent is a wrapper around an a +// tmplEvent is a wrapper around an abi.Event that contains a few preprocessed +// and cached data fields. type tmplEvent struct { Original abi.Event // Original event as parsed by the abi package Normalized abi.Event // Normalized version of the parsed fields @@ -62,7 +65,7 @@ type tmplField struct { SolKind abi.Type // Raw abi type information } -// tmplStruct is a wrapper around an abi.tuple contains a auto-generated +// tmplStruct is a wrapper around an abi.tuple and contains an auto-generated // struct name. type tmplStruct struct { Name string // Auto-generated struct name(before solidity v0.5.11) or raw name. @@ -76,8 +79,8 @@ var tmplSource = map[Lang]string{ LangJava: tmplSourceJava, } -// tmplSourceGo is the Go source template use to generate the contract binding -// based on. +// tmplSourceGo is the Go source template that the generated Go contract binding +// is based on. const tmplSourceGo = ` // Code generated - DO NOT EDIT. // This file is a generated binding and any manual changes will be lost. @@ -101,7 +104,6 @@ var ( _ = big.NewInt _ = strings.NewReader _ = ethereum.NotFound - _ = abi.U256 _ = bind.Bind _ = common.Big1 _ = types.BloomLookup @@ -259,7 +261,7 @@ var ( // sets the output to result. The result type might be a single field for simple // returns, a slice of interfaces for anonymous returns and a struct for named // returns. 
- func (_{{$contract.Type}} *{{$contract.Type}}Raw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { + func (_{{$contract.Type}} *{{$contract.Type}}Raw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { return _{{$contract.Type}}.Contract.{{$contract.Type}}Caller.contract.Call(opts, result, method, params...) } @@ -278,7 +280,7 @@ var ( // sets the output to result. The result type might be a single field for simple // returns, a slice of interfaces for anonymous returns and a struct for named // returns. - func (_{{$contract.Type}} *{{$contract.Type}}CallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { + func (_{{$contract.Type}} *{{$contract.Type}}CallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { return _{{$contract.Type}}.Contract.contract.Call(opts, result, method, params...) } @@ -296,33 +298,37 @@ var ( {{range .Calls}} // {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}. // - // Solidity: {{formatmethod .Original $structs}} + // Solidity: {{.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}Caller) {{.Normalized.Name}}(opts *bind.CallOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} },{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}}{{end}} error) { - {{if .Structured}}ret := new(struct{ - {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}} - {{end}} - }){{else}}var ( - {{range $i, $_ := .Normalized.Outputs}}ret{{$i}} = new({{bindtype .Type $structs}}) - {{end}} - ){{end}} - out := {{if .Structured}}ret{{else}}{{if eq (len .Normalized.Outputs) 1}}ret0{{else}}&[]interface{}{ - {{range $i, $_ := .Normalized.Outputs}}ret{{$i}}, - {{end}} - }{{end}}{{end}} - err := _{{$contract.Type}}.contract.Call(opts, out, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}}) - return {{if .Structured}}*ret,{{else}}{{range $i, $_ := .Normalized.Outputs}}*ret{{$i}},{{end}}{{end}} err + var out []interface{} + err := _{{$contract.Type}}.contract.Call(opts, &out, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}}) + {{if .Structured}} + outstruct := new(struct{ {{range .Normalized.Outputs}} {{.Name}} {{bindtype .Type $structs}}; {{end}} }) + {{range $i, $t := .Normalized.Outputs}} + outstruct.{{.Name}} = out[{{$i}}].({{bindtype .Type $structs}}){{end}} + + return *outstruct, err + {{else}} + if err != nil { + return {{range $i, $_ := .Normalized.Outputs}}*new({{bindtype .Type $structs}}), {{end}} err + } + {{range $i, $t := .Normalized.Outputs}} + out{{$i}} := *abi.ConvertType(out[{{$i}}], new({{bindtype .Type $structs}})).(*{{bindtype .Type $structs}}){{end}} + + return {{range $i, $t := .Normalized.Outputs}}out{{$i}}, {{end}} err + {{end}} } // {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}. 
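For orientation, here is a rough sketch of what the reworked Caller template above expands to for a hypothetical single-output view method balanceOf(address) on a contract named Token; the Token/TokenCaller names and the holder parameter are assumptions for illustration, not part of the patch's generated output.

// Sketch only: assumes a generated TokenCaller with a contract *bind.BoundContract field.
func (_Token *TokenCaller) BalanceOf(opts *bind.CallOpts, holder common.Address) (*big.Int, error) {
	// Results are now collected into a generic slice first...
	var out []interface{}
	err := _Token.contract.Call(opts, &out, "balanceOf", holder)
	if err != nil {
		return *new(*big.Int), err
	}
	// ...and then converted to the concrete Go type via abi.ConvertType.
	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
	return out0, err
}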
// - // Solidity: {{formatmethod .Original $structs}} + // Solidity: {{.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}Session) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} }, {{else}} {{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}} {{end}} error) { return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.CallOpts {{range .Normalized.Inputs}}, {{.Name}}{{end}}) } // {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}. // - // Solidity: {{formatmethod .Original $structs}} + // Solidity: {{.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}CallerSession) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} }, {{else}} {{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}} {{end}} error) { return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.CallOpts {{range .Normalized.Inputs}}, {{.Name}}{{end}}) } @@ -331,26 +337,72 @@ var ( {{range .Transacts}} // {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}. // - // Solidity: {{formatmethod .Original $structs}} + // Solidity: {{.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}Transactor) {{.Normalized.Name}}(opts *bind.TransactOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) { return _{{$contract.Type}}.contract.Transact(opts, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}}) } // {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}. // - // Solidity: {{formatmethod .Original $structs}} + // Solidity: {{.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}Session) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) { return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}}) } // {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}. // - // Solidity: {{formatmethod .Original $structs}} + // Solidity: {{.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) { return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}}) } {{end}} + {{if .Fallback}} + // Fallback is a paid mutator transaction binding the contract fallback function. + // + // Solidity: {{.Fallback.Original.String}} + func (_{{$contract.Type}} *{{$contract.Type}}Transactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) { + return _{{$contract.Type}}.contract.RawTransact(opts, calldata) + } + + // Fallback is a paid mutator transaction binding the contract fallback function. 
+ // + // Solidity: {{.Fallback.Original.String}} + func (_{{$contract.Type}} *{{$contract.Type}}Session) Fallback(calldata []byte) (*types.Transaction, error) { + return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata) + } + + // Fallback is a paid mutator transaction binding the contract fallback function. + // + // Solidity: {{.Fallback.Original.String}} + func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Fallback(calldata []byte) (*types.Transaction, error) { + return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata) + } + {{end}} + + {{if .Receive}} + // Receive is a paid mutator transaction binding the contract receive function. + // + // Solidity: {{.Receive.Original.String}} + func (_{{$contract.Type}} *{{$contract.Type}}Transactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { + return _{{$contract.Type}}.contract.RawTransact(opts, nil) // calldata is disallowed for receive function + } + + // Receive is a paid mutator transaction binding the contract receive function. + // + // Solidity: {{.Receive.Original.String}} + func (_{{$contract.Type}} *{{$contract.Type}}Session) Receive() (*types.Transaction, error) { + return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts) + } + + // Receive is a paid mutator transaction binding the contract receive function. + // + // Solidity: {{.Receive.Original.String}} + func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Receive() (*types.Transaction, error) { + return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts) + } + {{end}} + {{range .Events}} // {{$contract.Type}}{{.Normalized.Name}}Iterator is returned from Filter{{.Normalized.Name}} and is used to iterate over the raw logs and unpacked data for {{.Normalized.Name}} events raised by the {{$contract.Type}} contract. type {{$contract.Type}}{{.Normalized.Name}}Iterator struct { @@ -424,7 +476,7 @@ var ( // Filter{{.Normalized.Name}} is a free log retrieval operation binding the contract event 0x{{printf "%x" .Original.ID}}. // - // Solidity: {{formatevent .Original $structs}} + // Solidity: {{.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Filter{{.Normalized.Name}}(opts *bind.FilterOpts{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type $structs}}{{end}}{{end}}) (*{{$contract.Type}}{{.Normalized.Name}}Iterator, error) { {{range .Normalized.Inputs}} {{if .Indexed}}var {{.Name}}Rule []interface{} @@ -441,7 +493,7 @@ var ( // Watch{{.Normalized.Name}} is a free log subscription operation binding the contract event 0x{{printf "%x" .Original.ID}}. // - // Solidity: {{formatevent .Original $structs}} + // Solidity: {{.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Watch{{.Normalized.Name}}(opts *bind.WatchOpts, sink chan<- *{{$contract.Type}}{{.Normalized.Name}}{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type $structs}}{{end}}{{end}}) (event.Subscription, error) { {{range .Normalized.Inputs}} {{if .Indexed}}var {{.Name}}Rule []interface{} @@ -483,12 +535,13 @@ var ( // Parse{{.Normalized.Name}} is a log parse operation binding the contract event 0x{{printf "%x" .Original.ID}}. 
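As a usage sketch for the new special-function bindings, a generated contract type (here a hypothetical Token, with the transactor methods promoted into it) could be driven roughly like this; the names are assumptions for the example.

// Sketch only: Token is a hypothetical generated binding embedding its Transactor.
func pokeSpecialFunctions(auth *bind.TransactOpts, token *Token) error {
	// Fallback forwards arbitrary calldata through RawTransact.
	if _, err := token.Fallback(auth, []byte{0xde, 0xad}); err != nil {
		return err
	}
	// Receive takes no calldata; it is meant for plain Ether transfers.
	if _, err := token.Receive(auth); err != nil {
		return err
	}
	return nil
}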
// - // Solidity: {{formatevent .Original $structs}} + // Solidity: {{.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Parse{{.Normalized.Name}}(log types.Log) (*{{$contract.Type}}{{.Normalized.Name}}, error) { event := new({{$contract.Type}}{{.Normalized.Name}}) if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil { return nil, err } + event.Raw = log return event, nil } @@ -496,8 +549,8 @@ var ( {{end}} ` -// tmplSourceJava is the Java source template use to generate the contract binding -// based on. +// tmplSourceJava is the Java source template that the generated Java contract binding +// is based on. const tmplSourceJava = ` // This file is an automatically generated Java binding. Do not modify as any // change will likely be lost upon the next re-generation! @@ -577,7 +630,7 @@ import java.util.*; // {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}. // // Solidity: {{.Original.String}} - public {{if gt (len .Normalized.Outputs) 1}}{{capitalise .Normalized.Name}}Results{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}}{{end}}{{end}} {{.Normalized.Name}}(CallOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception { + public {{if gt (len .Normalized.Outputs) 1}}{{capitalise .Normalized.Name}}Results{{else if eq (len .Normalized.Outputs) 0}}void{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}}{{end}}{{end}} {{.Normalized.Name}}(CallOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception { Interfaces args = Geth.newInterfaces({{(len .Normalized.Inputs)}}); {{range $index, $item := .Normalized.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}}); {{end}} @@ -611,6 +664,24 @@ import java.util.*; return this.Contract.transact(opts, "{{.Original.Name}}" , args); } {{end}} + + {{if .Fallback}} + // Fallback is a paid mutator transaction binding the contract fallback function. + // + // Solidity: {{.Fallback.Original.String}} + public Transaction Fallback(TransactOpts opts, byte[] calldata) throws Exception { + return this.Contract.rawTransact(opts, calldata); + } + {{end}} + + {{if .Receive}} + // Receive is a paid mutator transaction binding the contract receive function. + // + // Solidity: {{.Receive.Original.String}} + public Transaction Receive(TransactOpts opts) throws Exception { + return this.Contract.rawTransact(opts, null); + } + {{end}} } {{end}} ` diff --git a/accounts/abi/bind/topics.go b/accounts/abi/bind/topics.go deleted file mode 100644 index e27fa5484204..000000000000 --- a/accounts/abi/bind/topics.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package bind - -import ( - "encoding/binary" - "errors" - "fmt" - "math/big" - "reflect" - - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -// makeTopics converts a filter query argument list into a filter topic set. -func makeTopics(query ...[]interface{}) ([][]common.Hash, error) { - topics := make([][]common.Hash, len(query)) - for i, filter := range query { - for _, rule := range filter { - var topic common.Hash - - // Try to generate the topic based on simple types - switch rule := rule.(type) { - case common.Hash: - copy(topic[:], rule[:]) - case common.Address: - copy(topic[common.HashLength-common.AddressLength:], rule[:]) - case *big.Int: - blob := rule.Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case bool: - if rule { - topic[common.HashLength-1] = 1 - } - case int8: - blob := big.NewInt(int64(rule)).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case int16: - blob := big.NewInt(int64(rule)).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case int32: - blob := big.NewInt(int64(rule)).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case int64: - blob := big.NewInt(rule).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case uint8: - blob := new(big.Int).SetUint64(uint64(rule)).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case uint16: - blob := new(big.Int).SetUint64(uint64(rule)).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case uint32: - blob := new(big.Int).SetUint64(uint64(rule)).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case uint64: - blob := new(big.Int).SetUint64(rule).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case string: - hash := crypto.Keccak256Hash([]byte(rule)) - copy(topic[:], hash[:]) - case []byte: - hash := crypto.Keccak256Hash(rule) - copy(topic[:], hash[:]) - - default: - // todo(rjl493456442) according solidity documentation, indexed event - // parameters that are not value types i.e. arrays and structs are not - // stored directly but instead a keccak256-hash of an encoding is stored. - // - // We only convert stringS and bytes to hash, still need to deal with - // array(both fixed-size and dynamic-size) and struct. - - // Attempt to generate the topic from funky types - val := reflect.ValueOf(rule) - switch { - // static byte array - case val.Kind() == reflect.Array && reflect.TypeOf(rule).Elem().Kind() == reflect.Uint8: - reflect.Copy(reflect.ValueOf(topic[:val.Len()]), val) - default: - return nil, fmt.Errorf("unsupported indexed type: %T", rule) - } - } - topics[i] = append(topics[i], topic) - } - } - return topics, nil -} - -// Big batch of reflect types for topic reconstruction. -var ( - reflectHash = reflect.TypeOf(common.Hash{}) - reflectAddress = reflect.TypeOf(common.Address{}) - reflectBigInt = reflect.TypeOf(new(big.Int)) -) - -// parseTopics converts the indexed topic fields into actual log field values. -// -// Note, dynamic types cannot be reconstructed since they get mapped to Keccak256 -// hashes as the topic value! 
-func parseTopics(out interface{}, fields abi.Arguments, topics []common.Hash) error { - // Sanity check that the fields and topics match up - if len(fields) != len(topics) { - return errors.New("topic/field count mismatch") - } - // Iterate over all the fields and reconstruct them from topics - for _, arg := range fields { - if !arg.Indexed { - return errors.New("non-indexed field in topic reconstruction") - } - field := reflect.ValueOf(out).Elem().FieldByName(capitalise(arg.Name)) - - // Try to parse the topic back into the fields based on primitive types - switch field.Kind() { - case reflect.Bool: - if topics[0][common.HashLength-1] == 1 { - field.Set(reflect.ValueOf(true)) - } - case reflect.Int8: - num := new(big.Int).SetBytes(topics[0][:]) - field.Set(reflect.ValueOf(int8(num.Int64()))) - - case reflect.Int16: - num := new(big.Int).SetBytes(topics[0][:]) - field.Set(reflect.ValueOf(int16(num.Int64()))) - - case reflect.Int32: - num := new(big.Int).SetBytes(topics[0][:]) - field.Set(reflect.ValueOf(int32(num.Int64()))) - - case reflect.Int64: - num := new(big.Int).SetBytes(topics[0][:]) - field.Set(reflect.ValueOf(num.Int64())) - - case reflect.Uint8: - num := new(big.Int).SetBytes(topics[0][:]) - field.Set(reflect.ValueOf(uint8(num.Uint64()))) - - case reflect.Uint16: - num := new(big.Int).SetBytes(topics[0][:]) - field.Set(reflect.ValueOf(uint16(num.Uint64()))) - - case reflect.Uint32: - num := new(big.Int).SetBytes(topics[0][:]) - field.Set(reflect.ValueOf(uint32(num.Uint64()))) - - case reflect.Uint64: - num := new(big.Int).SetBytes(topics[0][:]) - field.Set(reflect.ValueOf(num.Uint64())) - - default: - // Ran out of plain primitive types, try custom types - - switch field.Type() { - case reflectHash: // Also covers all dynamic types - field.Set(reflect.ValueOf(topics[0])) - - case reflectAddress: - var addr common.Address - copy(addr[:], topics[0][common.HashLength-common.AddressLength:]) - field.Set(reflect.ValueOf(addr)) - - case reflectBigInt: - num := new(big.Int).SetBytes(topics[0][:]) - field.Set(reflect.ValueOf(num)) - - default: - // Ran out of custom types, try the crazies - switch { - // static byte array - case arg.Type.T == abi.FixedBytesTy: - reflect.Copy(field, reflect.ValueOf(topics[0][:arg.Type.Size])) - default: - return fmt.Errorf("unsupported indexed type: %v", arg.Type) - } - } - } - topics = topics[1:] - } - return nil -} - -// parseTopicsIntoMap converts the indexed topic field-value pairs into map key-value pairs -func parseTopicsIntoMap(out map[string]interface{}, fields abi.Arguments, topics []common.Hash) error { - // Sanity check that the fields and topics match up - if len(fields) != len(topics) { - return errors.New("topic/field count mismatch") - } - // Iterate over all the fields and reconstruct them from topics - for _, arg := range fields { - if !arg.Indexed { - return errors.New("non-indexed field in topic reconstruction") - } - - switch arg.Type.T { - case abi.BoolTy: - out[arg.Name] = topics[0][common.HashLength-1] == 1 - case abi.IntTy, abi.UintTy: - num := new(big.Int).SetBytes(topics[0][:]) - out[arg.Name] = num - case abi.AddressTy: - var addr common.Address - copy(addr[:], topics[0][common.HashLength-common.AddressLength:]) - out[arg.Name] = addr - case abi.HashTy: - out[arg.Name] = topics[0] - case abi.FixedBytesTy: - out[arg.Name] = topics[0][:] - case abi.StringTy, abi.BytesTy, abi.SliceTy, abi.ArrayTy: - // Array types (including strings and bytes) have their keccak256 hashes stored in the topic- not a hash - // whose bytes can be 
decoded to the actual value- so the best we can do is retrieve that hash - out[arg.Name] = topics[0] - case abi.FunctionTy: - if garbage := binary.BigEndian.Uint64(topics[0][0:8]); garbage != 0 { - return fmt.Errorf("bind: got improperly encoded function type, got %v", topics[0].Bytes()) - } - var tmp [24]byte - copy(tmp[:], topics[0][8:32]) - out[arg.Name] = tmp - default: // Not handling tuples - return fmt.Errorf("unsupported indexed type: %v", arg.Type) - } - - topics = topics[1:] - } - - return nil -} diff --git a/accounts/abi/bind/topics_test.go b/accounts/abi/bind/topics_test.go deleted file mode 100644 index f18e2d1bd25e..000000000000 --- a/accounts/abi/bind/topics_test.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package bind - -import ( - "reflect" - "testing" - - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/common" -) - -func TestMakeTopics(t *testing.T) { - type args struct { - query [][]interface{} - } - tests := []struct { - name string - args args - want [][]common.Hash - wantErr bool - }{ - { - "support fixed byte types, right padded to 32 bytes", - args{[][]interface{}{{[5]byte{1, 2, 3, 4, 5}}}}, - [][]common.Hash{{common.Hash{1, 2, 3, 4, 5}}}, - false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := makeTopics(tt.args.query...) 
- if (err != nil) != tt.wantErr { - t.Errorf("makeTopics() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("makeTopics() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestParseTopics(t *testing.T) { - type bytesStruct struct { - StaticBytes [5]byte - } - bytesType, _ := abi.NewType("bytes5", "", nil) - type args struct { - createObj func() interface{} - resultObj func() interface{} - fields abi.Arguments - topics []common.Hash - } - tests := []struct { - name string - args args - wantErr bool - }{ - { - name: "support fixed byte types, right padded to 32 bytes", - args: args{ - createObj: func() interface{} { return &bytesStruct{} }, - resultObj: func() interface{} { return &bytesStruct{StaticBytes: [5]byte{1, 2, 3, 4, 5}} }, - fields: abi.Arguments{abi.Argument{ - Name: "staticBytes", - Type: bytesType, - Indexed: true, - }}, - topics: []common.Hash{ - {1, 2, 3, 4, 5}, - }, - }, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - createObj := tt.args.createObj() - if err := parseTopics(createObj, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr { - t.Errorf("parseTopics() error = %v, wantErr %v", err, tt.wantErr) - } - resultObj := tt.args.resultObj() - if !reflect.DeepEqual(createObj, resultObj) { - t.Errorf("parseTopics() = %v, want %v", createObj, resultObj) - } - }) - } -} diff --git a/accounts/abi/bind/util.go b/accounts/abi/bind/util.go index d129993ca12f..118abc59a7f1 100644 --- a/accounts/abi/bind/util.go +++ b/accounts/abi/bind/util.go @@ -18,7 +18,7 @@ package bind import ( "context" - "fmt" + "errors" "time" "github.com/ethereum/go-ethereum/common" @@ -56,14 +56,14 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty // contract address when it is mined. It stops waiting when ctx is canceled. func WaitDeployed(ctx context.Context, b DeployBackend, tx *types.Transaction) (common.Address, error) { if tx.To() != nil { - return common.Address{}, fmt.Errorf("tx is not contract creation") + return common.Address{}, errors.New("tx is not contract creation") } receipt, err := WaitMined(ctx, b, tx) if err != nil { return common.Address{}, err } if receipt.ContractAddress == (common.Address{}) { - return common.Address{}, fmt.Errorf("zero address") + return common.Address{}, errors.New("zero address") } // Check that code has indeed been deployed at the address. // This matters on pre-Homestead chains: OOG in the constructor diff --git a/accounts/abi/bind/util_test.go b/accounts/abi/bind/util_test.go index e0141f46e06f..9f9b7a000d6b 100644 --- a/accounts/abi/bind/util_test.go +++ b/accounts/abi/bind/util_test.go @@ -18,6 +18,7 @@ package bind_test import ( "context" + "errors" "math/big" "testing" "time" @@ -84,7 +85,7 @@ func TestWaitDeployed(t *testing.T) { select { case <-mined: if err != test.wantErr { - t.Errorf("test %q: error mismatch: got %q, want %q", name, err, test.wantErr) + t.Errorf("test %q: error mismatch: want %q, got %q", name, test.wantErr, err) } if address != test.wantAddress { t.Errorf("test %q: unexpected contract address %s", name, address.Hex()) @@ -94,3 +95,40 @@ func TestWaitDeployed(t *testing.T) { } } } + +func TestWaitDeployedCornerCases(t *testing.T) { + backend := backends.NewSimulatedBackend( + core.GenesisAlloc{ + crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)}, + }, + 10000000, + ) + defer backend.Close() + + // Create a transaction to an account. 
+ code := "6060604052600a8060106000396000f360606040526008565b00" + tx := types.NewTransaction(0, common.HexToAddress("0x01"), big.NewInt(0), 3000000, big.NewInt(1), common.FromHex(code)) + tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + backend.SendTransaction(ctx, tx) + backend.Commit() + notContentCreation := errors.New("tx is not contract creation") + if _, err := bind.WaitDeployed(ctx, backend, tx); err.Error() != notContentCreation.Error() { + t.Errorf("error missmatch: want %q, got %q, ", notContentCreation, err) + } + + // Create a transaction that is not mined. + tx = types.NewContractCreation(1, big.NewInt(0), 3000000, big.NewInt(1), common.FromHex(code)) + tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) + + go func() { + contextCanceled := errors.New("context canceled") + if _, err := bind.WaitDeployed(ctx, backend, tx); err.Error() != contextCanceled.Error() { + t.Errorf("error missmatch: want %q, got %q, ", contextCanceled, err) + } + }() + + backend.SendTransaction(ctx, tx) + cancel() +} diff --git a/accounts/abi/error.go b/accounts/abi/error.go index 9d8674ad088b..f0f71b6c9164 100644 --- a/accounts/abi/error.go +++ b/accounts/abi/error.go @@ -39,23 +39,21 @@ func formatSliceString(kind reflect.Kind, sliceSize int) string { // type in t. func sliceTypeCheck(t Type, val reflect.Value) error { if val.Kind() != reflect.Slice && val.Kind() != reflect.Array { - return typeErr(formatSliceString(t.Kind, t.Size), val.Type()) + return typeErr(formatSliceString(t.GetType().Kind(), t.Size), val.Type()) } if t.T == ArrayTy && val.Len() != t.Size { - return typeErr(formatSliceString(t.Elem.Kind, t.Size), formatSliceString(val.Type().Elem().Kind(), val.Len())) + return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), formatSliceString(val.Type().Elem().Kind(), val.Len())) } - if t.Elem.T == SliceTy { + if t.Elem.T == SliceTy || t.Elem.T == ArrayTy { if val.Len() > 0 { return sliceTypeCheck(*t.Elem, val.Index(0)) } - } else if t.Elem.T == ArrayTy { - return sliceTypeCheck(*t.Elem, val.Index(0)) } - if elemKind := val.Type().Elem().Kind(); elemKind != t.Elem.Kind { - return typeErr(formatSliceString(t.Elem.Kind, t.Size), val.Type()) + if val.Type().Elem().Kind() != t.Elem.GetType().Kind() { + return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), val.Type()) } return nil } @@ -68,10 +66,10 @@ func typeCheck(t Type, value reflect.Value) error { } // Check base type validity. Element types will be checked later on. - if t.Kind != value.Kind() { - return typeErr(t.Kind, value.Kind()) + if t.GetType().Kind() != value.Kind() { + return typeErr(t.GetType().Kind(), value.Kind()) } else if t.T == FixedBytesTy && t.Size != value.Len() { - return typeErr(t.Type, value.Type()) + return typeErr(t.GetType(), value.Type()) } else { return nil } diff --git a/accounts/abi/event.go b/accounts/abi/event.go index f1474813afe4..b238a36d7cea 100644 --- a/accounts/abi/event.go +++ b/accounts/abi/event.go @@ -32,7 +32,7 @@ type Event struct { // the raw name and a suffix will be added in the case of a event overload. // // e.g. 
- // There are two events have same name: + // These are two events that have the same name: // * foo(int,int) // * foo(uint,uint) // The event name of the first one wll be resolved as foo while the second one @@ -42,36 +42,59 @@ type Event struct { RawName string Anonymous bool Inputs Arguments + str string + // Sig contains the string signature according to the ABI spec. + // e.g. event foo(uint32 a, int b) = "foo(uint32,int256)" + // Please note that "int" is substitute for its canonical representation "int256" + Sig string + // ID returns the canonical representation of the event's signature used by the + // abi definition to identify event names and types. + ID common.Hash } -func (e Event) String() string { - inputs := make([]string, len(e.Inputs)) - for i, input := range e.Inputs { - inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name) +// NewEvent creates a new Event. +// It sanitizes the input arguments to remove unnamed arguments. +// It also precomputes the id, signature and string representation +// of the event. +func NewEvent(name, rawName string, anonymous bool, inputs Arguments) Event { + // sanitize inputs to remove inputs without names + // and precompute string and sig representation. + names := make([]string, len(inputs)) + types := make([]string, len(inputs)) + for i, input := range inputs { + if input.Name == "" { + inputs[i] = Argument{ + Name: fmt.Sprintf("arg%d", i), + Indexed: input.Indexed, + Type: input.Type, + } + } else { + inputs[i] = input + } + // string representation + names[i] = fmt.Sprintf("%v %v", input.Type, inputs[i].Name) if input.Indexed { - inputs[i] = fmt.Sprintf("%v indexed %v", input.Type, input.Name) + names[i] = fmt.Sprintf("%v indexed %v", input.Type, inputs[i].Name) } + // sig representation + types[i] = input.Type.String() } - return fmt.Sprintf("event %v(%v)", e.RawName, strings.Join(inputs, ", ")) -} -// Sig returns the event string signature according to the ABI spec. -// -// Example -// -// event foo(uint32 a, int b) = "foo(uint32,int256)" -// -// Please note that "int" is substitute for its canonical representation "int256" -func (e Event) Sig() string { - types := make([]string, len(e.Inputs)) - for i, input := range e.Inputs { - types[i] = input.Type.String() + str := fmt.Sprintf("event %v(%v)", rawName, strings.Join(names, ", ")) + sig := fmt.Sprintf("%v(%v)", rawName, strings.Join(types, ",")) + id := common.BytesToHash(crypto.Keccak256([]byte(sig))) + + return Event{ + Name: name, + RawName: rawName, + Anonymous: anonymous, + Inputs: inputs, + str: str, + Sig: sig, + ID: id, } - return fmt.Sprintf("%v(%v)", e.RawName, strings.Join(types, ",")) } -// ID returns the canonical representation of the event's signature used by the -// abi definition to identify event names and types. 
-func (e Event) ID() common.Hash { - return common.BytesToHash(crypto.Keccak256([]byte(e.Sig()))) +func (e Event) String() string { + return e.str } diff --git a/accounts/abi/event_test.go b/accounts/abi/event_test.go index 090b9217dbdd..3332f8a07216 100644 --- a/accounts/abi/event_test.go +++ b/accounts/abi/event_test.go @@ -104,8 +104,8 @@ func TestEventId(t *testing.T) { } for name, event := range abi.Events { - if event.ID() != test.expectations[name] { - t.Errorf("expected id to be %x, got %x", test.expectations[name], event.ID()) + if event.ID != test.expectations[name] { + t.Errorf("expected id to be %x, got %x", test.expectations[name], event.ID) } } } @@ -147,10 +147,6 @@ func TestEventString(t *testing.T) { // TestEventMultiValueWithArrayUnpack verifies that array fields will be counted after parsing array. func TestEventMultiValueWithArrayUnpack(t *testing.T) { definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]` - type testStruct struct { - Value1 [2]uint8 - Value2 uint8 - } abi, err := JSON(strings.NewReader(definition)) require.NoError(t, err) var b bytes.Buffer @@ -158,10 +154,10 @@ func TestEventMultiValueWithArrayUnpack(t *testing.T) { for ; i <= 3; i++ { b.Write(packNum(reflect.ValueOf(i))) } - var rst testStruct - require.NoError(t, abi.Unpack(&rst, "test", b.Bytes())) - require.Equal(t, [2]uint8{1, 2}, rst.Value1) - require.Equal(t, uint8(3), rst.Value2) + unpacked, err := abi.Unpack("test", b.Bytes()) + require.NoError(t, err) + require.Equal(t, [2]uint8{1, 2}, unpacked[0]) + require.Equal(t, uint8(3), unpacked[1]) } func TestEventTupleUnpack(t *testing.T) { @@ -312,14 +308,14 @@ func TestEventTupleUnpack(t *testing.T) { &[]interface{}{common.Address{}, new(big.Int)}, &[]interface{}{}, jsonEventPledge, - "abi: insufficient number of elements in the list/array for unpack, want 3, got 2", + "abi: insufficient number of arguments for unpack, want 3, got 2", "Can not unpack Pledge event into too short slice", }, { pledgeData1, new(map[string]interface{}), &[]interface{}{}, jsonEventPledge, - "abi: cannot unmarshal tuple into map[string]interface {}", + "abi:[2] cannot unmarshal tuple in to map[string]interface {}", "Can not unpack Pledge event into map", }, { mixedCaseData1, @@ -351,14 +347,14 @@ func unpackTestEventData(dest interface{}, hexData string, jsonEvent []byte, ass var e Event assert.NoError(json.Unmarshal(jsonEvent, &e), "Should be able to unmarshal event ABI") a := ABI{Events: map[string]Event{"e": e}} - return a.Unpack(dest, "e", data) + return a.UnpackIntoInterface(dest, "e", data) } // TestEventUnpackIndexed verifies that indexed field will be skipped by event decoder. 
func TestEventUnpackIndexed(t *testing.T) { definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8"},{"indexed": false, "name":"value2", "type":"uint8"}]}]` type testStruct struct { - Value1 uint8 + Value1 uint8 // indexed Value2 uint8 } abi, err := JSON(strings.NewReader(definition)) @@ -366,16 +362,16 @@ func TestEventUnpackIndexed(t *testing.T) { var b bytes.Buffer b.Write(packNum(reflect.ValueOf(uint8(8)))) var rst testStruct - require.NoError(t, abi.Unpack(&rst, "test", b.Bytes())) + require.NoError(t, abi.UnpackIntoInterface(&rst, "test", b.Bytes())) require.Equal(t, uint8(0), rst.Value1) require.Equal(t, uint8(8), rst.Value2) } -// TestEventIndexedWithArrayUnpack verifies that decoder will not overlow when static array is indexed input. +// TestEventIndexedWithArrayUnpack verifies that decoder will not overflow when static array is indexed input. func TestEventIndexedWithArrayUnpack(t *testing.T) { definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"string"}]}]` type testStruct struct { - Value1 [2]uint8 + Value1 [2]uint8 // indexed Value2 string } abi, err := JSON(strings.NewReader(definition)) @@ -388,7 +384,7 @@ func TestEventIndexedWithArrayUnpack(t *testing.T) { b.Write(common.RightPadBytes([]byte(stringOut), 32)) var rst testStruct - require.NoError(t, abi.Unpack(&rst, "test", b.Bytes())) + require.NoError(t, abi.UnpackIntoInterface(&rst, "test", b.Bytes())) require.Equal(t, [2]uint8{0, 0}, rst.Value1) require.Equal(t, stringOut, rst.Value2) } diff --git a/accounts/abi/method.go b/accounts/abi/method.go index 7da2e18fc61a..f69e3ee9b562 100644 --- a/accounts/abi/method.go +++ b/accounts/abi/method.go @@ -23,11 +23,29 @@ import ( "github.com/ethereum/go-ethereum/crypto" ) +// FunctionType represents different types of functions a contract might have. +type FunctionType int + +const ( + // Constructor represents the constructor of the contract. + // The constructor function is called while deploying a contract. + Constructor FunctionType = iota + // Fallback represents the fallback function. + // This function is executed if no other function matches the given function + // signature and no receive function is specified. + Fallback + // Receive represents the receive function. + // This function is executed on plain Ether transfers. + Receive + // Function represents a normal function. + Function +) + // Method represents a callable given a `Name` and whether the method is a constant. // If the method is `Const` no transaction needs to be created for this // particular Method call. It can easily be simulated using a local VM. // For example a `Balance()` method only needs to retrieve something -// from the storage and therefore requires no Tx to be send to the +// from the storage and therefore requires no Tx to be sent to the // network. A method such as `Transact` does require a Tx and thus will // be flagged `false`. // Input specifies the required input parameters for this gives method. @@ -36,55 +54,114 @@ type Method struct { // the raw name and a suffix will be added in the case of a function overload. // // e.g. - // There are two functions have same name: + // These are two functions that have the same name: // * foo(int,int) // * foo(uint,uint) // The method name of the first one will be resolved as foo while the second one // will be resolved as foo0. 
- Name string - // RawName is the raw method name parsed from ABI. - RawName string - Const bool + Name string + RawName string // RawName is the raw method name parsed from ABI + + // Type indicates whether the method is a + // special fallback introduced in solidity v0.6.0 + Type FunctionType + + // StateMutability indicates the mutability state of method, + // the default value is nonpayable. It can be empty if the abi + // is generated by legacy compiler. + StateMutability string + + // Legacy indicators generated by compiler before v0.6.0 + Constant bool + Payable bool + Inputs Arguments Outputs Arguments + str string + // Sig returns the methods string signature according to the ABI spec. + // e.g. function foo(uint32 a, int b) = "foo(uint32,int256)" + // Please note that "int" is substitute for its canonical representation "int256" + Sig string + // ID returns the canonical representation of the method's signature used by the + // abi definition to identify method names and types. + ID []byte } -// Sig returns the methods string signature according to the ABI spec. -// -// Example -// -// function foo(uint32 a, int b) = "foo(uint32,int256)" -// -// Please note that "int" is substitute for its canonical representation "int256" -func (method Method) Sig() string { - types := make([]string, len(method.Inputs)) - for i, input := range method.Inputs { +// NewMethod creates a new Method. +// A method should always be created using NewMethod. +// It also precomputes the sig representation and the string representation +// of the method. +func NewMethod(name string, rawName string, funType FunctionType, mutability string, isConst, isPayable bool, inputs Arguments, outputs Arguments) Method { + var ( + types = make([]string, len(inputs)) + inputNames = make([]string, len(inputs)) + outputNames = make([]string, len(outputs)) + ) + for i, input := range inputs { + inputNames[i] = fmt.Sprintf("%v %v", input.Type, input.Name) types[i] = input.Type.String() } - return fmt.Sprintf("%v(%v)", method.RawName, strings.Join(types, ",")) -} - -func (method Method) String() string { - inputs := make([]string, len(method.Inputs)) - for i, input := range method.Inputs { - inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name) - } - outputs := make([]string, len(method.Outputs)) - for i, output := range method.Outputs { - outputs[i] = output.Type.String() + for i, output := range outputs { + outputNames[i] = output.Type.String() if len(output.Name) > 0 { - outputs[i] += fmt.Sprintf(" %v", output.Name) + outputNames[i] += fmt.Sprintf(" %v", output.Name) } } - constant := "" - if method.Const { - constant = "constant " + // calculate the signature and method id. Note only function + // has meaningful signature and id. + var ( + sig string + id []byte + ) + if funType == Function { + sig = fmt.Sprintf("%v(%v)", rawName, strings.Join(types, ",")) + id = crypto.Keccak256([]byte(sig))[:4] + } + // Extract meaningful state mutability of solidity method. + // If it's default value, never print it. 
+ state := mutability + if state == "nonpayable" { + state = "" + } + if state != "" { + state = state + " " + } + identity := fmt.Sprintf("function %v", rawName) + if funType == Fallback { + identity = "fallback" + } else if funType == Receive { + identity = "receive" + } else if funType == Constructor { + identity = "constructor" + } + str := fmt.Sprintf("%v(%v) %sreturns(%v)", identity, strings.Join(inputNames, ", "), state, strings.Join(outputNames, ", ")) + + return Method{ + Name: name, + RawName: rawName, + Type: funType, + StateMutability: mutability, + Constant: isConst, + Payable: isPayable, + Inputs: inputs, + Outputs: outputs, + str: str, + Sig: sig, + ID: id, } - return fmt.Sprintf("function %v(%v) %sreturns(%v)", method.RawName, strings.Join(inputs, ", "), constant, strings.Join(outputs, ", ")) } -// ID returns the canonical representation of the method's signature used by the -// abi definition to identify method names and types. -func (method Method) ID() []byte { - return crypto.Keccak256([]byte(method.Sig()))[:4] +func (method Method) String() string { + return method.str +} + +// IsConstant returns the indicator whether the method is read-only. +func (method Method) IsConstant() bool { + return method.StateMutability == "view" || method.StateMutability == "pure" || method.Constant +} + +// IsPayable returns the indicator whether the method can process +// plain ether transfers. +func (method Method) IsPayable() bool { + return method.StateMutability == "payable" || method.Payable } diff --git a/accounts/abi/method_test.go b/accounts/abi/method_test.go index 3ffdb702b35b..395a5289654a 100644 --- a/accounts/abi/method_test.go +++ b/accounts/abi/method_test.go @@ -23,13 +23,15 @@ import ( const methoddata = ` [ - {"type": "function", "name": "balance", "constant": true }, - {"type": "function", "name": "send", "constant": false, "inputs": [{ "name": "amount", "type": "uint256" }]}, - {"type": "function", "name": "transfer", "constant": false, "inputs": [{"name": "from", "type": "address"}, {"name": "to", "type": "address"}, {"name": "value", "type": "uint256"}], "outputs": [{"name": "success", "type": "bool"}]}, + {"type": "function", "name": "balance", "stateMutability": "view"}, + {"type": "function", "name": "send", "inputs": [{ "name": "amount", "type": "uint256" }]}, + {"type": "function", "name": "transfer", "inputs": [{"name": "from", "type": "address"}, {"name": "to", "type": "address"}, {"name": "value", "type": "uint256"}], "outputs": [{"name": "success", "type": "bool"}]}, {"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple"}],"name":"tuple","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, {"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple[]"}],"name":"tupleSlice","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, {"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple[5]"}],"name":"tupleArray","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, - {"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple[5][]"}],"name":"complexTuple","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"} + 
{"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple[5][]"}],"name":"complexTuple","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, + {"stateMutability":"nonpayable","type":"fallback"}, + {"stateMutability":"payable","type":"receive"} ]` func TestMethodString(t *testing.T) { @@ -39,7 +41,7 @@ func TestMethodString(t *testing.T) { }{ { method: "balance", - expectation: "function balance() constant returns()", + expectation: "function balance() view returns()", }, { method: "send", @@ -65,6 +67,14 @@ func TestMethodString(t *testing.T) { method: "complexTuple", expectation: "function complexTuple((uint256,uint256)[5][] a) returns()", }, + { + method: "fallback", + expectation: "fallback() returns()", + }, + { + method: "receive", + expectation: "receive() payable returns()", + }, } abi, err := JSON(strings.NewReader(methoddata)) @@ -73,7 +83,14 @@ func TestMethodString(t *testing.T) { } for _, test := range table { - got := abi.Methods[test.method].String() + var got string + if test.method == "fallback" { + got = abi.Fallback.String() + } else if test.method == "receive" { + got = abi.Receive.String() + } else { + got = abi.Methods[test.method].String() + } if got != test.expectation { t.Errorf("expected string to be %s, got %s", test.expectation, got) } @@ -120,7 +137,7 @@ func TestMethodSig(t *testing.T) { } for _, test := range cases { - got := abi.Methods[test.method].Sig() + got := abi.Methods[test.method].Sig if got != test.expect { t.Errorf("expected string to be %s, got %s", test.expect, got) } diff --git a/accounts/abi/pack.go b/accounts/abi/pack.go index dd1c9a5df84f..0cd91cb4fad9 100644 --- a/accounts/abi/pack.go +++ b/accounts/abi/pack.go @@ -17,6 +17,8 @@ package abi import ( + "errors" + "fmt" "math/big" "reflect" @@ -25,7 +27,7 @@ import ( ) // packBytesSlice packs the given bytes as [L, V] as the canonical representation -// bytes slice +// bytes slice. func packBytesSlice(bytes []byte, l int) []byte { len := packNum(reflect.ValueOf(l)) return append(len, common.RightPadBytes(bytes, (l+31)/32*32)...) @@ -33,49 +35,51 @@ func packBytesSlice(bytes []byte, l int) []byte { // packElement packs the given reflect value according to the abi specification in // t. 
-func packElement(t Type, reflectValue reflect.Value) []byte { +func packElement(t Type, reflectValue reflect.Value) ([]byte, error) { switch t.T { case IntTy, UintTy: - return packNum(reflectValue) + return packNum(reflectValue), nil case StringTy: - return packBytesSlice([]byte(reflectValue.String()), reflectValue.Len()) + return packBytesSlice([]byte(reflectValue.String()), reflectValue.Len()), nil case AddressTy: if reflectValue.Kind() == reflect.Array { reflectValue = mustArrayToByteSlice(reflectValue) } - return common.LeftPadBytes(reflectValue.Bytes(), 32) + return common.LeftPadBytes(reflectValue.Bytes(), 32), nil case BoolTy: if reflectValue.Bool() { - return math.PaddedBigBytes(common.Big1, 32) + return math.PaddedBigBytes(common.Big1, 32), nil } - return math.PaddedBigBytes(common.Big0, 32) + return math.PaddedBigBytes(common.Big0, 32), nil case BytesTy: if reflectValue.Kind() == reflect.Array { reflectValue = mustArrayToByteSlice(reflectValue) } - return packBytesSlice(reflectValue.Bytes(), reflectValue.Len()) + if reflectValue.Type() != reflect.TypeOf([]byte{}) { + return []byte{}, errors.New("Bytes type is neither slice nor array") + } + return packBytesSlice(reflectValue.Bytes(), reflectValue.Len()), nil case FixedBytesTy, FunctionTy: if reflectValue.Kind() == reflect.Array { reflectValue = mustArrayToByteSlice(reflectValue) } - return common.RightPadBytes(reflectValue.Bytes(), 32) + return common.RightPadBytes(reflectValue.Bytes(), 32), nil default: - panic("abi: fatal error") + return []byte{}, fmt.Errorf("Could not pack element, unknown type: %v", t.T) } } -// packNum packs the given number (using the reflect value) and will cast it to appropriate number representation +// packNum packs the given number (using the reflect value) and will cast it to appropriate number representation. 
func packNum(value reflect.Value) []byte { switch kind := value.Kind(); kind { case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return U256(new(big.Int).SetUint64(value.Uint())) + return math.U256Bytes(new(big.Int).SetUint64(value.Uint())) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return U256(big.NewInt(value.Int())) + return math.U256Bytes(big.NewInt(value.Int())) case reflect.Ptr: - return U256(new(big.Int).Set(value.Interface().(*big.Int))) + return math.U256Bytes(new(big.Int).Set(value.Interface().(*big.Int))) default: panic("abi: fatal error") } - } diff --git a/accounts/abi/pack_test.go b/accounts/abi/pack_test.go index cf649b480734..5c7cb1cc1a24 100644 --- a/accounts/abi/pack_test.go +++ b/accounts/abi/pack_test.go @@ -18,623 +18,51 @@ package abi import ( "bytes" + "encoding/hex" + "fmt" "math" "math/big" "reflect" + "strconv" "strings" "testing" "github.com/ethereum/go-ethereum/common" ) +// TestPack tests the general pack/unpack tests in packing_test.go func TestPack(t *testing.T) { - for i, test := range []struct { - typ string - components []ArgumentMarshaling - input interface{} - output []byte - }{ - { - "uint8", - nil, - uint8(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "uint8[]", - nil, - []uint8{1, 2}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "uint16", - nil, - uint16(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "uint16[]", - nil, - []uint16{1, 2}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "uint32", - nil, - uint32(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "uint32[]", - nil, - []uint32{1, 2}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "uint64", - nil, - uint64(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "uint64[]", - nil, - []uint64{1, 2}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "uint256", - nil, - big.NewInt(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "uint256[]", - nil, - []*big.Int{big.NewInt(1), big.NewInt(2)}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int8", - nil, - int8(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int8[]", - nil, - []int8{1, 2}, - 
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int16", - nil, - int16(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int16[]", - nil, - []int16{1, 2}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int32", - nil, - int32(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int32[]", - nil, - []int32{1, 2}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int64", - nil, - int64(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int64[]", - nil, - []int64{1, 2}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int256", - nil, - big.NewInt(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int256[]", - nil, - []*big.Int{big.NewInt(1), big.NewInt(2)}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "bytes1", - nil, - [1]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes2", - nil, - [2]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes3", - nil, - [3]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes4", - nil, - [4]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes5", - nil, - [5]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes6", - nil, - [6]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes7", - nil, - [7]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes8", - nil, - [8]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes9", - nil, - [9]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes10", - nil, - [10]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes11", - nil, - [11]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes12", - nil, - [12]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes13", - nil, - [13]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes14", - nil, - [14]byte{1}, - 
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes15", - nil, - [15]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes16", - nil, - [16]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes17", - nil, - [17]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes18", - nil, - [18]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes19", - nil, - [19]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes20", - nil, - [20]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes21", - nil, - [21]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes22", - nil, - [22]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes23", - nil, - [23]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes24", - nil, - [24]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes25", - nil, - [25]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes26", - nil, - [26]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes27", - nil, - [27]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes28", - nil, - [28]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes29", - nil, - [29]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes30", - nil, - [30]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes31", - nil, - [31]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes32", - nil, - [32]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "uint32[2][3][4]", - nil, - [4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}}, - 
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018"), - }, - { - "address[]", - nil, - []common.Address{{1}, {2}}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000"), - }, - { - "bytes32[]", - nil, - []common.Hash{{1}, {2}}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000201000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000"), - }, - { - "function", - nil, - [24]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "string", - nil, - "foobar", - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000006666f6f6261720000000000000000000000000000000000000000000000000000"), - }, - { - "string[]", - nil, - []string{"hello", "foobar"}, - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2 - "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0 - "0000000000000000000000000000000000000000000000000000000000000080" + // offset 128 to i = 1 - "0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5 - "68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0] - "0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6 - "666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1] - }, - { - "string[2]", - nil, - []string{"hello", "foobar"}, - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset to i = 0 - "0000000000000000000000000000000000000000000000000000000000000080" + // offset to i = 1 - "0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5 - "68656c6c6f000000000000000000000000000000000000000000000000000000" + // 
str[0] - "0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6 - "666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1] - }, - { - "bytes32[][]", - nil, - [][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}}, - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2 - "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0 - "00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1 - "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2 - "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0] - "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1] - "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3 - "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0] - "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1] - "0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2] - }, - - { - "bytes32[][2]", - nil, - [][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}}, - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0 - "00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1 - "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2 - "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0] - "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1] - "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3 - "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0] - "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1] - "0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2] - }, - - { - "bytes32[3][2]", - nil, - [][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0] - "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1] - "0300000000000000000000000000000000000000000000000000000000000000" + // array[0][2] - "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0] - "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1] - "0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2] - }, - { - // static tuple - "tuple", - []ArgumentMarshaling{ - {Name: "a", Type: "int64"}, - {Name: "b", Type: "int256"}, - {Name: "c", Type: "int256"}, - {Name: "d", Type: "bool"}, - {Name: "e", Type: "bytes32[3][2]"}, - }, - struct { - A int64 - B *big.Int - C *big.Int - D bool - E [][]common.Hash - }{1, big.NewInt(1), big.NewInt(-1), true, [][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}}}, - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001" + // struct[a] - "0000000000000000000000000000000000000000000000000000000000000001" + // struct[b] - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // struct[c] - "0000000000000000000000000000000000000000000000000000000000000001" + // struct[d] - "0100000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][0] - 
"0200000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][1] - "0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][2] - "0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][0] - "0400000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][1] - "0500000000000000000000000000000000000000000000000000000000000000"), // struct[e] array[1][2] - }, - { - // dynamic tuple - "tuple", - []ArgumentMarshaling{ - {Name: "a", Type: "string"}, - {Name: "b", Type: "int64"}, - {Name: "c", Type: "bytes"}, - {Name: "d", Type: "string[]"}, - {Name: "e", Type: "int256[]"}, - {Name: "f", Type: "address[]"}, - }, - struct { - FieldA string `abi:"a"` // Test whether abi tag works - FieldB int64 `abi:"b"` - C []byte - D []string - E []*big.Int - F []common.Address - }{"foobar", 1, []byte{1}, []string{"foo", "bar"}, []*big.Int{big.NewInt(1), big.NewInt(-1)}, []common.Address{{1}, {2}}}, - common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000c0" + // struct[a] offset - "0000000000000000000000000000000000000000000000000000000000000001" + // struct[b] - "0000000000000000000000000000000000000000000000000000000000000100" + // struct[c] offset - "0000000000000000000000000000000000000000000000000000000000000140" + // struct[d] offset - "0000000000000000000000000000000000000000000000000000000000000220" + // struct[e] offset - "0000000000000000000000000000000000000000000000000000000000000280" + // struct[f] offset - "0000000000000000000000000000000000000000000000000000000000000006" + // struct[a] length - "666f6f6261720000000000000000000000000000000000000000000000000000" + // struct[a] "foobar" - "0000000000000000000000000000000000000000000000000000000000000001" + // struct[c] length - "0100000000000000000000000000000000000000000000000000000000000000" + // []byte{1} - "0000000000000000000000000000000000000000000000000000000000000002" + // struct[d] length - "0000000000000000000000000000000000000000000000000000000000000040" + // foo offset - "0000000000000000000000000000000000000000000000000000000000000080" + // bar offset - "0000000000000000000000000000000000000000000000000000000000000003" + // foo length - "666f6f0000000000000000000000000000000000000000000000000000000000" + // foo - "0000000000000000000000000000000000000000000000000000000000000003" + // bar offset - "6261720000000000000000000000000000000000000000000000000000000000" + // bar - "0000000000000000000000000000000000000000000000000000000000000002" + // struct[e] length - "0000000000000000000000000000000000000000000000000000000000000001" + // 1 - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // -1 - "0000000000000000000000000000000000000000000000000000000000000002" + // struct[f] length - "0000000000000000000000000100000000000000000000000000000000000000" + // common.Address{1} - "0000000000000000000000000200000000000000000000000000000000000000"), // common.Address{2} - }, - { - // nested tuple - "tuple", - []ArgumentMarshaling{ - {Name: "a", Type: "tuple", Components: []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256[]"}}}, - {Name: "b", Type: "int256[]"}, - }, - struct { - A struct { - FieldA *big.Int `abi:"a"` - B []*big.Int - } - B []*big.Int - }{ - A: struct { - FieldA *big.Int `abi:"a"` // Test whether abi tag works for nested tuple - B []*big.Int - }{big.NewInt(1), []*big.Int{big.NewInt(1), big.NewInt(0)}}, - B: []*big.Int{big.NewInt(1), 
big.NewInt(0)}}, - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // a offset - "00000000000000000000000000000000000000000000000000000000000000e0" + // b offset - "0000000000000000000000000000000000000000000000000000000000000001" + // a.a value - "0000000000000000000000000000000000000000000000000000000000000040" + // a.b offset - "0000000000000000000000000000000000000000000000000000000000000002" + // a.b length - "0000000000000000000000000000000000000000000000000000000000000001" + // a.b[0] value - "0000000000000000000000000000000000000000000000000000000000000000" + // a.b[1] value - "0000000000000000000000000000000000000000000000000000000000000002" + // b length - "0000000000000000000000000000000000000000000000000000000000000001" + // b[0] value - "0000000000000000000000000000000000000000000000000000000000000000"), // b[1] value - }, - { - // tuple slice - "tuple[]", - []ArgumentMarshaling{ - {Name: "a", Type: "int256"}, - {Name: "b", Type: "int256[]"}, - }, - []struct { - A *big.Int - B []*big.Int - }{ - {big.NewInt(-1), []*big.Int{big.NewInt(1), big.NewInt(0)}}, - {big.NewInt(1), []*big.Int{big.NewInt(2), big.NewInt(-1)}}, - }, - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // tuple length - "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset - "00000000000000000000000000000000000000000000000000000000000000e0" + // tuple[1] offset - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A - "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0].B offset - "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].B length - "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].B[0] value - "0000000000000000000000000000000000000000000000000000000000000000" + // tuple[0].B[1] value - "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A - "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[1].B offset - "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B length - "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B[0] value - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].B[1] value - }, - { - // static tuple array - "tuple[2]", - []ArgumentMarshaling{ - {Name: "a", Type: "int256"}, - {Name: "b", Type: "int256"}, - }, - [2]struct { - A *big.Int - B *big.Int - }{ - {big.NewInt(-1), big.NewInt(1)}, - {big.NewInt(1), big.NewInt(-1)}, - }, - common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].a - "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].b - "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].a - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].b - }, - { - // dynamic tuple array - "tuple[2]", - []ArgumentMarshaling{ - {Name: "a", Type: "int256[]"}, - }, - [2]struct { - A []*big.Int - }{ - {[]*big.Int{big.NewInt(-1), big.NewInt(1)}}, - {[]*big.Int{big.NewInt(1), big.NewInt(-1)}}, - }, - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset - "00000000000000000000000000000000000000000000000000000000000000c0" + // tuple[1] offset - "0000000000000000000000000000000000000000000000000000000000000020" + // tuple[0].A offset - 
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].A length - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A[0] - "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].A[1] - "0000000000000000000000000000000000000000000000000000000000000020" + // tuple[1].A offset - "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].A length - "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A[0] - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].A[1] - }, - } { - typ, err := NewType(test.typ, "", test.components) - if err != nil { - t.Fatalf("%v failed. Unexpected parse error: %v", i, err) - } - output, err := typ.pack(reflect.ValueOf(test.input)) - if err != nil { - t.Fatalf("%v failed. Unexpected pack error: %v", i, err) - } - - if !bytes.Equal(output, test.output) { - t.Errorf("input %d for typ: %v failed. Expected bytes: '%x' Got: '%x'", i, typ.String(), test.output, output) - } + for i, test := range packUnpackTests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + encb, err := hex.DecodeString(test.packed) + if err != nil { + t.Fatalf("invalid hex %s: %v", test.packed, err) + } + inDef := fmt.Sprintf(`[{ "name" : "method", "type": "function", "inputs": %s}]`, test.def) + inAbi, err := JSON(strings.NewReader(inDef)) + if err != nil { + t.Fatalf("invalid ABI definition %s, %v", inDef, err) + } + var packed []byte + packed, err = inAbi.Pack("method", test.unpacked) + + if err != nil { + t.Fatalf("test %d (%v) failed: %v", i, test.def, err) + } + if !reflect.DeepEqual(packed[4:], encb) { + t.Errorf("test %d (%v) failed: expected %v, got %v", i, test.def, encb, packed[4:]) + } + }) } } func TestMethodPack(t *testing.T) { - abi, err := JSON(strings.NewReader(jsondata2)) + abi, err := JSON(strings.NewReader(jsondata)) if err != nil { t.Fatal(err) } - sig := abi.Methods["slice"].ID() + sig := abi.Methods["slice"].ID sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...) @@ -648,7 +76,7 @@ func TestMethodPack(t *testing.T) { } var addrA, addrB = common.Address{1}, common.Address{2} - sig = abi.Methods["sliceAddress"].ID() + sig = abi.Methods["sliceAddress"].ID sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...) sig = append(sig, common.LeftPadBytes(addrA[:], 32)...) @@ -663,7 +91,7 @@ func TestMethodPack(t *testing.T) { } var addrC, addrD = common.Address{3}, common.Address{4} - sig = abi.Methods["sliceMultiAddress"].ID() + sig = abi.Methods["sliceMultiAddress"].ID sig = append(sig, common.LeftPadBytes([]byte{64}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{160}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...) @@ -681,7 +109,7 @@ func TestMethodPack(t *testing.T) { t.Errorf("expected %x got %x", sig, packed) } - sig = abi.Methods["slice256"].ID() + sig = abi.Methods["slice256"].ID sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...) @@ -695,7 +123,7 @@ func TestMethodPack(t *testing.T) { } a := [2][2]*big.Int{{big.NewInt(1), big.NewInt(1)}, {big.NewInt(2), big.NewInt(0)}} - sig = abi.Methods["nestedArray"].ID() + sig = abi.Methods["nestedArray"].ID sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...) 
@@ -712,7 +140,7 @@ func TestMethodPack(t *testing.T) { t.Errorf("expected %x got %x", sig, packed) } - sig = abi.Methods["nestedArray2"].ID() + sig = abi.Methods["nestedArray2"].ID sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{0x80}, 32)...) @@ -728,7 +156,7 @@ func TestMethodPack(t *testing.T) { t.Errorf("expected %x got %x", sig, packed) } - sig = abi.Methods["nestedSlice"].ID() + sig = abi.Methods["nestedSlice"].ID sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{0x02}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...) diff --git a/accounts/abi/packing_test.go b/accounts/abi/packing_test.go new file mode 100644 index 000000000000..eae3b0df2056 --- /dev/null +++ b/accounts/abi/packing_test.go @@ -0,0 +1,990 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package abi + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" +) + +type packUnpackTest struct { + def string + unpacked interface{} + packed string +} + +var packUnpackTests = []packUnpackTest{ + // Booleans + { + def: `[{ "type": "bool" }]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001", + unpacked: true, + }, + { + def: `[{ "type": "bool" }]`, + packed: "0000000000000000000000000000000000000000000000000000000000000000", + unpacked: false, + }, + // Integers + { + def: `[{ "type": "uint8" }]`, + unpacked: uint8(2), + packed: "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{ "type": "uint8[]" }]`, + unpacked: []uint8{1, 2}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{ "type": "uint16" }]`, + unpacked: uint16(2), + packed: "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{ "type": "uint16[]" }]`, + unpacked: []uint16{1, 2}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "uint17"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001", + unpacked: big.NewInt(1), + }, + { + def: `[{"type": "uint32"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001", + unpacked: uint32(1), + }, + { + def: `[{"type": "uint32[]"}]`, + unpacked: 
[]uint32{1, 2}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "uint64"}]`, + unpacked: uint64(2), + packed: "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "uint64[]"}]`, + unpacked: []uint64{1, 2}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "uint256"}]`, + unpacked: big.NewInt(2), + packed: "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "uint256[]"}]`, + unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int8"}]`, + unpacked: int8(2), + packed: "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int8[]"}]`, + unpacked: []int8{1, 2}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int16"}]`, + unpacked: int16(2), + packed: "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int16[]"}]`, + unpacked: []int16{1, 2}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int17"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001", + unpacked: big.NewInt(1), + }, + { + def: `[{"type": "int32"}]`, + unpacked: int32(2), + packed: "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int32"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001", + unpacked: int32(1), + }, + { + def: `[{"type": "int32[]"}]`, + unpacked: []int32{1, 2}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int64"}]`, + unpacked: int64(2), + packed: "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int64[]"}]`, + unpacked: []int64{1, 2}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + 
"0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int256"}]`, + unpacked: big.NewInt(2), + packed: "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int256"}]`, + packed: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + unpacked: big.NewInt(-1), + }, + { + def: `[{"type": "int256[]"}]`, + unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + }, + // Address + { + def: `[{"type": "address"}]`, + packed: "0000000000000000000000000100000000000000000000000000000000000000", + unpacked: common.Address{1}, + }, + { + def: `[{"type": "address[]"}]`, + unpacked: []common.Address{{1}, {2}}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000100000000000000000000000000000000000000" + + "0000000000000000000000000200000000000000000000000000000000000000", + }, + // Bytes + { + def: `[{"type": "bytes1"}]`, + unpacked: [1]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes2"}]`, + unpacked: [2]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes3"}]`, + unpacked: [3]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes4"}]`, + unpacked: [4]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes5"}]`, + unpacked: [5]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes6"}]`, + unpacked: [6]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes7"}]`, + unpacked: [7]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes8"}]`, + unpacked: [8]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes9"}]`, + unpacked: [9]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes10"}]`, + unpacked: [10]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes11"}]`, + unpacked: [11]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes12"}]`, + unpacked: [12]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes13"}]`, + unpacked: [13]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes14"}]`, + unpacked: [14]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes15"}]`, + unpacked: [15]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes16"}]`, + unpacked: [16]byte{1}, + packed: 
"0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes17"}]`, + unpacked: [17]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes18"}]`, + unpacked: [18]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes19"}]`, + unpacked: [19]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes20"}]`, + unpacked: [20]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes21"}]`, + unpacked: [21]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes22"}]`, + unpacked: [22]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes23"}]`, + unpacked: [23]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes24"}]`, + unpacked: [24]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes25"}]`, + unpacked: [25]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes26"}]`, + unpacked: [26]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes27"}]`, + unpacked: [27]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes28"}]`, + unpacked: [28]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes29"}]`, + unpacked: [29]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes30"}]`, + unpacked: [30]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes31"}]`, + unpacked: [31]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes32"}]`, + unpacked: [32]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes32"}]`, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + unpacked: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + { + def: `[{"type": "bytes"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000020" + + "0100000000000000000000000000000000000000000000000000000000000000", + unpacked: common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), + }, + { + def: `[{"type": "bytes32"}]`, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + unpacked: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + // Functions + { + def: `[{"type": "function"}]`, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + unpacked: [24]byte{1}, + }, + // Slice and Array + { + def: `[{"type": "uint8[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + 
"0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: []uint8{1, 2}, + }, + { + def: `[{"type": "uint8[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000000", + unpacked: []uint8{}, + }, + { + def: `[{"type": "uint256[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000000", + unpacked: []*big.Int{}, + }, + { + def: `[{"type": "uint8[2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [2]uint8{1, 2}, + }, + { + def: `[{"type": "int8[2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [2]int8{1, 2}, + }, + { + def: `[{"type": "int16[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: []int16{1, 2}, + }, + { + def: `[{"type": "int16[2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [2]int16{1, 2}, + }, + { + def: `[{"type": "int32[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: []int32{1, 2}, + }, + { + def: `[{"type": "int32[2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [2]int32{1, 2}, + }, + { + def: `[{"type": "int64[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: []int64{1, 2}, + }, + { + def: `[{"type": "int64[2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [2]int64{1, 2}, + }, + { + def: `[{"type": "int256[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)}, + }, + { + def: `[{"type": "int256[3]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000003", + unpacked: [3]*big.Int{big.NewInt(1), big.NewInt(2), 
big.NewInt(3)}, + }, + // multi dimensional, if these pass, all types that don't require length prefix should pass + { + def: `[{"type": "uint8[][]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000000", + unpacked: [][]uint8{}, + }, + { + def: `[{"type": "uint8[][]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000040" + + "00000000000000000000000000000000000000000000000000000000000000a0" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [][]uint8{{1, 2}, {1, 2}}, + }, + { + def: `[{"type": "uint8[][]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000040" + + "00000000000000000000000000000000000000000000000000000000000000a0" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000003" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000003", + unpacked: [][]uint8{{1, 2}, {1, 2, 3}}, + }, + { + def: `[{"type": "uint8[2][2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [2][2]uint8{{1, 2}, {1, 2}}, + }, + { + def: `[{"type": "uint8[][2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000040" + + "0000000000000000000000000000000000000000000000000000000000000060" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000", + unpacked: [2][]uint8{{}, {}}, + }, + { + def: `[{"type": "uint8[][2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000040" + + "0000000000000000000000000000000000000000000000000000000000000080" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000001", + unpacked: [2][]uint8{{1}, {1}}, + }, + { + def: `[{"type": "uint8[2][]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + 
"0000000000000000000000000000000000000000000000000000000000000000", + unpacked: [][2]uint8{}, + }, + { + def: `[{"type": "uint8[2][]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [][2]uint8{{1, 2}}, + }, + { + def: `[{"type": "uint8[2][]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [][2]uint8{{1, 2}, {1, 2}}, + }, + { + def: `[{"type": "uint16[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: []uint16{1, 2}, + }, + { + def: `[{"type": "uint16[2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [2]uint16{1, 2}, + }, + { + def: `[{"type": "uint32[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: []uint32{1, 2}, + }, + { + def: `[{"type": "uint32[2][3][4]"}]`, + unpacked: [4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}}, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000003" + + "0000000000000000000000000000000000000000000000000000000000000004" + + "0000000000000000000000000000000000000000000000000000000000000005" + + "0000000000000000000000000000000000000000000000000000000000000006" + + "0000000000000000000000000000000000000000000000000000000000000007" + + "0000000000000000000000000000000000000000000000000000000000000008" + + "0000000000000000000000000000000000000000000000000000000000000009" + + "000000000000000000000000000000000000000000000000000000000000000a" + + "000000000000000000000000000000000000000000000000000000000000000b" + + "000000000000000000000000000000000000000000000000000000000000000c" + + "000000000000000000000000000000000000000000000000000000000000000d" + + "000000000000000000000000000000000000000000000000000000000000000e" + + "000000000000000000000000000000000000000000000000000000000000000f" + + "0000000000000000000000000000000000000000000000000000000000000010" + + "0000000000000000000000000000000000000000000000000000000000000011" + + "0000000000000000000000000000000000000000000000000000000000000012" + + "0000000000000000000000000000000000000000000000000000000000000013" + + "0000000000000000000000000000000000000000000000000000000000000014" + + 
"0000000000000000000000000000000000000000000000000000000000000015" + + "0000000000000000000000000000000000000000000000000000000000000016" + + "0000000000000000000000000000000000000000000000000000000000000017" + + "0000000000000000000000000000000000000000000000000000000000000018", + }, + + { + def: `[{"type": "bytes32[]"}]`, + unpacked: [][32]byte{{1}, {2}}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0100000000000000000000000000000000000000000000000000000000000000" + + "0200000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "uint32[2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [2]uint32{1, 2}, + }, + { + def: `[{"type": "uint64[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: []uint64{1, 2}, + }, + { + def: `[{"type": "uint64[2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [2]uint64{1, 2}, + }, + { + def: `[{"type": "uint256[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)}, + }, + { + def: `[{"type": "uint256[3]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000003", + unpacked: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}, + }, + { + def: `[{"type": "string[4]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000080" + + "00000000000000000000000000000000000000000000000000000000000000c0" + + "0000000000000000000000000000000000000000000000000000000000000100" + + "0000000000000000000000000000000000000000000000000000000000000140" + + "0000000000000000000000000000000000000000000000000000000000000005" + + "48656c6c6f000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000005" + + "576f726c64000000000000000000000000000000000000000000000000000000" + + "000000000000000000000000000000000000000000000000000000000000000b" + + "476f2d657468657265756d000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000008" + + "457468657265756d000000000000000000000000000000000000000000000000", + unpacked: [4]string{"Hello", "World", "Go-ethereum", "Ethereum"}, + }, + { + def: `[{"type": "string[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000040" + + "0000000000000000000000000000000000000000000000000000000000000080" + + 
"0000000000000000000000000000000000000000000000000000000000000008" + + "457468657265756d000000000000000000000000000000000000000000000000" + + "000000000000000000000000000000000000000000000000000000000000000b" + + "676f2d657468657265756d000000000000000000000000000000000000000000", + unpacked: []string{"Ethereum", "go-ethereum"}, + }, + { + def: `[{"type": "bytes[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000040" + + "0000000000000000000000000000000000000000000000000000000000000080" + + "0000000000000000000000000000000000000000000000000000000000000003" + + "f0f0f00000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000003" + + "f0f0f00000000000000000000000000000000000000000000000000000000000", + unpacked: [][]byte{{0xf0, 0xf0, 0xf0}, {0xf0, 0xf0, 0xf0}}, + }, + { + def: `[{"type": "uint256[2][][]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000040" + + "00000000000000000000000000000000000000000000000000000000000000e0" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "00000000000000000000000000000000000000000000000000000000000000c8" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "00000000000000000000000000000000000000000000000000000000000003e8" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "00000000000000000000000000000000000000000000000000000000000000c8" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "00000000000000000000000000000000000000000000000000000000000003e8", + unpacked: [][][2]*big.Int{{{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}, {{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}}, + }, + // struct outputs + { + def: `[{"components": [{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}], "type":"tuple"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: struct { + Int1 *big.Int + Int2 *big.Int + }{big.NewInt(1), big.NewInt(2)}, + }, + { + def: `[{"components": [{"name":"int_one","type":"int256"}], "type":"tuple"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001", + unpacked: struct { + IntOne *big.Int + }{big.NewInt(1)}, + }, + { + def: `[{"components": [{"name":"int__one","type":"int256"}], "type":"tuple"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001", + unpacked: struct { + IntOne *big.Int + }{big.NewInt(1)}, + }, + { + def: `[{"components": [{"name":"int_one_","type":"int256"}], "type":"tuple"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001", + unpacked: struct { + IntOne *big.Int + }{big.NewInt(1)}, + }, + { + def: `[{"components": [{"name":"int_one","type":"int256"}, {"name":"intone","type":"int256"}], "type":"tuple"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + 
"0000000000000000000000000000000000000000000000000000000000000002", + unpacked: struct { + IntOne *big.Int + Intone *big.Int + }{big.NewInt(1), big.NewInt(2)}, + }, + { + def: `[{"type": "string"}]`, + unpacked: "foobar", + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000006" + + "666f6f6261720000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "string[]"}]`, + unpacked: []string{"hello", "foobar"}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2 + "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0 + "0000000000000000000000000000000000000000000000000000000000000080" + // offset 128 to i = 1 + "0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5 + "68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0] + "0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6 + "666f6f6261720000000000000000000000000000000000000000000000000000", // str[1] + }, + { + def: `[{"type": "string[2]"}]`, + unpacked: [2]string{"hello", "foobar"}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000040" + // offset to i = 0 + "0000000000000000000000000000000000000000000000000000000000000080" + // offset to i = 1 + "0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5 + "68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0] + "0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6 + "666f6f6261720000000000000000000000000000000000000000000000000000", // str[1] + }, + { + def: `[{"type": "bytes32[][]"}]`, + unpacked: [][][32]byte{{{1}, {2}}, {{3}, {4}, {5}}}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2 + "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0 + "00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1 + "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2 + "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0] + "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1] + "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3 + "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0] + "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1] + "0500000000000000000000000000000000000000000000000000000000000000", // array[1][2] + }, + { + def: `[{"type": "bytes32[][2]"}]`, + unpacked: [2][][32]byte{{{1}, {2}}, {{3}, {4}, {5}}}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0 + "00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1 + "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2 + "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0] + 
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1] + "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3 + "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0] + "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1] + "0500000000000000000000000000000000000000000000000000000000000000", // array[1][2] + }, + { + def: `[{"type": "bytes32[3][2]"}]`, + unpacked: [2][3][32]byte{{{1}, {2}, {3}}, {{3}, {4}, {5}}}, + packed: "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0] + "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1] + "0300000000000000000000000000000000000000000000000000000000000000" + // array[0][2] + "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0] + "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1] + "0500000000000000000000000000000000000000000000000000000000000000", // array[1][2] + }, + { + // static tuple + def: `[{"components": [{"name":"a","type":"int64"}, + {"name":"b","type":"int256"}, + {"name":"c","type":"int256"}, + {"name":"d","type":"bool"}, + {"name":"e","type":"bytes32[3][2]"}], "type":"tuple"}]`, + unpacked: struct { + A int64 + B *big.Int + C *big.Int + D bool + E [2][3][32]byte + }{1, big.NewInt(1), big.NewInt(-1), true, [2][3][32]byte{{{1}, {2}, {3}}, {{3}, {4}, {5}}}}, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + // struct[a] + "0000000000000000000000000000000000000000000000000000000000000001" + // struct[b] + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // struct[c] + "0000000000000000000000000000000000000000000000000000000000000001" + // struct[d] + "0100000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][0] + "0200000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][1] + "0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][2] + "0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][0] + "0400000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][1] + "0500000000000000000000000000000000000000000000000000000000000000", // struct[e] array[1][2] + }, + { + def: `[{"components": [{"name":"a","type":"string"}, + {"name":"b","type":"int64"}, + {"name":"c","type":"bytes"}, + {"name":"d","type":"string[]"}, + {"name":"e","type":"int256[]"}, + {"name":"f","type":"address[]"}], "type":"tuple"}]`, + unpacked: struct { + A string + B int64 + C []byte + D []string + E []*big.Int + F []common.Address + }{"foobar", 1, []byte{1}, []string{"foo", "bar"}, []*big.Int{big.NewInt(1), big.NewInt(-1)}, []common.Address{{1}, {2}}}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + // struct a + "00000000000000000000000000000000000000000000000000000000000000c0" + // struct[a] offset + "0000000000000000000000000000000000000000000000000000000000000001" + // struct[b] + "0000000000000000000000000000000000000000000000000000000000000100" + // struct[c] offset + "0000000000000000000000000000000000000000000000000000000000000140" + // struct[d] offset + "0000000000000000000000000000000000000000000000000000000000000220" + // struct[e] offset + "0000000000000000000000000000000000000000000000000000000000000280" + // struct[f] offset + 
"0000000000000000000000000000000000000000000000000000000000000006" + // struct[a] length + "666f6f6261720000000000000000000000000000000000000000000000000000" + // struct[a] "foobar" + "0000000000000000000000000000000000000000000000000000000000000001" + // struct[c] length + "0100000000000000000000000000000000000000000000000000000000000000" + // []byte{1} + "0000000000000000000000000000000000000000000000000000000000000002" + // struct[d] length + "0000000000000000000000000000000000000000000000000000000000000040" + // foo offset + "0000000000000000000000000000000000000000000000000000000000000080" + // bar offset + "0000000000000000000000000000000000000000000000000000000000000003" + // foo length + "666f6f0000000000000000000000000000000000000000000000000000000000" + // foo + "0000000000000000000000000000000000000000000000000000000000000003" + // bar offset + "6261720000000000000000000000000000000000000000000000000000000000" + // bar + "0000000000000000000000000000000000000000000000000000000000000002" + // struct[e] length + "0000000000000000000000000000000000000000000000000000000000000001" + // 1 + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // -1 + "0000000000000000000000000000000000000000000000000000000000000002" + // struct[f] length + "0000000000000000000000000100000000000000000000000000000000000000" + // common.Address{1} + "0000000000000000000000000200000000000000000000000000000000000000", // common.Address{2} + }, + { + def: `[{"components": [{ "type": "tuple","components": [{"name": "a","type": "uint256"}, + {"name": "b","type": "uint256[]"}], + "name": "a","type": "tuple"}, + {"name": "b","type": "uint256[]"}], "type": "tuple"}]`, + unpacked: struct { + A struct { + A *big.Int + B []*big.Int + } + B []*big.Int + }{ + A: struct { + A *big.Int + B []*big.Int + }{big.NewInt(1), []*big.Int{big.NewInt(1), big.NewInt(2)}}, + B: []*big.Int{big.NewInt(1), big.NewInt(2)}}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + // struct a + "0000000000000000000000000000000000000000000000000000000000000040" + // a offset + "00000000000000000000000000000000000000000000000000000000000000e0" + // b offset + "0000000000000000000000000000000000000000000000000000000000000001" + // a.a value + "0000000000000000000000000000000000000000000000000000000000000040" + // a.b offset + "0000000000000000000000000000000000000000000000000000000000000002" + // a.b length + "0000000000000000000000000000000000000000000000000000000000000001" + // a.b[0] value + "0000000000000000000000000000000000000000000000000000000000000002" + // a.b[1] value + "0000000000000000000000000000000000000000000000000000000000000002" + // b length + "0000000000000000000000000000000000000000000000000000000000000001" + // b[0] value + "0000000000000000000000000000000000000000000000000000000000000002", // b[1] value + }, + + { + def: `[{"components": [{"name": "a","type": "int256"}, + {"name": "b","type": "int256[]"}], + "name": "a","type": "tuple[]"}]`, + unpacked: []struct { + A *big.Int + B []*big.Int + }{ + {big.NewInt(-1), []*big.Int{big.NewInt(1), big.NewInt(3)}}, + {big.NewInt(1), []*big.Int{big.NewInt(2), big.NewInt(-1)}}, + }, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + // tuple length + "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset + "00000000000000000000000000000000000000000000000000000000000000e0" + // tuple[1] offset + 
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A + "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0].B offset + "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].B length + "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].B[0] value + "0000000000000000000000000000000000000000000000000000000000000003" + // tuple[0].B[1] value + "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A + "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[1].B offset + "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B length + "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B[0] value + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // tuple[1].B[1] value + }, + { + def: `[{"components": [{"name": "a","type": "int256"}, + {"name": "b","type": "int256"}], + "name": "a","type": "tuple[2]"}]`, + unpacked: [2]struct { + A *big.Int + B *big.Int + }{ + {big.NewInt(-1), big.NewInt(1)}, + {big.NewInt(1), big.NewInt(-1)}, + }, + packed: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].a + "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].b + "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].a + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // tuple[1].b + }, + { + def: `[{"components": [{"name": "a","type": "int256[]"}], + "name": "a","type": "tuple[2]"}]`, + unpacked: [2]struct { + A []*big.Int + }{ + {[]*big.Int{big.NewInt(-1), big.NewInt(1)}}, + {[]*big.Int{big.NewInt(1), big.NewInt(-1)}}, + }, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset + "00000000000000000000000000000000000000000000000000000000000000c0" + // tuple[1] offset + "0000000000000000000000000000000000000000000000000000000000000020" + // tuple[0].A offset + "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].A length + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A[0] + "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].A[1] + "0000000000000000000000000000000000000000000000000000000000000020" + // tuple[1].A offset + "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].A length + "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A[0] + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // tuple[1].A[1] + }, +} diff --git a/accounts/abi/reflect.go b/accounts/abi/reflect.go index 73ca8fa2bd7d..11248e07302e 100644 --- a/accounts/abi/reflect.go +++ b/accounts/abi/reflect.go @@ -17,57 +17,74 @@ package abi import ( + "errors" "fmt" + "math/big" "reflect" "strings" ) -// indirect recursively dereferences the value until it either gets the value -// or finds a big.Int -func indirect(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Ptr && v.Elem().Type() != derefbigT { - return indirect(v.Elem()) +// ConvertType converts an interface of a runtime type into a interface of the +// given type +// e.g. 
turn +// var fields []reflect.StructField +// fields = append(fields, reflect.StructField{ +// Name: "X", +// Type: reflect.TypeOf(new(big.Int)), +// Tag: reflect.StructTag("json:\"" + "x" + "\""), +// } +// into +// type TupleT struct { X *big.Int } +func ConvertType(in interface{}, proto interface{}) interface{} { + protoType := reflect.TypeOf(proto) + if reflect.TypeOf(in).ConvertibleTo(protoType) { + return reflect.ValueOf(in).Convert(protoType).Interface() } - return v + // Use set as a last ditch effort + if err := set(reflect.ValueOf(proto), reflect.ValueOf(in)); err != nil { + panic(err) + } + return proto } -// indirectInterfaceOrPtr recursively dereferences the value until value is not interface. -func indirectInterfaceOrPtr(v reflect.Value) reflect.Value { - if (v.Kind() == reflect.Interface || v.Kind() == reflect.Ptr) && v.Elem().IsValid() { +// indirect recursively dereferences the value until it either gets the value +// or finds a big.Int +func indirect(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Ptr && v.Elem().Type() != reflect.TypeOf(big.Int{}) { return indirect(v.Elem()) } return v } -// reflectIntKind returns the reflect using the given size and +// reflectIntType returns the reflect using the given size and // unsignedness. -func reflectIntKindAndType(unsigned bool, size int) (reflect.Kind, reflect.Type) { +func reflectIntType(unsigned bool, size int) reflect.Type { + if unsigned { + switch size { + case 8: + return reflect.TypeOf(uint8(0)) + case 16: + return reflect.TypeOf(uint16(0)) + case 32: + return reflect.TypeOf(uint32(0)) + case 64: + return reflect.TypeOf(uint64(0)) + } + } switch size { case 8: - if unsigned { - return reflect.Uint8, uint8T - } - return reflect.Int8, int8T + return reflect.TypeOf(int8(0)) case 16: - if unsigned { - return reflect.Uint16, uint16T - } - return reflect.Int16, int16T + return reflect.TypeOf(int16(0)) case 32: - if unsigned { - return reflect.Uint32, uint32T - } - return reflect.Int32, int32T + return reflect.TypeOf(int32(0)) case 64: - if unsigned { - return reflect.Uint64, uint64T - } - return reflect.Int64, int64T + return reflect.TypeOf(int64(0)) } - return reflect.Ptr, bigT + return reflect.TypeOf(&big.Int{}) } -// mustArrayToBytesSlice creates a new byte slice with the exact same size as value +// mustArrayToByteSlice creates a new byte slice with the exact same size as value // and copies the bytes in value to the new slice. 
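// Editor's note: the sketch below is illustrative only and is not part of this patch.
// It assumes it lives in package abi next to the helpers above (so it can reach the
// unexported reflectIntType) and that the test name is unused. It demonstrates the
// mapping the new reflectIntType helper performs: fixed widths up to 64 bits map to
// the native Go integer types, while any wider width falls back to *big.Int.
func TestReflectIntTypeSketch(t *testing.T) {
	if got := reflectIntType(true, 8); got != reflect.TypeOf(uint8(0)) {
		t.Errorf("uint8 mapped to %v", got)
	}
	if got := reflectIntType(false, 64); got != reflect.TypeOf(int64(0)) {
		t.Errorf("int64 mapped to %v", got)
	}
	// A 256-bit (or any other non-native) width is represented by *big.Int.
	if got := reflectIntType(false, 256); got != reflect.TypeOf(&big.Int{}) {
		t.Errorf("int256 mapped to %v", got)
	}
}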
func mustArrayToByteSlice(value reflect.Value) reflect.Value { slice := reflect.MakeSlice(reflect.TypeOf([]byte{}), value.Len(), value.Len()) @@ -84,12 +101,16 @@ func set(dst, src reflect.Value) error { switch { case dstType.Kind() == reflect.Interface && dst.Elem().IsValid(): return set(dst.Elem(), src) - case dstType.Kind() == reflect.Ptr && dstType.Elem() != derefbigT: + case dstType.Kind() == reflect.Ptr && dstType.Elem() != reflect.TypeOf(big.Int{}): return set(dst.Elem(), src) case srcType.AssignableTo(dstType) && dst.CanSet(): dst.Set(src) - case dstType.Kind() == reflect.Slice && srcType.Kind() == reflect.Slice: + case dstType.Kind() == reflect.Slice && srcType.Kind() == reflect.Slice && dst.CanSet(): return setSlice(dst, src) + case dstType.Kind() == reflect.Array: + return setArray(dst, src) + case dstType.Kind() == reflect.Struct: + return setStruct(dst, src) default: return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type()) } @@ -98,38 +119,59 @@ func set(dst, src reflect.Value) error { // setSlice attempts to assign src to dst when slices are not assignable by default // e.g. src: [][]byte -> dst: [][15]byte +// setSlice ignores if we cannot copy all of src' elements. func setSlice(dst, src reflect.Value) error { slice := reflect.MakeSlice(dst.Type(), src.Len(), src.Len()) for i := 0; i < src.Len(); i++ { - v := src.Index(i) - reflect.Copy(slice.Index(i), v) + if src.Index(i).Kind() == reflect.Struct { + if err := set(slice.Index(i), src.Index(i)); err != nil { + return err + } + } else { + // e.g. [][32]uint8 to []common.Hash + if err := set(slice.Index(i), src.Index(i)); err != nil { + return err + } + } } - - dst.Set(slice) - return nil + if dst.CanSet() { + dst.Set(slice) + return nil + } + return errors.New("Cannot set slice, destination not settable") } -// requireAssignable assures that `dest` is a pointer and it's not an interface. 
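// Editor's note: an illustrative, in-package sketch (not part of this patch) of the
// widened set() dispatch above. With the new setSlice/setArray/setStruct cases, a
// decoded [][32]byte can be copied into a destination of a different but compatible
// slice type such as []common.Hash. It assumes package abi and the imports already
// used there (reflect, testing, github.com/ethereum/go-ethereum/common); the test
// name is hypothetical.
func TestSetSliceSketch(t *testing.T) {
	src := [][32]byte{{1}, {2}}
	var dst []common.Hash
	if err := set(reflect.ValueOf(&dst).Elem(), reflect.ValueOf(src)); err != nil {
		t.Fatal(err)
	}
	if dst[0] != (common.Hash{1}) || dst[1] != (common.Hash{2}) {
		t.Errorf("unexpected copy result: %v", dst)
	}
}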
-func requireAssignable(dst, src reflect.Value) error { - if dst.Kind() != reflect.Ptr && dst.Kind() != reflect.Interface { - return fmt.Errorf("abi: cannot unmarshal %v into %v", src.Type(), dst.Type()) +func setArray(dst, src reflect.Value) error { + if src.Kind() == reflect.Ptr { + return set(dst, indirect(src)) } - return nil + array := reflect.New(dst.Type()).Elem() + min := src.Len() + if src.Len() > dst.Len() { + min = dst.Len() + } + for i := 0; i < min; i++ { + if err := set(array.Index(i), src.Index(i)); err != nil { + return err + } + } + if dst.CanSet() { + dst.Set(array) + return nil + } + return errors.New("Cannot set array, destination not settable") } -// requireUnpackKind verifies preconditions for unpacking `args` into `kind` -func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind, - args Arguments) error { - - switch k { - case reflect.Struct: - case reflect.Slice, reflect.Array: - if minLen := args.LengthNonIndexed(); v.Len() < minLen { - return fmt.Errorf("abi: insufficient number of elements in the list/array for unpack, want %d, got %d", - minLen, v.Len()) +func setStruct(dst, src reflect.Value) error { + for i := 0; i < src.NumField(); i++ { + srcField := src.Field(i) + dstField := dst.Field(i) + if !dstField.IsValid() || !srcField.IsValid() { + return fmt.Errorf("Could not find src field: %v value: %v in destination", srcField.Type().Name(), srcField) + } + if err := set(dstField, srcField); err != nil { + return err } - default: - return fmt.Errorf("abi: cannot unmarshal tuple into %v", t) } return nil } @@ -156,9 +198,8 @@ func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[stri continue } // skip fields that have no abi:"" tag. - var ok bool - var tagName string - if tagName, ok = typ.Field(i).Tag.Lookup("abi"); !ok { + tagName, ok := typ.Field(i).Tag.Lookup("abi") + if !ok { continue } // check if tag is empty. 
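// Editor's note: a minimal usage sketch of the newly exported ConvertType helper,
// shown for illustration only (it is not part of this patch). It assumes it sits in
// package abi; the struct name Point2D and the test name are hypothetical. The
// pattern converts a value whose concrete type was built at runtime (such as the
// anonymous struct an ABI tuple decodes into) to a user-defined struct with the same
// field layout, falling back to the reflective set() when no direct conversion exists.
func TestConvertTypeSketch(t *testing.T) {
	// Anonymous struct standing in for a runtime-built tuple type.
	raw := struct {
		X *big.Int
		Y *big.Int
	}{big.NewInt(3), big.NewInt(4)}

	type Point2D struct {
		X *big.Int
		Y *big.Int
	}
	pt := *ConvertType(raw, new(Point2D)).(*Point2D)
	if pt.X.Cmp(big.NewInt(3)) != 0 || pt.Y.Cmp(big.NewInt(4)) != 0 {
		t.Errorf("unexpected conversion result: %+v", pt)
	}
}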
diff --git a/accounts/abi/reflect_test.go b/accounts/abi/reflect_test.go index c425e6e54bff..cf13a79da84e 100644 --- a/accounts/abi/reflect_test.go +++ b/accounts/abi/reflect_test.go @@ -17,6 +17,7 @@ package abi import ( + "math/big" "reflect" "testing" ) @@ -189,3 +190,72 @@ func TestReflectNameToStruct(t *testing.T) { }) } } + +func TestConvertType(t *testing.T) { + // Test Basic Struct + type T struct { + X *big.Int + Y *big.Int + } + // Create on-the-fly structure + var fields []reflect.StructField + fields = append(fields, reflect.StructField{ + Name: "X", + Type: reflect.TypeOf(new(big.Int)), + Tag: "json:\"" + "x" + "\"", + }) + fields = append(fields, reflect.StructField{ + Name: "Y", + Type: reflect.TypeOf(new(big.Int)), + Tag: "json:\"" + "y" + "\"", + }) + val := reflect.New(reflect.StructOf(fields)) + val.Elem().Field(0).Set(reflect.ValueOf(big.NewInt(1))) + val.Elem().Field(1).Set(reflect.ValueOf(big.NewInt(2))) + // ConvertType + out := *ConvertType(val.Interface(), new(T)).(*T) + if out.X.Cmp(big.NewInt(1)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out.X, big.NewInt(1)) + } + if out.Y.Cmp(big.NewInt(2)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out.Y, big.NewInt(2)) + } + // Slice Type + val2 := reflect.MakeSlice(reflect.SliceOf(reflect.StructOf(fields)), 2, 2) + val2.Index(0).Field(0).Set(reflect.ValueOf(big.NewInt(1))) + val2.Index(0).Field(1).Set(reflect.ValueOf(big.NewInt(2))) + val2.Index(1).Field(0).Set(reflect.ValueOf(big.NewInt(3))) + val2.Index(1).Field(1).Set(reflect.ValueOf(big.NewInt(4))) + out2 := *ConvertType(val2.Interface(), new([]T)).(*[]T) + if out2[0].X.Cmp(big.NewInt(1)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out2[0].X, big.NewInt(1)) + } + if out2[0].Y.Cmp(big.NewInt(2)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out2[1].Y, big.NewInt(2)) + } + if out2[1].X.Cmp(big.NewInt(3)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out2[0].X, big.NewInt(1)) + } + if out2[1].Y.Cmp(big.NewInt(4)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out2[1].Y, big.NewInt(2)) + } + // Array Type + val3 := reflect.New(reflect.ArrayOf(2, reflect.StructOf(fields))) + val3.Elem().Index(0).Field(0).Set(reflect.ValueOf(big.NewInt(1))) + val3.Elem().Index(0).Field(1).Set(reflect.ValueOf(big.NewInt(2))) + val3.Elem().Index(1).Field(0).Set(reflect.ValueOf(big.NewInt(3))) + val3.Elem().Index(1).Field(1).Set(reflect.ValueOf(big.NewInt(4))) + out3 := *ConvertType(val3.Interface(), new([2]T)).(*[2]T) + if out3[0].X.Cmp(big.NewInt(1)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out3[0].X, big.NewInt(1)) + } + if out3[0].Y.Cmp(big.NewInt(2)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out3[1].Y, big.NewInt(2)) + } + if out3[1].X.Cmp(big.NewInt(3)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out3[0].X, big.NewInt(1)) + } + if out3[1].Y.Cmp(big.NewInt(4)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out3[1].Y, big.NewInt(2)) + } +} diff --git a/accounts/abi/topics.go b/accounts/abi/topics.go new file mode 100644 index 000000000000..360df7d5e846 --- /dev/null +++ b/accounts/abi/topics.go @@ -0,0 +1,173 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package abi + +import ( + "encoding/binary" + "errors" + "fmt" + "math/big" + "reflect" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +// MakeTopics converts a filter query argument list into a filter topic set. +func MakeTopics(query ...[]interface{}) ([][]common.Hash, error) { + topics := make([][]common.Hash, len(query)) + for i, filter := range query { + for _, rule := range filter { + var topic common.Hash + + // Try to generate the topic based on simple types + switch rule := rule.(type) { + case common.Hash: + copy(topic[:], rule[:]) + case common.Address: + copy(topic[common.HashLength-common.AddressLength:], rule[:]) + case *big.Int: + blob := rule.Bytes() + copy(topic[common.HashLength-len(blob):], blob) + case bool: + if rule { + topic[common.HashLength-1] = 1 + } + case int8: + copy(topic[:], genIntType(int64(rule), 1)) + case int16: + copy(topic[:], genIntType(int64(rule), 2)) + case int32: + copy(topic[:], genIntType(int64(rule), 4)) + case int64: + copy(topic[:], genIntType(rule, 8)) + case uint8: + blob := new(big.Int).SetUint64(uint64(rule)).Bytes() + copy(topic[common.HashLength-len(blob):], blob) + case uint16: + blob := new(big.Int).SetUint64(uint64(rule)).Bytes() + copy(topic[common.HashLength-len(blob):], blob) + case uint32: + blob := new(big.Int).SetUint64(uint64(rule)).Bytes() + copy(topic[common.HashLength-len(blob):], blob) + case uint64: + blob := new(big.Int).SetUint64(rule).Bytes() + copy(topic[common.HashLength-len(blob):], blob) + case string: + hash := crypto.Keccak256Hash([]byte(rule)) + copy(topic[:], hash[:]) + case []byte: + hash := crypto.Keccak256Hash(rule) + copy(topic[:], hash[:]) + + default: + // todo(rjl493456442) according solidity documentation, indexed event + // parameters that are not value types i.e. arrays and structs are not + // stored directly but instead a keccak256-hash of an encoding is stored. + // + // We only convert stringS and bytes to hash, still need to deal with + // array(both fixed-size and dynamic-size) and struct. + + // Attempt to generate the topic from funky types + val := reflect.ValueOf(rule) + switch { + // static byte array + case val.Kind() == reflect.Array && reflect.TypeOf(rule).Elem().Kind() == reflect.Uint8: + reflect.Copy(reflect.ValueOf(topic[:val.Len()]), val) + default: + return nil, fmt.Errorf("unsupported indexed type: %T", rule) + } + } + topics[i] = append(topics[i], topic) + } + } + return topics, nil +} + +func genIntType(rule int64, size uint) []byte { + var topic [common.HashLength]byte + if rule < 0 { + // if a rule is negative, we need to put it into two's complement. + // extended to common.HashLength bytes. + topic = [common.HashLength]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255} + } + for i := uint(0); i < size; i++ { + topic[common.HashLength-i-1] = byte(rule >> (i * 8)) + } + return topic[:] +} + +// ParseTopics converts the indexed topic fields into actual log field values. 
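// Editor's note: illustrative sketch only, not part of this patch; it assumes it lives
// in package abi beside MakeTopics/genIntType above and that the test name is unused.
// It shows the two's-complement sign extension genIntType applies when a negative
// fixed-width integer is used as a filter topic, versus simple right-alignment for
// positive values.
func TestGenIntTypeSketch(t *testing.T) {
	for _, b := range genIntType(-1, 1) { // int8(-1)
		if b != 0xff {
			t.Fatalf("expected every byte set for -1, got %x", b)
		}
	}
	// Positive values are right-aligned within the 32-byte topic word.
	if got := genIntType(2, 8); got[len(got)-1] != 2 {
		t.Fatalf("expected trailing byte 2, got %x", got)
	}
}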
+func ParseTopics(out interface{}, fields Arguments, topics []common.Hash) error { + return parseTopicWithSetter(fields, topics, + func(arg Argument, reconstr interface{}) { + field := reflect.ValueOf(out).Elem().FieldByName(ToCamelCase(arg.Name)) + field.Set(reflect.ValueOf(reconstr)) + }) +} + +// ParseTopicsIntoMap converts the indexed topic field-value pairs into map key-value pairs. +func ParseTopicsIntoMap(out map[string]interface{}, fields Arguments, topics []common.Hash) error { + return parseTopicWithSetter(fields, topics, + func(arg Argument, reconstr interface{}) { + out[arg.Name] = reconstr + }) +} + +// parseTopicWithSetter converts the indexed topic field-value pairs and stores them using the +// provided set function. +// +// Note, dynamic types cannot be reconstructed since they get mapped to Keccak256 +// hashes as the topic value! +func parseTopicWithSetter(fields Arguments, topics []common.Hash, setter func(Argument, interface{})) error { + // Sanity check that the fields and topics match up + if len(fields) != len(topics) { + return errors.New("topic/field count mismatch") + } + // Iterate over all the fields and reconstruct them from topics + for i, arg := range fields { + if !arg.Indexed { + return errors.New("non-indexed field in topic reconstruction") + } + var reconstr interface{} + switch arg.Type.T { + case TupleTy: + return errors.New("tuple type in topic reconstruction") + case StringTy, BytesTy, SliceTy, ArrayTy: + // Array types (including strings and bytes) have their keccak256 hashes stored in the topic- not a hash + // whose bytes can be decoded to the actual value- so the best we can do is retrieve that hash + reconstr = topics[i] + case FunctionTy: + if garbage := binary.BigEndian.Uint64(topics[i][0:8]); garbage != 0 { + return fmt.Errorf("bind: got improperly encoded function type, got %v", topics[i].Bytes()) + } + var tmp [24]byte + copy(tmp[:], topics[i][8:32]) + reconstr = tmp + default: + var err error + reconstr, err = toGoType(0, arg.Type, topics[i].Bytes()) + if err != nil { + return err + } + } + // Use the setter function to store the value + setter(arg, reconstr) + } + + return nil +} diff --git a/accounts/abi/topics_test.go b/accounts/abi/topics_test.go new file mode 100644 index 000000000000..4a539a71166c --- /dev/null +++ b/accounts/abi/topics_test.go @@ -0,0 +1,381 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
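// Editor's note: an illustrative round-trip sketch (not part of this patch), assumed
// to live in package abi with the imports already used in these files. The argument
// name "amount" and the test name are hypothetical. It shows MakeTopics producing a
// filter topic for an indexed uint256 argument and ParseTopicsIntoMap recovering the
// value from that topic.
func TestTopicsRoundTripSketch(t *testing.T) {
	uint256Type, _ := NewType("uint256", "", nil)
	fields := Arguments{{Name: "amount", Type: uint256Type, Indexed: true}}

	topics, err := MakeTopics([]interface{}{big.NewInt(42)})
	if err != nil {
		t.Fatal(err)
	}
	out := make(map[string]interface{})
	if err := ParseTopicsIntoMap(out, fields, []common.Hash{topics[0][0]}); err != nil {
		t.Fatal(err)
	}
	if got := out["amount"].(*big.Int); got.Cmp(big.NewInt(42)) != 0 {
		t.Errorf("round trip failed: got %v, want 42", got)
	}
}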
+ +package abi + +import ( + "math/big" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +func TestMakeTopics(t *testing.T) { + type args struct { + query [][]interface{} + } + tests := []struct { + name string + args args + want [][]common.Hash + wantErr bool + }{ + { + "support fixed byte types, right padded to 32 bytes", + args{[][]interface{}{{[5]byte{1, 2, 3, 4, 5}}}}, + [][]common.Hash{{common.Hash{1, 2, 3, 4, 5}}}, + false, + }, + { + "support common hash types in topics", + args{[][]interface{}{{common.Hash{1, 2, 3, 4, 5}}}}, + [][]common.Hash{{common.Hash{1, 2, 3, 4, 5}}}, + false, + }, + { + "support address types in topics", + args{[][]interface{}{{common.Address{1, 2, 3, 4, 5}}}}, + [][]common.Hash{{common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5}}}, + false, + }, + { + "support *big.Int types in topics", + args{[][]interface{}{{big.NewInt(1).Lsh(big.NewInt(2), 254)}}}, + [][]common.Hash{{common.Hash{128}}}, + false, + }, + { + "support boolean types in topics", + args{[][]interface{}{ + {true}, + {false}, + }}, + [][]common.Hash{ + {common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + {common.Hash{0}}, + }, + false, + }, + { + "support int/uint(8/16/32/64) types in topics", + args{[][]interface{}{ + {int8(-2)}, + {int16(-3)}, + {int32(-4)}, + {int64(-5)}, + {int8(1)}, + {int16(256)}, + {int32(65536)}, + {int64(4294967296)}, + {uint8(1)}, + {uint16(256)}, + {uint32(65536)}, + {uint64(4294967296)}, + }}, + [][]common.Hash{ + {common.Hash{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254}}, + {common.Hash{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 253}}, + {common.Hash{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 252}}, + {common.Hash{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 251}}, + {common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + {common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0}}, + {common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0}}, + {common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, + {common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + {common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0}}, + {common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0}}, + {common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, + }, + false, + }, + { + "support string types in topics", + args{[][]interface{}{{"hello world"}}}, + [][]common.Hash{{crypto.Keccak256Hash([]byte("hello world"))}}, + false, + }, + { + "support byte slice types in topics", + args{[][]interface{}{{[]byte{1, 2, 3}}}}, + [][]common.Hash{{crypto.Keccak256Hash([]byte{1, 2, 3})}}, + false, + }, 
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := MakeTopics(tt.args.query...) + if (err != nil) != tt.wantErr { + t.Errorf("makeTopics() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("makeTopics() = %v, want %v", got, tt.want) + } + }) + } +} + +type args struct { + createObj func() interface{} + resultObj func() interface{} + resultMap func() map[string]interface{} + fields Arguments + topics []common.Hash +} + +type bytesStruct struct { + StaticBytes [5]byte +} +type int8Struct struct { + Int8Value int8 +} +type int256Struct struct { + Int256Value *big.Int +} + +type hashStruct struct { + HashValue common.Hash +} + +type funcStruct struct { + FuncValue [24]byte +} + +type topicTest struct { + name string + args args + wantErr bool +} + +func setupTopicsTests() []topicTest { + bytesType, _ := NewType("bytes5", "", nil) + int8Type, _ := NewType("int8", "", nil) + int256Type, _ := NewType("int256", "", nil) + tupleType, _ := NewType("tuple(int256,int8)", "", nil) + stringType, _ := NewType("string", "", nil) + funcType, _ := NewType("function", "", nil) + + tests := []topicTest{ + { + name: "support fixed byte types, right padded to 32 bytes", + args: args{ + createObj: func() interface{} { return &bytesStruct{} }, + resultObj: func() interface{} { return &bytesStruct{StaticBytes: [5]byte{1, 2, 3, 4, 5}} }, + resultMap: func() map[string]interface{} { + return map[string]interface{}{"staticBytes": [5]byte{1, 2, 3, 4, 5}} + }, + fields: Arguments{Argument{ + Name: "staticBytes", + Type: bytesType, + Indexed: true, + }}, + topics: []common.Hash{ + {1, 2, 3, 4, 5}, + }, + }, + wantErr: false, + }, + { + name: "int8 with negative value", + args: args{ + createObj: func() interface{} { return &int8Struct{} }, + resultObj: func() interface{} { return &int8Struct{Int8Value: -1} }, + resultMap: func() map[string]interface{} { + return map[string]interface{}{"int8Value": int8(-1)} + }, + fields: Arguments{Argument{ + Name: "int8Value", + Type: int8Type, + Indexed: true, + }}, + topics: []common.Hash{ + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + }, + wantErr: false, + }, + { + name: "int256 with negative value", + args: args{ + createObj: func() interface{} { return &int256Struct{} }, + resultObj: func() interface{} { return &int256Struct{Int256Value: big.NewInt(-1)} }, + resultMap: func() map[string]interface{} { + return map[string]interface{}{"int256Value": big.NewInt(-1)} + }, + fields: Arguments{Argument{ + Name: "int256Value", + Type: int256Type, + Indexed: true, + }}, + topics: []common.Hash{ + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + }, + wantErr: false, + }, + { + name: "hash type", + args: args{ + createObj: func() interface{} { return &hashStruct{} }, + resultObj: func() interface{} { return &hashStruct{crypto.Keccak256Hash([]byte("stringtopic"))} }, + resultMap: func() map[string]interface{} { + return map[string]interface{}{"hashValue": crypto.Keccak256Hash([]byte("stringtopic"))} + }, + fields: Arguments{Argument{ + Name: "hashValue", + Type: stringType, + Indexed: true, + }}, + topics: []common.Hash{ + crypto.Keccak256Hash([]byte("stringtopic")), + }, + }, + wantErr: false, + }, + { + name: "function type", + args: args{ + createObj: 
func() interface{} { return &funcStruct{} }, + resultObj: func() interface{} { + return &funcStruct{[24]byte{255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + }, + resultMap: func() map[string]interface{} { + return map[string]interface{}{"funcValue": [24]byte{255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + }, + fields: Arguments{Argument{ + Name: "funcValue", + Type: funcType, + Indexed: true, + }}, + topics: []common.Hash{ + {0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + }, + wantErr: false, + }, + { + name: "error on topic/field count mismatch", + args: args{ + createObj: func() interface{} { return nil }, + resultObj: func() interface{} { return nil }, + resultMap: func() map[string]interface{} { return make(map[string]interface{}) }, + fields: Arguments{Argument{ + Name: "tupletype", + Type: tupleType, + Indexed: true, + }}, + topics: []common.Hash{}, + }, + wantErr: true, + }, + { + name: "error on unindexed arguments", + args: args{ + createObj: func() interface{} { return &int256Struct{} }, + resultObj: func() interface{} { return &int256Struct{} }, + resultMap: func() map[string]interface{} { return make(map[string]interface{}) }, + fields: Arguments{Argument{ + Name: "int256Value", + Type: int256Type, + Indexed: false, + }}, + topics: []common.Hash{ + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + }, + wantErr: true, + }, + { + name: "error on tuple in topic reconstruction", + args: args{ + createObj: func() interface{} { return &tupleType }, + resultObj: func() interface{} { return &tupleType }, + resultMap: func() map[string]interface{} { return make(map[string]interface{}) }, + fields: Arguments{Argument{ + Name: "tupletype", + Type: tupleType, + Indexed: true, + }}, + topics: []common.Hash{{0}}, + }, + wantErr: true, + }, + { + name: "error on improper encoded function", + args: args{ + createObj: func() interface{} { return &funcStruct{} }, + resultObj: func() interface{} { return &funcStruct{} }, + resultMap: func() map[string]interface{} { + return make(map[string]interface{}) + }, + fields: Arguments{Argument{ + Name: "funcValue", + Type: funcType, + Indexed: true, + }}, + topics: []common.Hash{ + {0, 0, 0, 0, 0, 0, 0, 128, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + }, + wantErr: true, + }, + } + + return tests +} + +func TestParseTopics(t *testing.T) { + tests := setupTopicsTests() + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + createObj := tt.args.createObj() + if err := ParseTopics(createObj, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr { + t.Errorf("parseTopics() error = %v, wantErr %v", err, tt.wantErr) + } + resultObj := tt.args.resultObj() + if !reflect.DeepEqual(createObj, resultObj) { + t.Errorf("parseTopics() = %v, want %v", createObj, resultObj) + } + }) + } +} + +func TestParseTopicsIntoMap(t *testing.T) { + tests := setupTopicsTests() + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + outMap := make(map[string]interface{}) + if err := ParseTopicsIntoMap(outMap, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr { + 
t.Errorf("parseTopicsIntoMap() error = %v, wantErr %v", err, tt.wantErr) + } + resultMap := tt.args.resultMap() + if !reflect.DeepEqual(outMap, resultMap) { + t.Errorf("parseTopicsIntoMap() = %v, want %v", outMap, resultMap) + } + }) + } +} diff --git a/accounts/abi/type.go b/accounts/abi/type.go index 4792283ee8ef..ffa3acafe9c2 100644 --- a/accounts/abi/type.go +++ b/accounts/abi/type.go @@ -23,6 +23,8 @@ import ( "regexp" "strconv" "strings" + + "github.com/ethereum/go-ethereum/common" ) // Type enumerator @@ -42,20 +44,19 @@ const ( FunctionTy ) -// Type is the reflection of the supported argument type +// Type is the reflection of the supported argument type. type Type struct { Elem *Type - Kind reflect.Kind - Type reflect.Type Size int T byte // Our own type checking stringKind string // holds the unparsed string for deriving signatures // Tuple relative fields - TupleRawName string // Raw struct name defined in source code, may be empty. - TupleElems []*Type // Type information of all tuple fields - TupleRawNames []string // Raw field name of all tuple fields + TupleRawName string // Raw struct name defined in source code, may be empty. + TupleElems []*Type // Type information of all tuple fields + TupleRawNames []string // Raw field name of all tuple fields + TupleType reflect.Type // Underlying struct of the tuple } var ( @@ -94,20 +95,16 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty if len(intz) == 0 { // is a slice typ.T = SliceTy - typ.Kind = reflect.Slice typ.Elem = &embeddedType - typ.Type = reflect.SliceOf(embeddedType.Type) typ.stringKind = embeddedType.stringKind + sliced } else if len(intz) == 1 { - // is a array + // is an array typ.T = ArrayTy - typ.Kind = reflect.Array typ.Elem = &embeddedType typ.Size, err = strconv.Atoi(intz[0]) if err != nil { return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err) } - typ.Type = reflect.ArrayOf(typ.Size, embeddedType.Type) typ.stringKind = embeddedType.stringKind + sliced } else { return Type{}, fmt.Errorf("invalid formatting of array type") @@ -139,36 +136,24 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty // varType is the parsed abi type switch varType := parsedType[1]; varType { case "int": - typ.Kind, typ.Type = reflectIntKindAndType(false, varSize) typ.Size = varSize typ.T = IntTy case "uint": - typ.Kind, typ.Type = reflectIntKindAndType(true, varSize) typ.Size = varSize typ.T = UintTy case "bool": - typ.Kind = reflect.Bool typ.T = BoolTy - typ.Type = reflect.TypeOf(bool(false)) case "address": - typ.Kind = reflect.Array - typ.Type = addressT typ.Size = 20 typ.T = AddressTy case "string": - typ.Kind = reflect.String - typ.Type = reflect.TypeOf("") typ.T = StringTy case "bytes": if varSize == 0 { typ.T = BytesTy - typ.Kind = reflect.Slice - typ.Type = reflect.SliceOf(reflect.TypeOf(byte(0))) } else { typ.T = FixedBytesTy - typ.Kind = reflect.Array typ.Size = varSize - typ.Type = reflect.ArrayOf(varSize, reflect.TypeOf(byte(0))) } case "tuple": var ( @@ -178,17 +163,20 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty expression string // canonical parameter expression ) expression += "(" + overloadedNames := make(map[string]string) for idx, c := range components { cType, err := NewType(c.Type, c.InternalType, c.Components) if err != nil { return Type{}, err } - if ToCamelCase(c.Name) == "" { - return Type{}, errors.New("abi: purely anonymous or underscored field is not supported") + fieldName, err := 
overloadedArgName(c.Name, overloadedNames) + if err != nil { + return Type{}, err } + overloadedNames[fieldName] = fieldName fields = append(fields, reflect.StructField{ - Name: ToCamelCase(c.Name), // reflect.StructOf will panic for any exported field. - Type: cType.Type, + Name: fieldName, // reflect.StructOf will panic for any exported field. + Type: cType.GetType(), Tag: reflect.StructTag("json:\"" + c.Name + "\""), }) elems = append(elems, &cType) @@ -199,8 +187,8 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty } } expression += ")" - typ.Kind = reflect.Struct - typ.Type = reflect.StructOf(fields) + + typ.TupleType = reflect.StructOf(fields) typ.TupleElems = elems typ.TupleRawNames = names typ.T = TupleTy @@ -217,10 +205,8 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty } case "function": - typ.Kind = reflect.Array typ.T = FunctionTy typ.Size = 24 - typ.Type = reflect.ArrayOf(24, reflect.TypeOf(byte(0))) default: return Type{}, fmt.Errorf("unsupported arg type: %s", t) } @@ -228,7 +214,57 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty return } -// String implements Stringer +// GetType returns the reflection type of the ABI type. +func (t Type) GetType() reflect.Type { + switch t.T { + case IntTy: + return reflectIntType(false, t.Size) + case UintTy: + return reflectIntType(true, t.Size) + case BoolTy: + return reflect.TypeOf(false) + case StringTy: + return reflect.TypeOf("") + case SliceTy: + return reflect.SliceOf(t.Elem.GetType()) + case ArrayTy: + return reflect.ArrayOf(t.Size, t.Elem.GetType()) + case TupleTy: + return t.TupleType + case AddressTy: + return reflect.TypeOf(common.Address{}) + case FixedBytesTy: + return reflect.ArrayOf(t.Size, reflect.TypeOf(byte(0))) + case BytesTy: + return reflect.SliceOf(reflect.TypeOf(byte(0))) + case HashTy: + // hashtype currently not used + return reflect.ArrayOf(32, reflect.TypeOf(byte(0))) + case FixedPointTy: + // fixedpoint type currently not used + return reflect.ArrayOf(32, reflect.TypeOf(byte(0))) + case FunctionTy: + return reflect.ArrayOf(24, reflect.TypeOf(byte(0))) + default: + panic("Invalid type") + } +} + +func overloadedArgName(rawName string, names map[string]string) (string, error) { + fieldName := ToCamelCase(rawName) + if fieldName == "" { + return "", errors.New("abi: purely anonymous or underscored field is not supported") + } + // Handle overloaded fieldNames + _, ok := names[fieldName] + for idx := 0; ok; idx++ { + fieldName = fmt.Sprintf("%s%d", ToCamelCase(rawName), idx) + _, ok = names[fieldName] + } + return fieldName, nil +} + +// String implements Stringer. 
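// Editor's note: illustrative sketch only (not part of this patch), assumed to live in
// package abi; the test name is hypothetical. With the Kind and Type fields removed
// from Type, callers now derive the Go representation on demand through GetType, as
// sketched here for a fixed-size uint256 array and an address.
func TestGetTypeSketch(t *testing.T) {
	arr, _ := NewType("uint256[2]", "", nil)
	if got, want := arr.GetType(), reflect.ArrayOf(2, reflect.TypeOf(&big.Int{})); got != want {
		t.Errorf("GetType() = %v, want %v", got, want)
	}
	addr, _ := NewType("address", "", nil)
	if got := addr.GetType(); got != reflect.TypeOf(common.Address{}) {
		t.Errorf("GetType() = %v, want %v", got, reflect.TypeOf(common.Address{}))
	}
}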
func (t Type) String() (out string) { return t.stringKind } @@ -310,7 +346,7 @@ func (t Type) pack(v reflect.Value) ([]byte, error) { return append(ret, tail...), nil default: - return packElement(t, v), nil + return packElement(t, v) } } @@ -350,7 +386,7 @@ func isDynamicType(t Type) bool { func getTypeSize(t Type) int { if t.T == ArrayTy && !isDynamicType(*t.Elem) { // Recursively calculate type size if it is a nested array - if t.Elem.T == ArrayTy { + if t.Elem.T == ArrayTy || t.Elem.T == TupleTy { return t.Size * getTypeSize(*t.Elem) } return t.Size * 32 diff --git a/accounts/abi/type_test.go b/accounts/abi/type_test.go index a2c78dc2e020..8c3aedca6a4d 100644 --- a/accounts/abi/type_test.go +++ b/accounts/abi/type_test.go @@ -36,58 +36,58 @@ func TestTypeRegexp(t *testing.T) { components []ArgumentMarshaling kind Type }{ - {"bool", nil, Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}}, - {"bool[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool(nil)), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}}, - {"bool[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}}, - {"bool[2][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}}, - {"bool[][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}}, - {"bool[][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}}, - {"bool[2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}}, - {"bool[2][][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][][2]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}}, - {"bool[2][2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, 
stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}}, - {"bool[][][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}}, - {"bool[][2][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][2][]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}}, - {"int8", nil, Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}}, - {"int16", nil, Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}}, - {"int32", nil, Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}}, - {"int64", nil, Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}}, - {"int256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}}, - {"int8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}}, - {"int8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}}, - {"int16[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}}, - {"int16[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}}, - {"int32[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}}, - {"int32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}}, - {"int64[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}}, - {"int64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}}, - {"int256[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}}, - {"int256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}}, - {"uint8", nil, 
Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}}, - {"uint16", nil, Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}}, - {"uint32", nil, Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}}, - {"uint64", nil, Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}}, - {"uint256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}}, - {"uint8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}}, - {"uint8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}}, - {"uint16[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}}, - {"uint16[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}}, - {"uint32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}}, - {"uint32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}}, - {"uint64[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}}, - {"uint64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}}, - {"uint256[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}}, - {"uint256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}}, - {"bytes32", nil, Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}}, - {"bytes[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]byte{}), Elem: &Type{Kind: reflect.Slice, Type: reflect.TypeOf([]byte{}), T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}}, - {"bytes[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]byte{}), Elem: &Type{T: BytesTy, Type: reflect.TypeOf([]byte{}), Kind: reflect.Slice, stringKind: "bytes"}, stringKind: "bytes[2]"}}, - {"bytes32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][32]byte{}), Elem: &Type{Kind: reflect.Array, Type: reflect.TypeOf([32]byte{}), T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}}, - {"bytes32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: 
reflect.TypeOf([2][32]byte{}), Elem: &Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}, stringKind: "bytes32[2]"}}, - {"string", nil, Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}}, - {"string[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]string{}), Elem: &Type{Kind: reflect.String, Type: reflect.TypeOf(""), T: StringTy, stringKind: "string"}, stringKind: "string[]"}}, - {"string[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]string{}), Elem: &Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}, stringKind: "string[2]"}}, - {"address", nil, Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}}, - {"address[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}}, - {"address[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}}, + {"bool", nil, Type{T: BoolTy, stringKind: "bool"}}, + {"bool[]", nil, Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}}, + {"bool[2]", nil, Type{Size: 2, T: ArrayTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}}, + {"bool[2][]", nil, Type{T: SliceTy, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}}, + {"bool[][]", nil, Type{T: SliceTy, Elem: &Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}}, + {"bool[][2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}}, + {"bool[2][2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}}, + {"bool[2][][2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: SliceTy, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}}, + {"bool[2][2][2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}}, + {"bool[][][]", nil, Type{T: SliceTy, Elem: &Type{T: SliceTy, Elem: &Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}}, + {"bool[][2][]", nil, Type{T: SliceTy, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}}, + {"int8", nil, Type{Size: 8, T: IntTy, stringKind: "int8"}}, + {"int16", nil, Type{Size: 16, T: IntTy, stringKind: "int16"}}, + {"int32", nil, Type{Size: 32, T: IntTy, stringKind: "int32"}}, + {"int64", nil, Type{Size: 64, T: IntTy, stringKind: "int64"}}, + {"int256", nil, Type{Size: 256, T: IntTy, stringKind: "int256"}}, + {"int8[]", nil, Type{T: SliceTy, Elem: &Type{Size: 8, T: IntTy, stringKind: "int8"}, stringKind: 
"int8[]"}}, + {"int8[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}}, + {"int16[]", nil, Type{T: SliceTy, Elem: &Type{Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}}, + {"int16[2]", nil, Type{Size: 2, T: ArrayTy, Elem: &Type{Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}}, + {"int32[]", nil, Type{T: SliceTy, Elem: &Type{Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}}, + {"int32[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}}, + {"int64[]", nil, Type{T: SliceTy, Elem: &Type{Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}}, + {"int64[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}}, + {"int256[]", nil, Type{T: SliceTy, Elem: &Type{Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}}, + {"int256[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}}, + {"uint8", nil, Type{Size: 8, T: UintTy, stringKind: "uint8"}}, + {"uint16", nil, Type{Size: 16, T: UintTy, stringKind: "uint16"}}, + {"uint32", nil, Type{Size: 32, T: UintTy, stringKind: "uint32"}}, + {"uint64", nil, Type{Size: 64, T: UintTy, stringKind: "uint64"}}, + {"uint256", nil, Type{Size: 256, T: UintTy, stringKind: "uint256"}}, + {"uint8[]", nil, Type{T: SliceTy, Elem: &Type{Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}}, + {"uint8[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}}, + {"uint16[]", nil, Type{T: SliceTy, Elem: &Type{Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}}, + {"uint16[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}}, + {"uint32[]", nil, Type{T: SliceTy, Elem: &Type{Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}}, + {"uint32[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}}, + {"uint64[]", nil, Type{T: SliceTy, Elem: &Type{Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}}, + {"uint64[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}}, + {"uint256[]", nil, Type{T: SliceTy, Elem: &Type{Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}}, + {"uint256[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}}, + {"bytes32", nil, Type{T: FixedBytesTy, Size: 32, stringKind: "bytes32"}}, + {"bytes[]", nil, Type{T: SliceTy, Elem: &Type{T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}}, + {"bytes[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[2]"}}, + {"bytes32[]", nil, Type{T: SliceTy, Elem: &Type{T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}}, + {"bytes32[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[2]"}}, + {"string", nil, Type{T: StringTy, stringKind: "string"}}, + {"string[]", nil, Type{T: SliceTy, Elem: &Type{T: StringTy, stringKind: "string"}, stringKind: "string[]"}}, + {"string[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: StringTy, stringKind: "string"}, stringKind: "string[2]"}}, + 
{"address", nil, Type{Size: 20, T: AddressTy, stringKind: "address"}}, + {"address[]", nil, Type{T: SliceTy, Elem: &Type{Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}}, + {"address[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}}, // TODO when fixed types are implemented properly // {"fixed", nil, Type{}}, // {"fixed128x128", nil, Type{}}, @@ -95,14 +95,14 @@ func TestTypeRegexp(t *testing.T) { // {"fixed[2]", nil, Type{}}, // {"fixed128x128[]", nil, Type{}}, // {"fixed128x128[2]", nil, Type{}}, - {"tuple", []ArgumentMarshaling{{Name: "a", Type: "int64"}}, Type{Kind: reflect.Struct, T: TupleTy, Type: reflect.TypeOf(struct { + {"tuple", []ArgumentMarshaling{{Name: "a", Type: "int64"}}, Type{T: TupleTy, TupleType: reflect.TypeOf(struct { A int64 `json:"a"` }{}), stringKind: "(int64)", - TupleElems: []*Type{{Kind: reflect.Int64, T: IntTy, Type: reflect.TypeOf(int64(0)), Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"a"}}}, - {"tuple with long name", []ArgumentMarshaling{{Name: "aTypicalParamName", Type: "int64"}}, Type{Kind: reflect.Struct, T: TupleTy, Type: reflect.TypeOf(struct { + TupleElems: []*Type{{T: IntTy, Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"a"}}}, + {"tuple with long name", []ArgumentMarshaling{{Name: "aTypicalParamName", Type: "int64"}}, Type{T: TupleTy, TupleType: reflect.TypeOf(struct { ATypicalParamName int64 `json:"aTypicalParamName"` }{}), stringKind: "(int64)", - TupleElems: []*Type{{Kind: reflect.Int64, T: IntTy, Type: reflect.TypeOf(int64(0)), Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"aTypicalParamName"}}}, + TupleElems: []*Type{{T: IntTy, Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"aTypicalParamName"}}}, } for _, tt := range tests { @@ -255,7 +255,7 @@ func TestTypeCheck(t *testing.T) { {"bytes", nil, [2]byte{0, 1}, "abi: cannot use array as type slice as argument"}, {"bytes", nil, common.Hash{1}, "abi: cannot use array as type slice as argument"}, {"string", nil, "hello world", ""}, - {"string", nil, string(""), ""}, + {"string", nil, "", ""}, {"string", nil, []byte{}, "abi: cannot use slice as type string as argument"}, {"bytes32[]", nil, [][32]byte{{}}, ""}, {"function", nil, [24]byte{}, ""}, @@ -306,3 +306,63 @@ func TestTypeCheck(t *testing.T) { } } } + +func TestInternalType(t *testing.T) { + components := []ArgumentMarshaling{{Name: "a", Type: "int64"}} + internalType := "struct a.b[]" + kind := Type{ + T: TupleTy, + TupleType: reflect.TypeOf(struct { + A int64 `json:"a"` + }{}), + stringKind: "(int64)", + TupleRawName: "ab[]", + TupleElems: []*Type{{T: IntTy, Size: 64, stringKind: "int64"}}, + TupleRawNames: []string{"a"}, + } + + blob := "tuple" + typ, err := NewType(blob, internalType, components) + if err != nil { + t.Errorf("type %q: failed to parse type string: %v", blob, err) + } + if !reflect.DeepEqual(typ, kind) { + t.Errorf("type %q: parsed type mismatch:\nGOT %s\nWANT %s ", blob, spew.Sdump(typeWithoutStringer(typ)), spew.Sdump(typeWithoutStringer(kind))) + } +} + +func TestGetTypeSize(t *testing.T) { + var testCases = []struct { + typ string + components []ArgumentMarshaling + typSize int + }{ + // simple array + {"uint256[2]", nil, 32 * 2}, + {"address[3]", nil, 32 * 3}, + {"bytes32[4]", nil, 32 * 4}, + // array array + {"uint256[2][3][4]", nil, 32 * (2 * 3 * 4)}, + // array tuple + {"tuple[2]", []ArgumentMarshaling{{Name: "x", Type: "bytes32"}, {Name: "y", Type: "bytes32"}}, (32 * 2) * 2}, + 
// simple tuple + {"tuple", []ArgumentMarshaling{{Name: "x", Type: "uint256"}, {Name: "y", Type: "uint256"}}, 32 * 2}, + // tuple array + {"tuple", []ArgumentMarshaling{{Name: "x", Type: "bytes32[2]"}}, 32 * 2}, + // tuple tuple + {"tuple", []ArgumentMarshaling{{Name: "x", Type: "tuple", Components: []ArgumentMarshaling{{Name: "x", Type: "bytes32"}}}}, 32}, + {"tuple", []ArgumentMarshaling{{Name: "x", Type: "tuple", Components: []ArgumentMarshaling{{Name: "x", Type: "bytes32[2]"}, {Name: "y", Type: "uint256"}}}}, 32 * (2 + 1)}, + } + + for i, data := range testCases { + typ, err := NewType(data.typ, "", data.components) + if err != nil { + t.Errorf("type %q: failed to parse type string: %v", data.typ, err) + } + + result := getTypeSize(typ) + if result != data.typSize { + t.Errorf("case %d type %q: get type size error: actual: %d expected: %d", i, data.typ, result, data.typSize) + } + } +} diff --git a/accounts/abi/unpack.go b/accounts/abi/unpack.go index b2e61d06c416..ec0698493627 100644 --- a/accounts/abi/unpack.go +++ b/accounts/abi/unpack.go @@ -26,52 +26,54 @@ import ( ) var ( - maxUint256 = big.NewInt(0).Add( - big.NewInt(0).Exp(big.NewInt(2), big.NewInt(256), nil), - big.NewInt(-1)) - maxInt256 = big.NewInt(0).Add( - big.NewInt(0).Exp(big.NewInt(2), big.NewInt(255), nil), - big.NewInt(-1)) + // MaxUint256 is the maximum value that can be represented by a uint256. + MaxUint256 = new(big.Int).Sub(new(big.Int).Lsh(common.Big1, 256), common.Big1) + // MaxInt256 is the maximum value that can be represented by an int256. + MaxInt256 = new(big.Int).Sub(new(big.Int).Lsh(common.Big1, 255), common.Big1) ) -// reads the integer based on its kind -func readInteger(typ byte, kind reflect.Kind, b []byte) interface{} { - switch kind { - case reflect.Uint8: - return b[len(b)-1] - case reflect.Uint16: - return binary.BigEndian.Uint16(b[len(b)-2:]) - case reflect.Uint32: - return binary.BigEndian.Uint32(b[len(b)-4:]) - case reflect.Uint64: - return binary.BigEndian.Uint64(b[len(b)-8:]) - case reflect.Int8: +// ReadInteger reads the integer based on its kind and returns the appropriate value. +func ReadInteger(typ Type, b []byte) interface{} { + if typ.T == UintTy { + switch typ.Size { + case 8: + return b[len(b)-1] + case 16: + return binary.BigEndian.Uint16(b[len(b)-2:]) + case 32: + return binary.BigEndian.Uint32(b[len(b)-4:]) + case 64: + return binary.BigEndian.Uint64(b[len(b)-8:]) + default: + // the only case left for unsigned integer is uint256. + return new(big.Int).SetBytes(b) + } + } + switch typ.Size { + case 8: return int8(b[len(b)-1]) - case reflect.Int16: + case 16: return int16(binary.BigEndian.Uint16(b[len(b)-2:])) - case reflect.Int32: + case 32: return int32(binary.BigEndian.Uint32(b[len(b)-4:])) - case reflect.Int64: + case 64: return int64(binary.BigEndian.Uint64(b[len(b)-8:])) default: - // the only case lefts for integer is int256/uint256. - // big.SetBytes can't tell if a number is negative, positive on itself. + // the only case left for integer is int256 + // big.SetBytes can't tell if a number is negative or positive in itself. // On EVM, if the returned number > max int256, it is negative. + // A number is > max int256 if the bit at position 255 is set. 
ret := new(big.Int).SetBytes(b) - if typ == UintTy { - return ret - } - - if ret.Cmp(maxInt256) > 0 { - ret.Add(maxUint256, big.NewInt(0).Neg(ret)) - ret.Add(ret, big.NewInt(1)) + if ret.Bit(255) == 1 { + ret.Add(MaxUint256, new(big.Int).Neg(ret)) + ret.Add(ret, common.Big1) ret.Neg(ret) } return ret } } -// reads a bool +// readBool reads a bool. func readBool(word []byte) (bool, error) { for _, b := range word[:31] { if b != 0 { @@ -89,7 +91,8 @@ func readBool(word []byte) (bool, error) { } // A function type is simply the address with the function selection signature at the end. -// This enforces that standard by always presenting it as a 24-array (address + sig = 24 bytes) +// +// readFunctionType enforces that standard by always presenting it as a 24-array (address + sig = 24 bytes) func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) { if t.T != FunctionTy { return [24]byte{}, fmt.Errorf("abi: invalid type in call to make function type byte array") @@ -102,20 +105,20 @@ func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) { return } -// through reflection, creates a fixed array to be read from -func readFixedBytes(t Type, word []byte) (interface{}, error) { +// ReadFixedBytes uses reflection to create a fixed array to be read from. +func ReadFixedBytes(t Type, word []byte) (interface{}, error) { if t.T != FixedBytesTy { return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array") } // convert - array := reflect.New(t.Type).Elem() + array := reflect.New(t.GetType()).Elem() reflect.Copy(array, reflect.ValueOf(word[0:t.Size])) return array.Interface(), nil } -// iteratively unpack elements +// forEachUnpack iteratively unpack elements. func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) { if size < 0 { return nil, fmt.Errorf("cannot marshal input to array, size is negative (%d)", size) @@ -129,10 +132,10 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) if t.T == SliceTy { // declare our slice - refSlice = reflect.MakeSlice(t.Type, size, size) + refSlice = reflect.MakeSlice(t.GetType(), size, size) } else if t.T == ArrayTy { // declare our array - refSlice = reflect.New(t.Type).Elem() + refSlice = reflect.New(t.GetType()).Elem() } else { return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage") } @@ -156,7 +159,7 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) } func forTupleUnpack(t Type, output []byte) (interface{}, error) { - retval := reflect.New(t.Type).Elem() + retval := reflect.New(t.GetType()).Elem() virtualArgs := 0 for index, elem := range t.TupleElems { marshalledValue, err := toGoType((index+virtualArgs)*32, *elem, output) @@ -216,21 +219,23 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) { return nil, err } return forTupleUnpack(t, output[begin:]) - } else { - return forTupleUnpack(t, output[index:]) } + return forTupleUnpack(t, output[index:]) case SliceTy: return forEachUnpack(t, output[begin:], 0, length) case ArrayTy: if isDynamicType(*t.Elem) { - offset := int64(binary.BigEndian.Uint64(returnOutput[len(returnOutput)-8:])) + offset := binary.BigEndian.Uint64(returnOutput[len(returnOutput)-8:]) + if offset > uint64(len(output)) { + return nil, fmt.Errorf("abi: toGoType offset greater than output length: offset: %d, len(output): %d", offset, len(output)) + } return forEachUnpack(t, output[offset:], 0, t.Size) } return forEachUnpack(t, output[index:], 0, t.Size) case StringTy: // 
variable arrays are written at the end of the return bytes return string(output[begin : begin+length]), nil case IntTy, UintTy: - return readInteger(t.T, t.Kind, returnOutput), nil + return ReadInteger(t, returnOutput), nil case BoolTy: return readBool(returnOutput) case AddressTy: @@ -240,7 +245,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) { case BytesTy: return output[begin : begin+length], nil case FixedBytesTy: - return readFixedBytes(t, returnOutput) + return ReadFixedBytes(t, returnOutput) case FunctionTy: return readFunctionType(t, returnOutput) default: @@ -248,7 +253,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) { } } -// interprets a 32 byte slice as an offset and then determines which indice to look to decode the type. +// lengthPrefixPointsTo interprets a 32 byte slice as an offset and then determines which indices to look to decode the type. func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err error) { bigOffsetEnd := big.NewInt(0).SetBytes(output[index : index+32]) bigOffsetEnd.Add(bigOffsetEnd, common.Big32) diff --git a/accounts/abi/unpack_test.go b/accounts/abi/unpack_test.go index dfea8db671d8..b88f77805bfa 100644 --- a/accounts/abi/unpack_test.go +++ b/accounts/abi/unpack_test.go @@ -30,6 +30,32 @@ import ( "github.com/stretchr/testify/require" ) +// TestUnpack tests the general pack/unpack tests in packing_test.go +func TestUnpack(t *testing.T) { + for i, test := range packUnpackTests { + t.Run(strconv.Itoa(i)+" "+test.def, func(t *testing.T) { + //Unpack + def := fmt.Sprintf(`[{ "name" : "method", "type": "function", "outputs": %s}]`, test.def) + abi, err := JSON(strings.NewReader(def)) + if err != nil { + t.Fatalf("invalid ABI definition %s: %v", def, err) + } + encb, err := hex.DecodeString(test.packed) + if err != nil { + t.Fatalf("invalid hex %s: %v", test.packed, err) + } + out, err := abi.Unpack("method", encb) + if err != nil { + t.Errorf("test %d (%v) failed: %v", i, test.def, err) + return + } + if !reflect.DeepEqual(test.unpacked, ConvertType(out[0], test.unpacked)) { + t.Errorf("test %d (%v) failed: expected %v, got %v", i, test.def, test.unpacked, out[0]) + } + }) + } +} + type unpackTest struct { def string // ABI definition JSON enc string // evm return data @@ -52,16 +78,6 @@ func (test unpackTest) checkError(err error) error { var unpackTests = []unpackTest{ // Bools - { - def: `[{ "type": "bool" }]`, - enc: "0000000000000000000000000000000000000000000000000000000000000001", - want: true, - }, - { - def: `[{ "type": "bool" }]`, - enc: "0000000000000000000000000000000000000000000000000000000000000000", - want: false, - }, { def: `[{ "type": "bool" }]`, enc: "0000000000000000000000000000000000000000000000000001000000000001", @@ -75,11 +91,6 @@ var unpackTests = []unpackTest{ err: "abi: improperly encoded boolean value", }, // Integers - { - def: `[{"type": "uint32"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000001", - want: uint32(1), - }, { def: `[{"type": "uint32"}]`, enc: "0000000000000000000000000000000000000000000000000000000000000001", @@ -92,16 +103,6 @@ var unpackTests = []unpackTest{ want: uint16(0), err: "abi: cannot unmarshal *big.Int in to uint16", }, - { - def: `[{"type": "uint17"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000001", - want: big.NewInt(1), - }, - { - def: `[{"type": "int32"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000001", - want: int32(1), - }, { def: `[{"type": 
"int32"}]`, enc: "0000000000000000000000000000000000000000000000000000000000000001", @@ -114,38 +115,10 @@ var unpackTests = []unpackTest{ want: int16(0), err: "abi: cannot unmarshal *big.Int in to int16", }, - { - def: `[{"type": "int17"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000001", - want: big.NewInt(1), - }, - { - def: `[{"type": "int256"}]`, - enc: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - want: big.NewInt(-1), - }, - // Address - { - def: `[{"type": "address"}]`, - enc: "0000000000000000000000000100000000000000000000000000000000000000", - want: common.Address{1}, - }, - // Bytes - { - def: `[{"type": "bytes32"}]`, - enc: "0100000000000000000000000000000000000000000000000000000000000000", - want: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - }, { def: `[{"type": "bytes"}]`, enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200100000000000000000000000000000000000000000000000000000000000000", - want: common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - def: `[{"type": "bytes"}]`, - enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200100000000000000000000000000000000000000000000000000000000000000", - want: [32]byte{}, - err: "abi: cannot unmarshal []uint8 in to [32]uint8", + want: [32]byte{1}, }, { def: `[{"type": "bytes32"}]`, @@ -153,245 +126,13 @@ var unpackTests = []unpackTest{ want: []byte(nil), err: "abi: cannot unmarshal [32]uint8 in to []uint8", }, - { - def: `[{"type": "bytes32"}]`, - enc: "0100000000000000000000000000000000000000000000000000000000000000", - want: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - }, - // Functions - { - def: `[{"type": "function"}]`, - enc: "0100000000000000000000000000000000000000000000000000000000000000", - want: [24]byte{1}, - }, - // Slice and Array - { - def: `[{"type": "uint8[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: []uint8{1, 2}, - }, - { - def: `[{"type": "uint8[]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000", - want: []uint8{}, - }, - { - def: `[{"type": "uint256[]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000", - want: []*big.Int{}, - }, - { - def: `[{"type": "uint8[2]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [2]uint8{1, 2}, - }, - // multi dimensional, if these pass, all types that don't require length prefix should pass - { - def: `[{"type": "uint8[][]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000", - want: [][]uint8{}, - }, - { - def: `[{"type": "uint8[][]"}]`, - enc: 
"00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [][]uint8{{1, 2}, {1, 2}}, - }, - { - def: `[{"type": "uint8[][]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003", - want: [][]uint8{{1, 2}, {1, 2, 3}}, - }, - { - def: `[{"type": "uint8[2][2]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [2][2]uint8{{1, 2}, {1, 2}}, - }, - { - def: `[{"type": "uint8[][2]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - want: [2][]uint8{{}, {}}, - }, - { - def: `[{"type": "uint8[][2]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001", - want: [2][]uint8{{1}, {1}}, - }, - { - def: `[{"type": "uint8[2][]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000", - want: [][2]uint8{}, - }, - { - def: `[{"type": "uint8[2][]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [][2]uint8{{1, 2}}, - }, - { - def: `[{"type": "uint8[2][]"}]`, - enc: 
"000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [][2]uint8{{1, 2}, {1, 2}}, - }, - { - def: `[{"type": "uint16[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: []uint16{1, 2}, - }, - { - def: `[{"type": "uint16[2]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [2]uint16{1, 2}, - }, - { - def: `[{"type": "uint32[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: []uint32{1, 2}, - }, - { - def: `[{"type": "uint32[2]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [2]uint32{1, 2}, - }, - { - def: `[{"type": "uint32[2][3][4]"}]`, - enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018", - want: [4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}}, - }, - { - def: `[{"type": "uint64[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - 
want: []uint64{1, 2}, - }, - { - def: `[{"type": "uint64[2]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [2]uint64{1, 2}, - }, - { - def: `[{"type": "uint256[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: []*big.Int{big.NewInt(1), big.NewInt(2)}, - }, - { - def: `[{"type": "uint256[3]"}]`, - enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003", - want: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}, - }, - { - def: `[{"type": "string[4]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000000548656c6c6f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005576f726c64000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b476f2d657468657265756d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000", - want: [4]string{"Hello", "World", "Go-ethereum", "Ethereum"}, - }, - { - def: `[{"type": "string[]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b676f2d657468657265756d000000000000000000000000000000000000000000", - want: []string{"Ethereum", "go-ethereum"}, - }, - { - def: `[{"type": "bytes[]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000003f0f0f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003f0f0f00000000000000000000000000000000000000000000000000000000000", - want: [][]byte{{0xf0, 0xf0, 0xf0}, {0xf0, 0xf0, 0xf0}}, - }, - { - def: `[{"type": "uint256[2][][]"}]`, - enc: 
"00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000003e80000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000003e8", - want: [][][2]*big.Int{{{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}, {{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}}, - }, - { - def: `[{"type": "int8[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: []int8{1, 2}, - }, - { - def: `[{"type": "int8[2]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [2]int8{1, 2}, - }, - { - def: `[{"type": "int16[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: []int16{1, 2}, - }, - { - def: `[{"type": "int16[2]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [2]int16{1, 2}, - }, - { - def: `[{"type": "int32[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: []int32{1, 2}, - }, - { - def: `[{"type": "int32[2]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [2]int32{1, 2}, - }, - { - def: `[{"type": "int64[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: []int64{1, 2}, - }, - { - def: `[{"type": "int64[2]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [2]int64{1, 2}, - }, - { - def: `[{"type": "int256[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: []*big.Int{big.NewInt(1), 
big.NewInt(2)}, - }, - { - def: `[{"type": "int256[3]"}]`, - enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003", - want: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}, - }, - // struct outputs - { - def: `[{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: struct { - Int1 *big.Int - Int2 *big.Int - }{big.NewInt(1), big.NewInt(2)}, - }, - { - def: `[{"name":"int_one","type":"int256"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: struct { - IntOne *big.Int - }{big.NewInt(1)}, - }, - { - def: `[{"name":"int__one","type":"int256"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: struct { - IntOne *big.Int - }{big.NewInt(1)}, - }, - { - def: `[{"name":"int_one_","type":"int256"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: struct { - IntOne *big.Int - }{big.NewInt(1)}, - }, - { - def: `[{"name":"int_one","type":"int256"}, {"name":"intone","type":"int256"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: struct { - IntOne *big.Int - Intone *big.Int - }{big.NewInt(1), big.NewInt(2)}, - }, { def: `[{"name":"___","type":"int256"}]`, enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", want: struct { IntOne *big.Int Intone *big.Int - }{}, - err: "abi: purely underscored output cannot unpack to struct", + }{IntOne: big.NewInt(1)}, }, { def: `[{"name":"int_one","type":"int256"},{"name":"IntOne","type":"int256"}]`, @@ -438,12 +179,37 @@ var unpackTests = []unpackTest{ }{}, err: "abi: purely underscored output cannot unpack to struct", }, + // Make sure only the first argument is consumed + { + def: `[{"name":"int_one","type":"int256"}]`, + enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", + want: struct { + IntOne *big.Int + }{big.NewInt(1)}, + }, + { + def: `[{"name":"int__one","type":"int256"}]`, + enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", + want: struct { + IntOne *big.Int + }{big.NewInt(1)}, + }, + { + def: `[{"name":"int_one_","type":"int256"}]`, + enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", + want: struct { + IntOne *big.Int + }{big.NewInt(1)}, + }, } -func TestUnpack(t *testing.T) { +// TestLocalUnpackTests runs test specially designed only for unpacking. 
+// All test cases that can be used to test packing and unpacking should move to packing_test.go +func TestLocalUnpackTests(t *testing.T) { for i, test := range unpackTests { t.Run(strconv.Itoa(i), func(t *testing.T) { - def := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def) + //Unpack + def := fmt.Sprintf(`[{ "name" : "method", "type": "function", "outputs": %s}]`, test.def) abi, err := JSON(strings.NewReader(def)) if err != nil { t.Fatalf("invalid ABI definition %s: %v", def, err) @@ -453,7 +219,7 @@ func TestUnpack(t *testing.T) { t.Fatalf("invalid hex %s: %v", test.enc, err) } outptr := reflect.New(reflect.TypeOf(test.want)) - err = abi.Unpack(outptr.Interface(), "method", encb) + err = abi.UnpackIntoInterface(outptr.Interface(), "method", encb) if err := test.checkError(err); err != nil { t.Errorf("test %d (%v) failed: %v", i, test.def, err) return @@ -466,7 +232,7 @@ func TestUnpack(t *testing.T) { } } -func TestUnpackSetDynamicArrayOutput(t *testing.T) { +func TestUnpackIntoInterfaceSetDynamicArrayOutput(t *testing.T) { abi, err := JSON(strings.NewReader(`[{"constant":true,"inputs":[],"name":"testDynamicFixedBytes15","outputs":[{"name":"","type":"bytes15[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"testDynamicFixedBytes32","outputs":[{"name":"","type":"bytes32[]"}],"payable":false,"stateMutability":"view","type":"function"}]`)) if err != nil { t.Fatal(err) @@ -481,7 +247,7 @@ func TestUnpackSetDynamicArrayOutput(t *testing.T) { ) // test 32 - err = abi.Unpack(&out32, "testDynamicFixedBytes32", marshalledReturn32) + err = abi.UnpackIntoInterface(&out32, "testDynamicFixedBytes32", marshalledReturn32) if err != nil { t.Fatal(err) } @@ -498,7 +264,7 @@ func TestUnpackSetDynamicArrayOutput(t *testing.T) { } // test 15 - err = abi.Unpack(&out15, "testDynamicFixedBytes32", marshalledReturn15) + err = abi.UnpackIntoInterface(&out15, "testDynamicFixedBytes32", marshalledReturn15) if err != nil { t.Fatal(err) } @@ -522,7 +288,7 @@ type methodMultiOutput struct { func methodMultiReturn(require *require.Assertions) (ABI, []byte, methodMultiOutput) { const definition = `[ - { "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]` + { "name" : "multi", "type": "function", "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]` var expected = methodMultiOutput{big.NewInt(1), "hello"} abi, err := JSON(strings.NewReader(definition)) @@ -592,14 +358,14 @@ func TestMethodMultiReturn(t *testing.T) { }, { &[]interface{}{new(int)}, &[]interface{}{}, - "abi: insufficient number of elements in the list/array for unpack, want 2, got 1", + "abi: insufficient number of arguments for unpack, want 2, got 1", "Can not unpack into a slice with wrong types", }} for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { require := require.New(t) - err := abi.Unpack(tc.dest, "multi", data) + err := abi.UnpackIntoInterface(tc.dest, "multi", data) if tc.error == "" { require.Nil(err, "Should be able to unpack method outputs.") require.Equal(tc.expected, tc.dest) @@ -611,7 +377,7 @@ func TestMethodMultiReturn(t *testing.T) { } func TestMultiReturnWithArray(t *testing.T) { - const definition = `[{"name" : "multi", "outputs": [{"type": "uint64[3]"}, {"type": "uint64"}]}]` + const definition = `[{"name" : "multi", "type": "function", "outputs": [{"type": "uint64[3]"}, {"type": "uint64"}]}]` abi, err := 
JSON(strings.NewReader(definition)) if err != nil { t.Fatal(err) @@ -622,7 +388,7 @@ func TestMultiReturnWithArray(t *testing.T) { ret1, ret1Exp := new([3]uint64), [3]uint64{9, 9, 9} ret2, ret2Exp := new(uint64), uint64(8) - if err := abi.Unpack(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil { + if err := abi.UnpackIntoInterface(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil { t.Fatal(err) } if !reflect.DeepEqual(*ret1, ret1Exp) { @@ -634,7 +400,7 @@ func TestMultiReturnWithArray(t *testing.T) { } func TestMultiReturnWithStringArray(t *testing.T) { - const definition = `[{"name" : "multi", "outputs": [{"name": "","type": "uint256[3]"},{"name": "","type": "address"},{"name": "","type": "string[2]"},{"name": "","type": "bool"}]}]` + const definition = `[{"name" : "multi", "type": "function", "outputs": [{"name": "","type": "uint256[3]"},{"name": "","type": "address"},{"name": "","type": "string[2]"},{"name": "","type": "bool"}]}]` abi, err := JSON(strings.NewReader(definition)) if err != nil { t.Fatal(err) @@ -646,7 +412,7 @@ func TestMultiReturnWithStringArray(t *testing.T) { ret2, ret2Exp := new(common.Address), common.HexToAddress("ab1257528b3782fb40d7ed5f72e624b744dffb2f") ret3, ret3Exp := new([2]string), [2]string{"Ethereum", "Hello, Ethereum!"} ret4, ret4Exp := new(bool), false - if err := abi.Unpack(&[]interface{}{ret1, ret2, ret3, ret4}, "multi", buff.Bytes()); err != nil { + if err := abi.UnpackIntoInterface(&[]interface{}{ret1, ret2, ret3, ret4}, "multi", buff.Bytes()); err != nil { t.Fatal(err) } if !reflect.DeepEqual(*ret1, ret1Exp) { @@ -664,7 +430,7 @@ func TestMultiReturnWithStringArray(t *testing.T) { } func TestMultiReturnWithStringSlice(t *testing.T) { - const definition = `[{"name" : "multi", "outputs": [{"name": "","type": "string[]"},{"name": "","type": "uint256[]"}]}]` + const definition = `[{"name" : "multi", "type": "function", "outputs": [{"name": "","type": "string[]"},{"name": "","type": "uint256[]"}]}]` abi, err := JSON(strings.NewReader(definition)) if err != nil { t.Fatal(err) @@ -684,7 +450,7 @@ func TestMultiReturnWithStringSlice(t *testing.T) { buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000065")) // output[1][1] value ret1, ret1Exp := new([]string), []string{"ethereum", "go-ethereum"} ret2, ret2Exp := new([]*big.Int), []*big.Int{big.NewInt(100), big.NewInt(101)} - if err := abi.Unpack(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil { + if err := abi.UnpackIntoInterface(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil { t.Fatal(err) } if !reflect.DeepEqual(*ret1, ret1Exp) { @@ -700,7 +466,7 @@ func TestMultiReturnWithDeeplyNestedArray(t *testing.T) { // values of nested static arrays count towards the size as well, and any element following // after such nested array argument should be read with the correct offset, // so that it does not read content from the previous array argument. 
- const definition = `[{"name" : "multi", "outputs": [{"type": "uint64[3][2][4]"}, {"type": "uint64"}]}]` + const definition = `[{"name" : "multi", "type": "function", "outputs": [{"type": "uint64[3][2][4]"}, {"type": "uint64"}]}]` abi, err := JSON(strings.NewReader(definition)) if err != nil { t.Fatal(err) @@ -724,7 +490,7 @@ func TestMultiReturnWithDeeplyNestedArray(t *testing.T) { {{0x411, 0x412, 0x413}, {0x421, 0x422, 0x423}}, } ret2, ret2Exp := new(uint64), uint64(0x9876) - if err := abi.Unpack(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil { + if err := abi.UnpackIntoInterface(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil { t.Fatal(err) } if !reflect.DeepEqual(*ret1, ret1Exp) { @@ -737,15 +503,15 @@ func TestMultiReturnWithDeeplyNestedArray(t *testing.T) { func TestUnmarshal(t *testing.T) { const definition = `[ - { "name" : "int", "constant" : false, "outputs": [ { "type": "uint256" } ] }, - { "name" : "bool", "constant" : false, "outputs": [ { "type": "bool" } ] }, - { "name" : "bytes", "constant" : false, "outputs": [ { "type": "bytes" } ] }, - { "name" : "fixed", "constant" : false, "outputs": [ { "type": "bytes32" } ] }, - { "name" : "multi", "constant" : false, "outputs": [ { "type": "bytes" }, { "type": "bytes" } ] }, - { "name" : "intArraySingle", "constant" : false, "outputs": [ { "type": "uint256[3]" } ] }, - { "name" : "addressSliceSingle", "constant" : false, "outputs": [ { "type": "address[]" } ] }, - { "name" : "addressSliceDouble", "constant" : false, "outputs": [ { "name": "a", "type": "address[]" }, { "name": "b", "type": "address[]" } ] }, - { "name" : "mixedBytes", "constant" : true, "outputs": [ { "name": "a", "type": "bytes" }, { "name": "b", "type": "bytes32" } ] }]` + { "name" : "int", "type": "function", "outputs": [ { "type": "uint256" } ] }, + { "name" : "bool", "type": "function", "outputs": [ { "type": "bool" } ] }, + { "name" : "bytes", "type": "function", "outputs": [ { "type": "bytes" } ] }, + { "name" : "fixed", "type": "function", "outputs": [ { "type": "bytes32" } ] }, + { "name" : "multi", "type": "function", "outputs": [ { "type": "bytes" }, { "type": "bytes" } ] }, + { "name" : "intArraySingle", "type": "function", "outputs": [ { "type": "uint256[3]" } ] }, + { "name" : "addressSliceSingle", "type": "function", "outputs": [ { "type": "address[]" } ] }, + { "name" : "addressSliceDouble", "type": "function", "outputs": [ { "name": "a", "type": "address[]" }, { "name": "b", "type": "address[]" } ] }, + { "name" : "mixedBytes", "type": "function", "stateMutability" : "view", "outputs": [ { "name": "a", "type": "bytes" }, { "name": "b", "type": "bytes32" } ] }]` abi, err := JSON(strings.NewReader(definition)) if err != nil { @@ -763,7 +529,7 @@ func TestUnmarshal(t *testing.T) { buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000a")) buff.Write(common.Hex2Bytes("0102000000000000000000000000000000000000000000000000000000000000")) - err = abi.Unpack(&mixedBytes, "mixedBytes", buff.Bytes()) + err = abi.UnpackIntoInterface(&mixedBytes, "mixedBytes", buff.Bytes()) if err != nil { t.Error(err) } else { @@ -778,7 +544,7 @@ func TestUnmarshal(t *testing.T) { // marshal int var Int *big.Int - err = abi.Unpack(&Int, "int", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) + err = abi.UnpackIntoInterface(&Int, "int", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) if err != nil { t.Error(err) } @@ -789,7 +555,7 @@ 
func TestUnmarshal(t *testing.T) { // marshal bool var Bool bool - err = abi.Unpack(&Bool, "bool", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) + err = abi.UnpackIntoInterface(&Bool, "bool", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) if err != nil { t.Error(err) } @@ -806,7 +572,7 @@ func TestUnmarshal(t *testing.T) { buff.Write(bytesOut) var Bytes []byte - err = abi.Unpack(&Bytes, "bytes", buff.Bytes()) + err = abi.UnpackIntoInterface(&Bytes, "bytes", buff.Bytes()) if err != nil { t.Error(err) } @@ -822,7 +588,7 @@ func TestUnmarshal(t *testing.T) { bytesOut = common.RightPadBytes([]byte("hello"), 64) buff.Write(bytesOut) - err = abi.Unpack(&Bytes, "bytes", buff.Bytes()) + err = abi.UnpackIntoInterface(&Bytes, "bytes", buff.Bytes()) if err != nil { t.Error(err) } @@ -838,7 +604,7 @@ func TestUnmarshal(t *testing.T) { bytesOut = common.RightPadBytes([]byte("hello"), 64) buff.Write(bytesOut) - err = abi.Unpack(&Bytes, "bytes", buff.Bytes()) + err = abi.UnpackIntoInterface(&Bytes, "bytes", buff.Bytes()) if err != nil { t.Error(err) } @@ -848,7 +614,7 @@ func TestUnmarshal(t *testing.T) { } // marshal dynamic bytes output empty - err = abi.Unpack(&Bytes, "bytes", nil) + err = abi.UnpackIntoInterface(&Bytes, "bytes", nil) if err == nil { t.Error("expected error") } @@ -859,7 +625,7 @@ func TestUnmarshal(t *testing.T) { buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005")) buff.Write(common.RightPadBytes([]byte("hello"), 32)) - err = abi.Unpack(&Bytes, "bytes", buff.Bytes()) + err = abi.UnpackIntoInterface(&Bytes, "bytes", buff.Bytes()) if err != nil { t.Error(err) } @@ -873,7 +639,7 @@ func TestUnmarshal(t *testing.T) { buff.Write(common.RightPadBytes([]byte("hello"), 32)) var hash common.Hash - err = abi.Unpack(&hash, "fixed", buff.Bytes()) + err = abi.UnpackIntoInterface(&hash, "fixed", buff.Bytes()) if err != nil { t.Error(err) } @@ -886,12 +652,12 @@ func TestUnmarshal(t *testing.T) { // marshal error buff.Reset() buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020")) - err = abi.Unpack(&Bytes, "bytes", buff.Bytes()) + err = abi.UnpackIntoInterface(&Bytes, "bytes", buff.Bytes()) if err == nil { t.Error("expected error") } - err = abi.Unpack(&Bytes, "multi", make([]byte, 64)) + err = abi.UnpackIntoInterface(&Bytes, "multi", make([]byte, 64)) if err == nil { t.Error("expected error") } @@ -902,7 +668,7 @@ func TestUnmarshal(t *testing.T) { buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003")) // marshal int array var intArray [3]*big.Int - err = abi.Unpack(&intArray, "intArraySingle", buff.Bytes()) + err = abi.UnpackIntoInterface(&intArray, "intArraySingle", buff.Bytes()) if err != nil { t.Error(err) } @@ -923,7 +689,7 @@ func TestUnmarshal(t *testing.T) { buff.Write(common.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000")) var outAddr []common.Address - err = abi.Unpack(&outAddr, "addressSliceSingle", buff.Bytes()) + err = abi.UnpackIntoInterface(&outAddr, "addressSliceSingle", buff.Bytes()) if err != nil { t.Fatal("didn't expect error:", err) } @@ -950,7 +716,7 @@ func TestUnmarshal(t *testing.T) { A []common.Address B []common.Address } - err = abi.Unpack(&outAddrStruct, "addressSliceDouble", buff.Bytes()) + err = abi.UnpackIntoInterface(&outAddrStruct, "addressSliceDouble", buff.Bytes()) if err != nil { t.Fatal("didn't expect error:", err) } @@ -978,14 
+744,14 @@ func TestUnmarshal(t *testing.T) { buff.Reset() buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000100")) - err = abi.Unpack(&outAddr, "addressSliceSingle", buff.Bytes()) + err = abi.UnpackIntoInterface(&outAddr, "addressSliceSingle", buff.Bytes()) if err == nil { t.Fatal("expected error:", err) } } func TestUnpackTuple(t *testing.T) { - const simpleTuple = `[{"name":"tuple","constant":false,"outputs":[{"type":"tuple","name":"ret","components":[{"type":"int256","name":"a"},{"type":"int256","name":"b"}]}]}]` + const simpleTuple = `[{"name":"tuple","type":"function","outputs":[{"type":"tuple","name":"ret","components":[{"type":"int256","name":"a"},{"type":"int256","name":"b"}]}]}]` abi, err := JSON(strings.NewReader(simpleTuple)) if err != nil { t.Fatal(err) @@ -1001,7 +767,7 @@ func TestUnpackTuple(t *testing.T) { B *big.Int }{new(big.Int), new(big.Int)} - err = abi.Unpack(&v, "tuple", buff.Bytes()) + err = abi.UnpackIntoInterface(&v, "tuple", buff.Bytes()) if err != nil { t.Error(err) } else { @@ -1009,12 +775,12 @@ func TestUnpackTuple(t *testing.T) { t.Errorf("unexpected value unpacked: want %x, got %x", 1, v.A) } if v.B.Cmp(big.NewInt(-1)) != 0 { - t.Errorf("unexpected value unpacked: want %x, got %x", v.B, -1) + t.Errorf("unexpected value unpacked: want %x, got %x", -1, v.B) } } // Test nested tuple - const nestedTuple = `[{"name":"tuple","constant":false,"outputs":[ + const nestedTuple = `[{"name":"tuple","type":"function","outputs":[ {"type":"tuple","name":"s","components":[{"type":"uint256","name":"a"},{"type":"uint256[]","name":"b"},{"type":"tuple[]","name":"c","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]}]}, {"type":"tuple","name":"t","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]}, {"type":"uint256","name":"a"} @@ -1073,7 +839,7 @@ func TestUnpackTuple(t *testing.T) { A: big.NewInt(1), } - err = abi.Unpack(&ret, "tuple", buff.Bytes()) + err = abi.UnpackIntoInterface(&ret, "tuple", buff.Bytes()) if err != nil { t.Error(err) } @@ -1136,7 +902,7 @@ func TestOOMMaliciousInput(t *testing.T) { }, } for i, test := range oomTests { - def := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def) + def := fmt.Sprintf(`[{ "name" : "method", "type": "function", "outputs": %s}]`, test.def) abi, err := JSON(strings.NewReader(def)) if err != nil { t.Fatalf("invalid ABI definition %s: %v", def, err) diff --git a/accounts/accounts.go b/accounts/accounts.go index bf5190ad98f7..08a1f0f2b193 100644 --- a/accounts/accounts.go +++ b/accounts/accounts.go @@ -21,7 +21,7 @@ import ( "fmt" "math/big" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" @@ -88,7 +88,7 @@ type Wallet interface { // to discover non zero accounts and automatically add them to list of tracked // accounts. // - // Note, self derivaton will increment the last component of the specified path + // Note, self derivation will increment the last component of the specified path // opposed to decending into a child path to allow discovering accounts starting // from non zero components. // @@ -129,6 +129,8 @@ type Wallet interface { // about which fields or actions are needed. The user may retry by providing // the needed details via SignHashWithPassphrase, or by other means (e.g. unlock // the account in a keystore). 
+ // + // This method should return the signature in 'canonical' format, with v 0 or 1 SignText(account Account, text []byte) ([]byte, error) // SignTextWithPassphrase is identical to Signtext, but also takes a password diff --git a/accounts/external/backend.go b/accounts/external/backend.go index 6089ca984446..17a747db0ed5 100644 --- a/accounts/external/backend.go +++ b/accounts/external/backend.go @@ -27,7 +27,6 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/signer/core" @@ -131,6 +130,12 @@ func (api *ExternalSigner) Accounts() []accounts.Account { func (api *ExternalSigner) Contains(account accounts.Account) bool { api.cacheMu.RLock() defer api.cacheMu.RUnlock() + if api.cache == nil { + // If we haven't already fetched the accounts, it's time to do so now + api.cacheMu.RUnlock() + api.Accounts() + api.cacheMu.RLock() + } for _, a := range api.cache { if a.Address == account.Address && (account.URL == (accounts.URL{}) || account.URL == api.URL()) { return true @@ -161,7 +166,7 @@ func (api *ExternalSigner) SignData(account accounts.Account, mimeType string, d hexutil.Encode(data)); err != nil { return nil, err } - // If V is on 27/28-form, convert to to 0/1 for Clique + // If V is on 27/28-form, convert to 0/1 for Clique if mimeType == accounts.MimetypeClique && (res[64] == 27 || res[64] == 28) { res[64] -= 27 // Transform V from 27/28 to 0/1 for Clique use } @@ -169,19 +174,29 @@ func (api *ExternalSigner) SignData(account accounts.Account, mimeType string, d } func (api *ExternalSigner) SignText(account accounts.Account, text []byte) ([]byte, error) { - var res hexutil.Bytes + var signature hexutil.Bytes var signAddress = common.NewMixedcaseAddress(account.Address) - if err := api.client.Call(&res, "account_signData", + if err := api.client.Call(&signature, "account_signData", accounts.MimetypeTextPlain, &signAddress, // Need to use the pointer here, because of how MarshalJSON is defined hexutil.Encode(text)); err != nil { return nil, err } - return res, nil + if signature[64] == 27 || signature[64] == 28 { + // If clef is used as a backend, it may already have transformed + // the signature to ethereum-type signature. + signature[64] -= 27 // Transform V from Ethereum-legacy to 0/1 + } + return signature, nil +} + +// signTransactionResult represents the signing result returned by clef.
+type signTransactionResult struct { + Raw hexutil.Bytes `json:"raw"` + Tx *types.Transaction `json:"tx"` } func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) { - res := ethapi.SignTransactionResult{} data := hexutil.Bytes(tx.Data()) var to *common.MixedcaseAddress if tx.To() != nil { @@ -197,6 +212,7 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio To: to, From: common.NewMixedcaseAddress(account.Address), } + var res signTransactionResult if err := api.client.Call(&res, "account_signTransaction", args); err != nil { return nil, err } diff --git a/accounts/hd.go b/accounts/hd.go index 75c47611061c..54acea3b261d 100644 --- a/accounts/hd.go +++ b/accounts/hd.go @@ -150,3 +150,31 @@ func (path *DerivationPath) UnmarshalJSON(b []byte) error { *path, err = ParseDerivationPath(dp) return err } + +// DefaultIterator creates a BIP-32 path iterator, which progresses by increasing the last component: +// i.e. m/44'/60'/0'/0/0, m/44'/60'/0'/0/1, m/44'/60'/0'/0/2, ... m/44'/60'/0'/0/N. +func DefaultIterator(base DerivationPath) func() DerivationPath { + path := make(DerivationPath, len(base)) + copy(path[:], base[:]) + // Set it back by one, so the first call gives the first result + path[len(path)-1]-- + return func() DerivationPath { + path[len(path)-1]++ + return path + } +} + +// LedgerLiveIterator creates a bip44 path iterator for Ledger Live. +// Ledger Live increments the third component rather than the fifth component +// i.e. m/44'/60'/0'/0/0, m/44'/60'/1'/0/0, m/44'/60'/2'/0/0, ... m/44'/60'/N'/0/0. +func LedgerLiveIterator(base DerivationPath) func() DerivationPath { + path := make(DerivationPath, len(base)) + copy(path[:], base[:]) + // Set it back by one, so the first call gives the first result + path[2]-- + return func() DerivationPath { + // ledgerLivePathIterator iterates on the third component + path[2]++ + return path + } +} diff --git a/accounts/hd_test.go b/accounts/hd_test.go index b6b23230dc74..0743bbe66628 100644 --- a/accounts/hd_test.go +++ b/accounts/hd_test.go @@ -17,6 +17,7 @@ package accounts import ( + "fmt" "reflect" "testing" ) @@ -61,7 +62,7 @@ func TestHDPathParsing(t *testing.T) { // Weird inputs just to ensure they work {" m / 44 '\n/\n 60 \n\n\t' /\n0 ' /\t\t 0", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}}, - // Invaid derivation paths + // Invalid derivation paths {"", nil}, // Empty relative derivation path {"m", nil}, // Empty absolute derivation path {"m/", nil}, // Missing last derivation component @@ -77,3 +78,41 @@ func TestHDPathParsing(t *testing.T) { } } } + +func testDerive(t *testing.T, next func() DerivationPath, expected []string) { + t.Helper() + for i, want := range expected { + if have := next(); fmt.Sprintf("%v", have) != want { + t.Errorf("step %d, have %v, want %v", i, have, want) + } + } +} + +func TestHdPathIteration(t *testing.T) { + testDerive(t, DefaultIterator(DefaultBaseDerivationPath), + []string{ + "m/44'/60'/0'/0/0", "m/44'/60'/0'/0/1", + "m/44'/60'/0'/0/2", "m/44'/60'/0'/0/3", + "m/44'/60'/0'/0/4", "m/44'/60'/0'/0/5", + "m/44'/60'/0'/0/6", "m/44'/60'/0'/0/7", + "m/44'/60'/0'/0/8", "m/44'/60'/0'/0/9", + }) + + testDerive(t, DefaultIterator(LegacyLedgerBaseDerivationPath), + []string{ + "m/44'/60'/0'/0", "m/44'/60'/0'/1", + "m/44'/60'/0'/2", "m/44'/60'/0'/3", + "m/44'/60'/0'/4", "m/44'/60'/0'/5", + "m/44'/60'/0'/6", "m/44'/60'/0'/7", + "m/44'/60'/0'/8", "m/44'/60'/0'/9", + }) + + testDerive(t, 
LedgerLiveIterator(DefaultBaseDerivationPath), + []string{ + "m/44'/60'/0'/0/0", "m/44'/60'/1'/0/0", + "m/44'/60'/2'/0/0", "m/44'/60'/3'/0/0", + "m/44'/60'/4'/0/0", "m/44'/60'/5'/0/0", + "m/44'/60'/6'/0/0", "m/44'/60'/7'/0/0", + "m/44'/60'/8'/0/0", "m/44'/60'/9'/0/0", + }) +} diff --git a/accounts/keystore/file_cache.go b/accounts/keystore/file_cache.go index 73ff6ae9ee6f..8b309321d370 100644 --- a/accounts/keystore/file_cache.go +++ b/accounts/keystore/file_cache.go @@ -32,7 +32,7 @@ import ( type fileCache struct { all mapset.Set // Set of all files from the keystore folder lastMod time.Time // Last time instance when a file was modified - mu sync.RWMutex + mu sync.Mutex } // scan performs a new scan on the given directory, compares against the already diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go index 5b55175b1f3e..9d5e2cf6a2a7 100644 --- a/accounts/keystore/keystore.go +++ b/accounts/keystore/keystore.go @@ -24,7 +24,6 @@ import ( "crypto/ecdsa" crand "crypto/rand" "errors" - "fmt" "math/big" "os" "path/filepath" @@ -44,6 +43,10 @@ var ( ErrLocked = accounts.NewAuthNeededError("password or unlock") ErrNoMatch = errors.New("no key for given address or file") ErrDecrypt = errors.New("could not decrypt key with given password") + + // ErrAccountAlreadyExists is returned if an account attempted to import is + // already present in the keystore. + ErrAccountAlreadyExists = errors.New("account already exists") ) // KeyStoreType is the reflect type of a keystore backend. @@ -67,7 +70,8 @@ type KeyStore struct { updateScope event.SubscriptionScope // Subscription scope tracking current live listeners updating bool // Whether the event notification loop is running - mu sync.RWMutex + mu sync.RWMutex + importMu sync.Mutex // Import Mutex locks the import to prevent two insertions from racing } type unlocked struct { @@ -443,14 +447,27 @@ func (ks *KeyStore) Import(keyJSON []byte, passphrase, newPassphrase string) (ac if err != nil { return accounts.Account{}, err } + ks.importMu.Lock() + defer ks.importMu.Unlock() + + if ks.cache.hasAddress(key.Address) { + return accounts.Account{ + Address: key.Address, + }, ErrAccountAlreadyExists + } return ks.importKey(key, newPassphrase) } // ImportECDSA stores the given key into the key directory, encrypting it with the passphrase. func (ks *KeyStore) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (accounts.Account, error) { + ks.importMu.Lock() + defer ks.importMu.Unlock() + key := newKeyFromECDSA(priv) if ks.cache.hasAddress(key.Address) { - return accounts.Account{}, fmt.Errorf("account already exists") + return accounts.Account{ + Address: key.Address, + }, ErrAccountAlreadyExists } return ks.importKey(key, passphrase) } diff --git a/accounts/keystore/keystore_test.go b/accounts/keystore/keystore_test.go index a691c5062706..cb5de11c0ddb 100644 --- a/accounts/keystore/keystore_test.go +++ b/accounts/keystore/keystore_test.go @@ -23,11 +23,14 @@ import ( "runtime" "sort" "strings" + "sync" + "sync/atomic" "testing" "time" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/event" ) @@ -333,11 +336,95 @@ func TestWalletNotifications(t *testing.T) { // Shut down the event collector and check events. 
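DefaultIterator and LedgerLiveIterator above capture the two derivation schemes exercised by TestHdPathIteration; a short sketch of walking the first few paths with them (DefaultBaseDerivationPath is the existing base path from the same accounts package):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
)

func main() {
	// Standard scheme: the final path component is incremented.
	next := accounts.DefaultIterator(accounts.DefaultBaseDerivationPath)
	for i := 0; i < 3; i++ {
		fmt.Println(next()) // m/44'/60'/0'/0/0, m/44'/60'/0'/0/1, m/44'/60'/0'/0/2
	}
	// Ledger Live scheme: the hardened account (third) component is incremented instead.
	nextLive := accounts.LedgerLiveIterator(accounts.DefaultBaseDerivationPath)
	for i := 0; i < 3; i++ {
		fmt.Println(nextLive()) // m/44'/60'/0'/0/0, m/44'/60'/1'/0/0, m/44'/60'/2'/0/0
	}
}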
sub.Unsubscribe() - <-updates + for ev := range updates { + events = append(events, walletEvent{ev, ev.Wallet.Accounts()[0]}) + } checkAccounts(t, live, ks.Wallets()) checkEvents(t, wantEvents, events) } +// TestImportECDSA tests the import functionality of a keystore. +func TestImportECDSA(t *testing.T) { + dir, ks := tmpKeyStore(t, true) + defer os.RemoveAll(dir) + key, err := crypto.GenerateKey() + if err != nil { + t.Fatalf("failed to generate key: %v", err) + } + if _, err = ks.ImportECDSA(key, "old"); err != nil { + t.Errorf("importing failed: %v", err) + } + if _, err = ks.ImportECDSA(key, "old"); err == nil { + t.Errorf("importing same key twice succeeded") + } + if _, err = ks.ImportECDSA(key, "new"); err == nil { + t.Errorf("importing same key twice succeeded") + } +} + +// TestImportExport tests the import and export functionality of a keystore. +func TestImportExport(t *testing.T) { + dir, ks := tmpKeyStore(t, true) + defer os.RemoveAll(dir) + acc, err := ks.NewAccount("old") + if err != nil { + t.Fatalf("failed to create account: %v", err) + } + json, err := ks.Export(acc, "old", "new") + if err != nil { + t.Fatalf("failed to export account: %v", err) + } + dir2, ks2 := tmpKeyStore(t, true) + defer os.RemoveAll(dir2) + if _, err = ks2.Import(json, "old", "old"); err == nil { + t.Errorf("importing with invalid password succeeded") + } + acc2, err := ks2.Import(json, "new", "new") + if err != nil { + t.Errorf("importing failed: %v", err) + } + if acc.Address != acc2.Address { + t.Error("imported account does not match exported account") + } + if _, err = ks2.Import(json, "new", "new"); err == nil { + t.Errorf("importing a key twice succeeded") + } + +} + +// TestImportRace tests the keystore on races. +// This test should fail under -race if importing races. +func TestImportRace(t *testing.T) { + dir, ks := tmpKeyStore(t, true) + defer os.RemoveAll(dir) + acc, err := ks.NewAccount("old") + if err != nil { + t.Fatalf("failed to create account: %v", err) + } + json, err := ks.Export(acc, "old", "new") + if err != nil { + t.Fatalf("failed to export account: %v", err) + } + dir2, ks2 := tmpKeyStore(t, true) + defer os.RemoveAll(dir2) + var atom uint32 + var wg sync.WaitGroup + wg.Add(2) + for i := 0; i < 2; i++ { + go func() { + defer wg.Done() + if _, err := ks2.Import(json, "new", "new"); err != nil { + atomic.AddUint32(&atom, 1) + } + + }() + } + wg.Wait() + if atom != 1 { + t.Errorf("Import is racy") + } +} + // checkAccounts checks that all known live accounts are present in the wallet list.
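Together with the new import mutex in keystore.go, the exported ErrAccountAlreadyExists lets callers treat a duplicate import as a benign condition; a minimal sketch (the key directory and passphrase below are placeholders):

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	ks := keystore.NewKeyStore("/tmp/example-keystore", keystore.StandardScryptN, keystore.StandardScryptP)
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	if _, err := ks.ImportECDSA(key, "placeholder-pass"); err != nil {
		log.Fatal(err)
	}
	// Importing the same key again now fails with a well-known sentinel error
	// instead of an ad-hoc fmt.Errorf value, so it can be detected explicitly.
	if _, err := ks.ImportECDSA(key, "placeholder-pass"); errors.Is(err, keystore.ErrAccountAlreadyExists) {
		fmt.Println("key already present, skipping import")
	}
}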
func checkAccounts(t *testing.T, live map[common.Address]accounts.Account, wallets []accounts.Wallet) { if len(live) != len(wallets) { diff --git a/accounts/keystore/passphrase.go b/accounts/keystore/passphrase.go index 89cdf0bfca14..dd4d7764e48b 100644 --- a/accounts/keystore/passphrase.go +++ b/accounts/keystore/passphrase.go @@ -230,7 +230,7 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) { key := crypto.ToECDSAUnsafe(keyBytes) return &Key{ - Id: uuid.UUID(keyId), + Id: keyId, Address: crypto.PubkeyToAddress(key.PublicKey), PrivateKey: key, }, nil diff --git a/accounts/keystore/wallet.go b/accounts/keystore/wallet.go index 498067d49730..1066095f6d07 100644 --- a/accounts/keystore/wallet.go +++ b/accounts/keystore/wallet.go @@ -19,7 +19,7 @@ package keystore import ( "math/big" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -58,7 +58,7 @@ func (w *keystoreWallet) Open(passphrase string) error { return nil } func (w *keystoreWallet) Close() error { return nil } // Accounts implements accounts.Wallet, returning an account list consisting of -// a single account that the plain kestore wallet contains. +// a single account that the plain keystore wallet contains. func (w *keystoreWallet) Accounts() []accounts.Account { return []accounts.Account{w.account} } @@ -93,12 +93,12 @@ func (w *keystoreWallet) signHash(account accounts.Account, hash []byte) ([]byte return w.keystore.SignHash(account, hash) } -// SignData signs keccak256(data). The mimetype parameter describes the type of data being signed +// SignData signs keccak256(data). The mimetype parameter describes the type of data being signed. func (w *keystoreWallet) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) { return w.signHash(account, crypto.Keccak256(data)) } -// SignDataWithPassphrase signs keccak256(data). The mimetype parameter describes the type of data being signed +// SignDataWithPassphrase signs keccak256(data). The mimetype parameter describes the type of data being signed. func (w *keystoreWallet) SignDataWithPassphrase(account accounts.Account, passphrase, mimeType string, data []byte) ([]byte, error) { // Make sure the requested account is contained within if !w.Contains(account) { @@ -108,12 +108,14 @@ func (w *keystoreWallet) SignDataWithPassphrase(account accounts.Account, passph return w.keystore.SignHashWithPassphrase(account, passphrase, crypto.Keccak256(data)) } +// SignText implements accounts.Wallet, attempting to sign the hash of +// the given text with the given account. func (w *keystoreWallet) SignText(account accounts.Account, text []byte) ([]byte, error) { return w.signHash(account, accounts.TextHash(text)) } // SignTextWithPassphrase implements accounts.Wallet, attempting to sign the -// given hash with the given account using passphrase as extra authentication. +// hash of the given text with the given account using passphrase as extra authentication. 
func (w *keystoreWallet) SignTextWithPassphrase(account accounts.Account, passphrase string, text []byte) ([]byte, error) { // Make sure the requested account is contained within if !w.Contains(account) { diff --git a/accounts/manager.go b/accounts/manager.go index 731d12ea30a9..acf41ed8e9e2 100644 --- a/accounts/manager.go +++ b/accounts/manager.go @@ -141,6 +141,11 @@ func (am *Manager) Wallets() []Wallet { am.lock.RLock() defer am.lock.RUnlock() + return am.walletsNoLock() +} + +// walletsNoLock returns all registered wallets. Callers must hold am.lock. +func (am *Manager) walletsNoLock() []Wallet { cpy := make([]Wallet, len(am.wallets)) copy(cpy, am.wallets) return cpy @@ -155,7 +160,7 @@ func (am *Manager) Wallet(url string) (Wallet, error) { if err != nil { return nil, err } - for _, wallet := range am.Wallets() { + for _, wallet := range am.walletsNoLock() { if wallet.URL() == parsed { return wallet, nil } diff --git a/accounts/scwallet/hub.go b/accounts/scwallet/hub.go index 5f939c658627..811f8c695e48 100644 --- a/accounts/scwallet/hub.go +++ b/accounts/scwallet/hub.go @@ -220,7 +220,7 @@ func (hub *Hub) refreshWallets() { // Mark the reader as present seen[reader] = struct{}{} - // If we alreay know about this card, skip to the next reader, otherwise clean up + // If we already know about this card, skip to the next reader, otherwise clean up if wallet, ok := hub.wallets[reader]; ok { if err := wallet.ping(); err == nil { continue diff --git a/accounts/scwallet/wallet.go b/accounts/scwallet/wallet.go index dd9266cb3124..6476646d7f07 100644 --- a/accounts/scwallet/wallet.go +++ b/accounts/scwallet/wallet.go @@ -33,7 +33,7 @@ import ( "sync" "time" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -312,15 +312,15 @@ func (w *Wallet) Status() (string, error) { } switch { case !w.session.verified && status.PinRetryCount == 0 && status.PukRetryCount == 0: - return fmt.Sprintf("Bricked, waiting for full wipe"), nil + return "Bricked, waiting for full wipe", nil case !w.session.verified && status.PinRetryCount == 0: return fmt.Sprintf("Blocked, waiting for PUK (%d attempts left) and new PIN", status.PukRetryCount), nil case !w.session.verified: return fmt.Sprintf("Locked, waiting for PIN (%d attempts left)", status.PinRetryCount), nil case !status.Initialized: - return fmt.Sprintf("Empty, waiting for initialization"), nil + return "Empty, waiting for initialization", nil default: - return fmt.Sprintf("Online"), nil + return "Online", nil } } @@ -362,7 +362,7 @@ func (w *Wallet) Open(passphrase string) error { return err } // Pairing succeeded, fall through to PIN checks. This will of course fail, - // but we can't return ErrPINNeeded directly here becase we don't know whether + // but we can't return ErrPINNeeded directly here because we don't know whether // a PIN check or a PIN reset is needed. passphrase = "" } @@ -637,7 +637,7 @@ func (w *Wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun // to discover non zero accounts and automatically add them to list of tracked // accounts. // -// Note, self derivaton will increment the last component of the specified path +// Note, self derivation will increment the last component of the specified path // opposed to decending into a child path to allow discovering accounts starting // from non zero components. 
// diff --git a/accounts/usbwallet/ledger.go b/accounts/usbwallet/ledger.go index 64eae64f689a..71f0f9392fc4 100644 --- a/accounts/usbwallet/ledger.go +++ b/accounts/usbwallet/ledger.go @@ -162,7 +162,7 @@ func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio return common.Address{}, nil, accounts.ErrWalletClosed } // Ensure the wallet is capable of signing the given transaction - if chainID != nil && w.version[0] <= 1 && w.version[2] <= 2 { + if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 { //lint:ignore ST1005 brand name displayed on the console return common.Address{}, nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2]) } diff --git a/accounts/usbwallet/wallet.go b/accounts/usbwallet/wallet.go index ee539d96535d..9f74e5554f14 100644 --- a/accounts/usbwallet/wallet.go +++ b/accounts/usbwallet/wallet.go @@ -25,7 +25,7 @@ import ( "sync" "time" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -368,18 +368,22 @@ func (w *wallet) selfDerive() { w.log.Warn("USB wallet nonce retrieval failed", "err", err) break } - // If the next account is empty, stop self-derivation, but add for the last base path + // We've just self-derived a new account, start tracking it locally + // unless the account was empty. + path := make(accounts.DerivationPath, len(nextPaths[i])) + copy(path[:], nextPaths[i][:]) if balance.Sign() == 0 && nonce == 0 { empty = true + // If it indeed was empty, make a log output for it anyway. In the case + // of legacy-ledger, the first account on the legacy-path will + // be shown to the user, even if we don't actively track it if i < len(nextAddrs)-1 { + w.log.Info("Skipping tracking first account on legacy path, use personal.deriveAccount(,, false) to track", + "path", path, "address", nextAddrs[i]) break } } - // We've just self-derived a new account, start tracking it locally - path := make(accounts.DerivationPath, len(nextPaths[i])) - copy(path[:], nextPaths[i][:]) paths = append(paths, path) - account := accounts.Account{ Address: nextAddrs[i], URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)}, @@ -489,7 +493,7 @@ func (w *wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun // to discover non zero accounts and automatically add them to list of tracked // accounts. // -// Note, self derivaton will increment the last component of the specified path +// Note, self derivation will increment the last component of the specified path // opposed to decending into a child path to allow discovering accounts starting // from non zero components.
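The widened Ledger guard above is meant to reject only app versions below 1.0.3, the first release able to sign EIP-155 (chain-ID) transactions. For illustration only, a hypothetical helper that states the same minimum-version requirement as a lexicographic comparison (the driver itself keeps the inlined per-component check):

package main

import "fmt"

// supportsChainID is a made-up helper, not part of the driver: it reports whether
// a Ledger Ethereum app version is at least 1.0.3 and can therefore sign with a chain ID.
func supportsChainID(version [3]byte) bool {
	min := [3]byte{1, 0, 3}
	for i := 0; i < 3; i++ {
		if version[i] != min[i] {
			return version[i] > min[i]
		}
	}
	return true // exactly 1.0.3
}

func main() {
	fmt.Println(supportsChainID([3]byte{1, 0, 2})) // false
	fmt.Println(supportsChainID([3]byte{1, 0, 3})) // true
	fmt.Println(supportsChainID([3]byte{1, 9, 2})) // true: versions like 1.9.2 were wrongly rejected by the old two-component check
}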
// diff --git a/appveyor.yml b/appveyor.yml index 0f230bac1457..2bf67d45684d 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -6,6 +6,7 @@ clone_depth: 5 version: "{branch}.{build}" environment: global: + GO111MODULE: on GOPATH: C:\gopath CC: gcc.exe matrix: @@ -23,13 +24,13 @@ environment: install: - git submodule update --init - rmdir C:\go /s /q - - appveyor DownloadFile https://dl.google.com/go/go1.13.4.windows-%GETH_ARCH%.zip - - 7z x go1.13.4.windows-%GETH_ARCH%.zip -y -oC:\ > NUL + - appveyor DownloadFile https://dl.google.com/go/go1.15.5.windows-%GETH_ARCH%.zip + - 7z x go1.15.5.windows-%GETH_ARCH%.zip -y -oC:\ > NUL - go version - gcc --version build_script: - - go run build\ci.go install + - go run build\ci.go install -dlgo after_build: - go run build\ci.go archive -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds diff --git a/build/checksums.txt b/build/checksums.txt index bb814e33926b..a7a6a657e9bf 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -1,19 +1,31 @@ # This file contains sha256 checksums of optional build dependencies. -95dbeab442ee2746b9acf0934c8e2fc26414a0565c008631b04addb8c02e7624 go1.13.4.src.tar.gz +890bba73c5e2b19ffb1180e385ea225059eb008eb91b694875dd86ea48675817 go1.15.6.src.tar.gz +940a73b45993a3bae5792cf324140dded34af97c548af4864d22fd6d49f3bd9f go1.15.6.darwin-amd64.tar.gz +ad187f02158b9a9013ef03f41d14aa69c402477f178825a3940280814bcbb755 go1.15.6.linux-386.tar.gz +3918e6cc85e7eaaa6f859f1bdbaac772e7a825b0eb423c63d3ae68b21f84b844 go1.15.6.linux-amd64.tar.gz +f87515b9744154ffe31182da9341d0a61eb0795551173d242c8cad209239e492 go1.15.6.linux-arm64.tar.gz +40ba9a57764e374195018ef37c38a5fbac9bbce908eab436370631a84bfc5788 go1.15.6.linux-armv6l.tar.gz +5872eff6746a0a5f304272b27cbe9ce186f468454e95749cce01e903fbfc0e17 go1.15.6.windows-386.zip +b7b3808bb072c2bab73175009187fd5a7f20ffe0a31739937003a14c5c4d9006 go1.15.6.windows-amd64.zip +9d9dd5c217c1392f1b2ed5e03e1c71bf4cf8553884e57a38e68fd37fdcfe31a8 go1.15.6.freebsd-386.tar.gz +609f065d855aed5a0b40ef0245aacbcc0b4b7882dc3b1e75ae50576cf25265ee go1.15.6.freebsd-amd64.tar.gz +d4174fc217e749ac049eacc8827df879689f2246ac230d04991ae7df336f7cb2 go1.15.6.linux-ppc64le.tar.gz +839cc6b67687d8bb7cb044e4a9a2eac0c090765cc8ec55ffe714dfb7cd51cf3a go1.15.6.linux-s390x.tar.gz -1fcbc9e36f4319eeed02beb8cfd1b3d425ffc2f90ddf09a80f18d5064c51e0cb golangci-lint-1.21.0-linux-386.tar.gz -267b4066e67139a38d29499331a002d6a29ad5be7aafc83db3b1e88f1b027f90 golangci-lint-1.21.0-linux-armv6.tar.gz -a602c1f25f90e46e621019cff0a8cb3f4e1837011f3537f15e730d6a9ebf507b golangci-lint-1.21.0-freebsd-armv7.tar.gz -2c861f8dc56b560474aa27cab0c075991628cc01af3451e27ac82f5d10d5106b golangci-lint-1.21.0-linux-amd64.tar.gz -a1c39e055280e755acaa906e7abfc20b99a5c28be8af541c57fbc44abbb20dde golangci-lint-1.21.0-linux-arm64.tar.gz -a8f8bda8c6a4136acf858091077830b1e83ad5612606cb69d5dced869ce00bd8 golangci-lint-1.21.0-linux-ppc64le.tar.gz -0a8a8c3bc660ccbca668897ab520f7ee9878f16cc8e4dd24fe46236ceec97ba3 golangci-lint-1.21.0-freebsd-armv6.tar.gz -699b07f45e216571f54002bcbd83b511c4801464a422162158e299587b095b18 golangci-lint-1.21.0-freebsd-amd64.tar.gz -980fb4993942154bb5c8129ea3b86de09574fe81b24384ebb58cd7a9d2f04483 golangci-lint-1.21.0-linux-armv7.tar.gz -f15b689088a47f20d5d3c1d945e9ee7c6238f2b84ea468b5f886cf8713dce62e golangci-lint-1.21.0-windows-386.zip -2e40ded7adcf11e59013cb15c24438b15a86526ca241edfcfdf1abd73a5280a8 golangci-lint-1.21.0-windows-amd64.zip -6052c7cfea4d6dc2fc722f6c12792a5ec087420198db495afffbc22052653bf7 
golangci-lint-1.21.0-freebsd-386.tar.gz -ca00b8eacf9af14a71b908b4149606c762aa5c0eac781e74ca0abedfdfdf6c8c golangci-lint-1.21.0-linux-s390x.tar.gz -1365455940c342f95718159d89d66ad2eef19f0846c3e87023e915a3527b929f golangci-lint-1.21.0-darwin-386.tar.gz -2b2713ec5007e67883aa501eebb81f22abfab0cf0909134ba90f60a066db3760 golangci-lint-1.21.0-darwin-amd64.tar.gz +d998a84eea42f2271aca792a7b027ca5c1edfcba229e8e5a844c9ac3f336df35 golangci-lint-1.27.0-linux-armv7.tar.gz +bf781f05b0d393b4bf0a327d9e62926949a4f14d7774d950c4e009fc766ed1d4 golangci-lint.exe-1.27.0-windows-amd64.zip +bf781f05b0d393b4bf0a327d9e62926949a4f14d7774d950c4e009fc766ed1d4 golangci-lint-1.27.0-windows-amd64.zip +0e2a57d6ba709440d3ed018ef1037465fa010ed02595829092860e5cf863042e golangci-lint-1.27.0-freebsd-386.tar.gz +90205fc42ab5ed0096413e790d88ac9b4ed60f4c47e576d13dc0660f7ed4b013 golangci-lint-1.27.0-linux-arm64.tar.gz +8d345e4e88520e21c113d81978e89ad77fc5b13bfdf20e5bca86b83fc4261272 golangci-lint-1.27.0-linux-amd64.tar.gz +cc619634a77f18dc73df2a0725be13116d64328dc35131ca1737a850d6f76a59 golangci-lint-1.27.0-freebsd-armv7.tar.gz +fe683583cfc9eeec83e498c0d6159d87b5e1919dbe4b6c3b3913089642906069 golangci-lint-1.27.0-linux-s390x.tar.gz +058f5579bee75bdaacbaf75b75e1369f7ad877fd8b3b145aed17a17545de913e golangci-lint-1.27.0-freebsd-armv6.tar.gz +38e1e3dadbe3f56ab62b4de82ee0b88e8fad966d8dfd740a26ef94c2edef9818 golangci-lint-1.27.0-linux-armv6.tar.gz +071b34af5516f4e1ddcaea6011e18208f4f043e1af8ba21eeccad4585cb3d095 golangci-lint.exe-1.27.0-windows-386.zip +071b34af5516f4e1ddcaea6011e18208f4f043e1af8ba21eeccad4585cb3d095 golangci-lint-1.27.0-windows-386.zip +5f37e2b33914ecddb7cad38186ef4ec61d88172fc04f930fa0267c91151ff306 golangci-lint-1.27.0-linux-386.tar.gz +4d94cfb51fdebeb205f1d5a349ac2b683c30591c5150708073c1c329e15965f0 golangci-lint-1.27.0-freebsd-amd64.tar.gz +52572ba8ff07d5169c2365d3de3fec26dc55a97522094d13d1596199580fa281 golangci-lint-1.27.0-linux-ppc64le.tar.gz +3fb1a1683a29c6c0a8cd76135f62b606fbdd538d5a7aeab94af1af70ffdc2fd4 golangci-lint-1.27.0-darwin-amd64.tar.gz diff --git a/build/ci.go b/build/ci.go index 1ec8e018324b..73d896162983 100644 --- a/build/ci.go +++ b/build/ci.go @@ -26,7 +26,7 @@ Available commands are: install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables test [ -coverage ] [ packages... 
] -- runs the tests lint -- runs certain pre-selected linters - archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artifacts + archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -signify key-envvar ] [ -upload dest ] -- archives build artifacts importkeys -- imports signing keys from env debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package nsis -- creates a Windows NSIS installer @@ -46,12 +46,11 @@ import ( "encoding/base64" "flag" "fmt" - "go/parser" - "go/token" "io/ioutil" "log" "os" "os/exec" + "path" "path/filepath" "regexp" "runtime" @@ -59,6 +58,7 @@ import ( "time" "github.com/cespare/cp" + "github.com/ethereum/go-ethereum/crypto/signify" "github.com/ethereum/go-ethereum/internal/build" "github.com/ethereum/go-ethereum/params" ) @@ -79,7 +79,6 @@ var ( executablePath("geth"), executablePath("puppeth"), executablePath("rlpdump"), - executablePath("wnode"), executablePath("clef"), } @@ -109,10 +108,6 @@ var ( BinaryName: "rlpdump", Description: "Developer utility tool that prints RLP structures.", }, - { - BinaryName: "wnode", - Description: "Ethereum Whisper diagnostic tool", - }, { BinaryName: "clef", Description: "Ethereum account management tool.", @@ -139,18 +134,25 @@ var ( // Note: zesty is unsupported because it was officially deprecated on Launchpad. // Note: artful is unsupported because it was officially deprecated on Launchpad. // Note: cosmic is unsupported because it was officially deprecated on Launchpad. + // Note: disco is unsupported because it was officially deprecated on Launchpad. + // Note: eoan is unsupported because it was officially deprecated on Launchpad. debDistroGoBoots = map[string]string{ "trusty": "golang-1.11", "xenial": "golang-go", "bionic": "golang-go", - "disco": "golang-go", - "eoan": "golang-go", + "focal": "golang-go", + "groovy": "golang-go", } debGoBootPaths = map[string]string{ "golang-1.11": "/usr/lib/go-1.11", "golang-go": "/usr/lib/go", } + + // This is the version of go that will be downloaded by + // + // go run ci.go install -dlgo + dlgoVersion = "1.15.6" ) var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin")) @@ -201,116 +203,130 @@ func main() { func doInstall(cmdline []string) { var ( + dlgo = flag.Bool("dlgo", false, "Download Go and build with it") arch = flag.String("arch", "", "Architecture to cross build for") cc = flag.String("cc", "", "C compiler to cross build with") ) flag.CommandLine.Parse(cmdline) env := build.Env() - // Check Go version. People regularly open issues about compilation + // Check local Go version. People regularly open issues about compilation // failure with outdated Go. This should save them the trouble. if !strings.Contains(runtime.Version(), "devel") { // Figure out the minor version number since we can't textually compare (1.10 < 1.9) var minor int fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor) - - if minor < 9 { + if minor < 13 { log.Println("You have Go version", runtime.Version()) - log.Println("go-ethereum requires at least Go version 1.9 and cannot") + log.Println("go-ethereum requires at least Go version 1.13 and cannot") log.Println("be compiled with an earlier version. Please upgrade your Go installation.") os.Exit(1) } } - // Compile packages given as arguments, or everything if there are no arguments. - packages := []string{"./..."} - if flag.NArg() > 0 { - packages = flag.Args() + + // Choose which go command we're going to use. 
+ var gobuild *exec.Cmd + if !*dlgo { + // Default behavior: use the go version which runs ci.go right now. + gobuild = goTool("build") + } else { + // Download of Go requested. This is for build environments where the + // installed version is too old and cannot be upgraded easily. + cachedir := filepath.Join("build", "cache") + goroot := downloadGo(runtime.GOARCH, runtime.GOOS, cachedir) + gobuild = localGoTool(goroot, "build") } - if *arch == "" || *arch == runtime.GOARCH { - goinstall := goTool("install", buildFlags(env)...) - if runtime.GOARCH == "arm64" { - goinstall.Args = append(goinstall.Args, "-p", "1") - } - goinstall.Args = append(goinstall.Args, "-v") - goinstall.Args = append(goinstall.Args, packages...) - build.MustRun(goinstall) - return + // Configure environment for cross build. + if *arch != "" || *arch != runtime.GOARCH { + gobuild.Env = append(gobuild.Env, "CGO_ENABLED=1") + gobuild.Env = append(gobuild.Env, "GOARCH="+*arch) } - // If we are cross compiling to ARMv5 ARMv6 or ARMv7, clean any previous builds - if *arch == "arm" { - os.RemoveAll(filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_arm")) - for _, path := range filepath.SplitList(build.GOPATH()) { - os.RemoveAll(filepath.Join(path, "pkg", runtime.GOOS+"_arm")) - } + + // Configure C compiler. + if *cc != "" { + gobuild.Env = append(gobuild.Env, "CC="+*cc) + } else if os.Getenv("CC") != "" { + gobuild.Env = append(gobuild.Env, "CC="+os.Getenv("CC")) + } + + // arm64 CI builders are memory-constrained and can't handle concurrent builds, + // better disable it. This check isn't the best, it should probably + // check for something in env instead. + if runtime.GOARCH == "arm64" { + gobuild.Args = append(gobuild.Args, "-p", "1") } - // Seems we are cross compiling, work around forbidden GOBIN - goinstall := goToolArch(*arch, *cc, "install", buildFlags(env)...) - goinstall.Args = append(goinstall.Args, "-v") - goinstall.Args = append(goinstall.Args, []string{"-buildmode", "archive"}...) - goinstall.Args = append(goinstall.Args, packages...) - build.MustRun(goinstall) + // Put the default settings in. + gobuild.Args = append(gobuild.Args, buildFlags(env)...) - if cmds, err := ioutil.ReadDir("cmd"); err == nil { - for _, cmd := range cmds { - pkgs, err := parser.ParseDir(token.NewFileSet(), filepath.Join(".", "cmd", cmd.Name()), nil, parser.PackageClauseOnly) - if err != nil { - log.Fatal(err) - } - for name := range pkgs { - if name == "main" { - gobuild := goToolArch(*arch, *cc, "build", buildFlags(env)...) - gobuild.Args = append(gobuild.Args, "-v") - gobuild.Args = append(gobuild.Args, []string{"-o", executablePath(cmd.Name())}...) - gobuild.Args = append(gobuild.Args, "."+string(filepath.Separator)+filepath.Join("cmd", cmd.Name())) - build.MustRun(gobuild) - break - } - } - } + // We use -trimpath to avoid leaking local paths into the built executables. + gobuild.Args = append(gobuild.Args, "-trimpath") + + // Show packages during build. + gobuild.Args = append(gobuild.Args, "-v") + + // Now we choose what we're even building. + // Default: collect all 'main' packages in cmd/ and build those. + packages := flag.Args() + if len(packages) == 0 { + packages = build.FindMainPackages("./cmd") + } + + // Do the build! 
+ for _, pkg := range packages { + args := make([]string, len(gobuild.Args)) + copy(args, gobuild.Args) + args = append(args, "-o", executablePath(path.Base(pkg))) + args = append(args, pkg) + build.MustRun(&exec.Cmd{Path: gobuild.Path, Args: args, Env: gobuild.Env}) } } +// buildFlags returns the go tool flags for building. func buildFlags(env build.Environment) (flags []string) { var ld []string if env.Commit != "" { ld = append(ld, "-X", "main.gitCommit="+env.Commit) ld = append(ld, "-X", "main.gitDate="+env.Date) } + // Strip DWARF on darwin. This used to be required for certain things, + // and there is no downside to this, so we just keep doing it. if runtime.GOOS == "darwin" { ld = append(ld, "-s") } - if len(ld) > 0 { flags = append(flags, "-ldflags", strings.Join(ld, " ")) } return flags } +// goTool returns the go tool. This uses the Go version which runs ci.go. func goTool(subcmd string, args ...string) *exec.Cmd { - return goToolArch(runtime.GOARCH, os.Getenv("CC"), subcmd, args...) + cmd := build.GoTool(subcmd, args...) + goToolSetEnv(cmd) + return cmd } -func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd { - cmd := build.GoTool(subcmd, args...) - cmd.Env = []string{"GOPATH=" + build.GOPATH()} - if arch == "" || arch == runtime.GOARCH { - cmd.Env = append(cmd.Env, "GOBIN="+GOBIN) - } else { - cmd.Env = append(cmd.Env, "CGO_ENABLED=1") - cmd.Env = append(cmd.Env, "GOARCH="+arch) - } - if cc != "" { - cmd.Env = append(cmd.Env, "CC="+cc) - } +// localGoTool returns the go tool from the given GOROOT. +func localGoTool(goroot string, subcmd string, args ...string) *exec.Cmd { + gotool := filepath.Join(goroot, "bin", "go") + cmd := exec.Command(gotool, subcmd) + goToolSetEnv(cmd) + cmd.Env = append(cmd.Env, "GOROOT="+goroot) + cmd.Args = append(cmd.Args, args...) + return cmd +} + +// goToolSetEnv forwards the build environment to the go tool. +func goToolSetEnv(cmd *exec.Cmd) { + cmd.Env = append(cmd.Env, "GOBIN="+GOBIN) for _, e := range os.Environ() { - if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") { + if strings.HasPrefix(e, "GOBIN=") || strings.HasPrefix(e, "CC=") { continue } cmd.Env = append(cmd.Env, e) } - return cmd } // Running The Tests @@ -332,7 +348,7 @@ func doTest(cmdline []string) { // Test a single package at a time. CI builders are slow // and some tests run into timeouts under load. gotest := goTool("test", buildFlags(env)...) - gotest.Args = append(gotest.Args, "-p", "1", "-timeout", "5m") + gotest.Args = append(gotest.Args, "-p", "1") if *coverage { gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover") } @@ -363,7 +379,7 @@ func doLint(cmdline []string) { // downloadLinter downloads and unpacks golangci-lint. 
func downloadLinter(cachedir string) string { - const version = "1.21.0" + const version = "1.27.0" csdb := build.MustLoadChecksums("build/checksums.txt") base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH) @@ -372,7 +388,7 @@ func downloadLinter(cachedir string) string { if err := csdb.DownloadFile(url, archivePath); err != nil { log.Fatal(err) } - if err := build.ExtractTarballArchive(archivePath, cachedir); err != nil { + if err := build.ExtractArchive(archivePath, cachedir); err != nil { log.Fatal(err) } return filepath.Join(cachedir, base, "golangci-lint") @@ -381,11 +397,12 @@ func downloadLinter(cachedir string) string { // Release Packaging func doArchive(cmdline []string) { var ( - arch = flag.String("arch", runtime.GOARCH, "Architecture cross packaging") - atype = flag.String("type", "zip", "Type of archive to write (zip|tar)") - signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. LINUX_SIGNING_KEY)`) - upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) - ext string + arch = flag.String("arch", runtime.GOARCH, "Architecture cross packaging") + atype = flag.String("type", "zip", "Type of archive to write (zip|tar)") + signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. LINUX_SIGNING_KEY)`) + signify = flag.String("signify", "", `Environment variable holding the signify key (e.g. LINUX_SIGNIFY_KEY)`) + upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) + ext string ) flag.CommandLine.Parse(cmdline) switch *atype { @@ -412,7 +429,7 @@ func doArchive(cmdline []string) { log.Fatal(err) } for _, archive := range []string{geth, alltools} { - if err := archiveUpload(archive, *upload, *signer); err != nil { + if err := archiveUpload(archive, *upload, *signer, *signify); err != nil { log.Fatal(err) } } @@ -432,7 +449,7 @@ func archiveBasename(arch string, archiveVersion string) string { return platform + "-" + archiveVersion } -func archiveUpload(archive string, blobstore string, signer string) error { +func archiveUpload(archive string, blobstore string, signer string, signifyVar string) error { // If signing was requested, generate the signature files if signer != "" { key := getenvBase64(signer) @@ -440,6 +457,14 @@ func archiveUpload(archive string, blobstore string, signer string) error { return err } } + if signifyVar != "" { + key := os.Getenv(signifyVar) + untrustedComment := "verify with geth-release.pub" + trustedComment := fmt.Sprintf("%s (%s)", archive, time.Now().UTC().Format(time.RFC1123)) + if err := signify.SignFile(archive, archive+".sig", key, untrustedComment, trustedComment); err != nil { + return err + } + } // If uploading to Azure was requested, push the archive possibly with its signature if blobstore != "" { auth := build.AzureBlobstoreConfig{ @@ -455,6 +480,11 @@ func archiveUpload(archive string, blobstore string, signer string) error { return err } } + if signifyVar != "" { + if err := build.AzureBlobstoreUpload(archive+".sig", filepath.Base(archive+".sig"), auth); err != nil { + return err + } + } } return nil } @@ -478,13 +508,12 @@ func maybeSkipArchive(env build.Environment) { // Debian Packaging func doDebianSource(cmdline []string) { var ( - goversion = flag.String("goversion", "", `Go version to build with (will be included in the source package)`) - cachedir = flag.String("cachedir", "./build/cache", `Filesystem path to cache the downloaded Go bundles 
at`) - signer = flag.String("signer", "", `Signing key name, also used as package author`) - upload = flag.String("upload", "", `Where to upload the source package (usually "ethereum/ethereum")`) - sshUser = flag.String("sftp-user", "", `Username for SFTP upload (usually "geth-ci")`) - workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`) - now = time.Now() + cachedir = flag.String("cachedir", "./build/cache", `Filesystem path to cache the downloaded Go bundles at`) + signer = flag.String("signer", "", `Signing key name, also used as package author`) + upload = flag.String("upload", "", `Where to upload the source package (usually "ethereum/ethereum")`) + sshUser = flag.String("sftp-user", "", `Username for SFTP upload (usually "geth-ci")`) + workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`) + now = time.Now() ) flag.CommandLine.Parse(cmdline) *workdir = makeWorkdir(*workdir) @@ -499,10 +528,10 @@ func doDebianSource(cmdline []string) { } // Download and verify the Go source package. - gobundle := downloadGoSources(*goversion, *cachedir) + gobundle := downloadGoSources(*cachedir) // Download all the dependencies needed to build the sources and run the ci script - srcdepfetch := goTool("install", "-n", "./...") + srcdepfetch := goTool("mod", "download") srcdepfetch.Env = append(os.Environ(), "GOPATH="+filepath.Join(*workdir, "modgopath")) build.MustRun(srcdepfetch) @@ -518,7 +547,7 @@ func doDebianSource(cmdline []string) { pkgdir := stageDebianSource(*workdir, meta) // Add Go source code - if err := build.ExtractTarballArchive(gobundle, pkgdir); err != nil { + if err := build.ExtractArchive(gobundle, pkgdir); err != nil { log.Fatalf("Failed to extract Go sources: %v", err) } if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".go")); err != nil { @@ -550,9 +579,10 @@ func doDebianSource(cmdline []string) { } } -func downloadGoSources(version string, cachedir string) string { +// downloadGoSources downloads the Go source tarball. +func downloadGoSources(cachedir string) string { csdb := build.MustLoadChecksums("build/checksums.txt") - file := fmt.Sprintf("go%s.src.tar.gz", version) + file := fmt.Sprintf("go%s.src.tar.gz", dlgoVersion) url := "https://dl.google.com/go/" + file dst := filepath.Join(cachedir, file) if err := csdb.DownloadFile(url, dst); err != nil { @@ -561,6 +591,41 @@ func downloadGoSources(version string, cachedir string) string { return dst } +// downloadGo downloads the Go binary distribution and unpacks it into a temporary +// directory. It returns the GOROOT of the unpacked toolchain. 
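archiveUpload above now optionally produces a .sig next to each artifact when the new -signify flag names an environment variable holding the key; a standalone sketch of the same signify.SignFile call (the archive name below is a placeholder, not a value used by ci.go):

package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"github.com/ethereum/go-ethereum/crypto/signify"
)

func main() {
	// The secret key is read from the environment, exactly as archiveUpload does
	// when -signify is set.
	key := os.Getenv("LINUX_SIGNIFY_KEY")
	archive := "geth-linux-amd64.tar.gz" // placeholder artifact name

	untrusted := "verify with geth-release.pub"
	trusted := fmt.Sprintf("%s (%s)", archive, time.Now().UTC().Format(time.RFC1123))
	if err := signify.SignFile(archive, archive+".sig", key, untrusted, trusted); err != nil {
		log.Fatal(err)
	}
}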
+func downloadGo(goarch, goos, cachedir string) string { + if goarch == "arm" { + goarch = "armv6l" + } + + csdb := build.MustLoadChecksums("build/checksums.txt") + file := fmt.Sprintf("go%s.%s-%s", dlgoVersion, goos, goarch) + if goos == "windows" { + file += ".zip" + } else { + file += ".tar.gz" + } + url := "https://golang.org/dl/" + file + dst := filepath.Join(cachedir, file) + if err := csdb.DownloadFile(url, dst); err != nil { + log.Fatal(err) + } + + ucache, err := os.UserCacheDir() + if err != nil { + log.Fatal(err) + } + godir := filepath.Join(ucache, fmt.Sprintf("geth-go-%s-%s-%s", dlgoVersion, goos, goarch)) + if err := build.ExtractArchive(dst, godir); err != nil { + log.Fatal(err) + } + goroot, err := filepath.Abs(filepath.Join(godir, "go")) + if err != nil { + log.Fatal(err) + } + return goroot +} + func ppaUpload(workdir, ppa, sshUser string, files []string) { p := strings.Split(ppa, "/") if len(p) != 2 { @@ -756,6 +821,7 @@ func doWindowsInstaller(cmdline []string) { var ( arch = flag.String("arch", runtime.GOARCH, "Architecture for cross build packaging") signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. WINDOWS_SIGNING_KEY)`) + signify = flag.String("signify key", "", `Environment variable holding the signify signing key (e.g. WINDOWS_SIGNIFY_KEY)`) upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`) ) @@ -817,7 +883,7 @@ func doWindowsInstaller(cmdline []string) { filepath.Join(*workdir, "geth.nsi"), ) // Sign and publish installer. - if err := archiveUpload(installer, *upload, *signer); err != nil { + if err := archiveUpload(installer, *upload, *signer, *signify); err != nil { log.Fatal(err) } } @@ -826,10 +892,11 @@ func doWindowsInstaller(cmdline []string) { func doAndroidArchive(cmdline []string) { var ( - local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`) - signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. ANDROID_SIGNING_KEY)`) - deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "https://oss.sonatype.org")`) - upload = flag.String("upload", "", `Destination to upload the archive (usually "gethstore/builds")`) + local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`) + signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. ANDROID_SIGNING_KEY)`) + signify = flag.String("signify", "", `Environment variable holding the signify signing key (e.g. 
ANDROID_SIGNIFY_KEY)`) + deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "https://oss.sonatype.org")`) + upload = flag.String("upload", "", `Destination to upload the archive (usually "gethstore/builds")`) ) flag.CommandLine.Parse(cmdline) env := build.Env() @@ -845,6 +912,7 @@ func doAndroidArchive(cmdline []string) { if *local { // If we're building locally, copy bundle to build dir and skip Maven os.Rename("geth.aar", filepath.Join(GOBIN, "geth.aar")) + os.Rename("geth-sources.jar", filepath.Join(GOBIN, "geth-sources.jar")) return } meta := newMavenMetadata(env) @@ -857,7 +925,7 @@ func doAndroidArchive(cmdline []string) { archive := "geth-" + archiveBasename("android", params.ArchiveVersion(env.Commit)) + ".aar" os.Rename("geth.aar", archive) - if err := archiveUpload(archive, *upload, *signer); err != nil { + if err := archiveUpload(archive, *upload, *signer, *signify); err != nil { log.Fatal(err) } // Sign and upload all the artifacts to Maven Central @@ -888,15 +956,15 @@ func gomobileTool(subcmd string, args ...string) *exec.Cmd { cmd := exec.Command(filepath.Join(GOBIN, "gomobile"), subcmd) cmd.Args = append(cmd.Args, args...) cmd.Env = []string{ - "GOPATH=" + build.GOPATH(), "PATH=" + GOBIN + string(os.PathListSeparator) + os.Getenv("PATH"), } for _, e := range os.Environ() { - if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "PATH=") { + if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "PATH=") || strings.HasPrefix(e, "GOBIN=") { continue } cmd.Env = append(cmd.Env, e) } + cmd.Env = append(cmd.Env, "GOBIN="+GOBIN) return cmd } @@ -950,10 +1018,11 @@ func newMavenMetadata(env build.Environment) mavenMetadata { func doXCodeFramework(cmdline []string) { var ( - local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`) - signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. IOS_SIGNING_KEY)`) - deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "trunk")`) - upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) + local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`) + signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. IOS_SIGNING_KEY)`) + signify = flag.String("signify", "", `Environment variable holding the signify signing key (e.g. 
IOS_SIGNIFY_KEY)`) + deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "trunk")`) + upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) ) flag.CommandLine.Parse(cmdline) env := build.Env() @@ -965,7 +1034,7 @@ func doXCodeFramework(cmdline []string) { if *local { // If we're building locally, use the build folder and stop afterwards - bind.Dir, _ = filepath.Abs(GOBIN) + bind.Dir = GOBIN build.MustRun(bind) return } @@ -981,14 +1050,14 @@ func doXCodeFramework(cmdline []string) { maybeSkipArchive(env) // Sign and upload the framework to Azure - if err := archiveUpload(archive+".tar.gz", *upload, *signer); err != nil { + if err := archiveUpload(archive+".tar.gz", *upload, *signer, *signify); err != nil { log.Fatal(err) } // Prepare and upload a PodSpec to CocoaPods if *deploy != "" { meta := newPodMetadata(env, archive) build.Render("build/pod.podspec", "Geth.podspec", 0755, meta) - build.MustRunCommand("pod", *deploy, "push", "Geth.podspec", "--allow-warnings", "--verbose") + build.MustRunCommand("pod", *deploy, "push", "Geth.podspec", "--allow-warnings") } } @@ -1078,7 +1147,6 @@ func xgoTool(args []string) *exec.Cmd { cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...) cmd.Env = os.Environ() cmd.Env = append(cmd.Env, []string{ - "GOPATH=" + build.GOPATH(), "GOBIN=" + GOBIN, }...) return cmd @@ -1107,6 +1175,8 @@ func doPurge(cmdline []string) { if err != nil { log.Fatal(err) } + fmt.Printf("Found %d blobs\n", len(blobs)) + // Iterate over the blobs, collect and sort all unstable builds for i := 0; i < len(blobs); i++ { if !strings.Contains(blobs[i].Name, "unstable") { @@ -1128,6 +1198,7 @@ func doPurge(cmdline []string) { break } } + fmt.Printf("Deleting %d blobs\n", len(blobs)) // Delete all marked as such and return if err := build.AzureBlobstoreDelete(auth, blobs); err != nil { log.Fatal(err) diff --git a/build/deb/ethereum/deb.rules b/build/deb/ethereum/deb.rules index 983b87af1639..0677ef91e404 100644 --- a/build/deb/ethereum/deb.rules +++ b/build/deb/ethereum/deb.rules @@ -6,6 +6,7 @@ # Launchpad rejects Go's access to $HOME, use custom folders export GOCACHE=/tmp/go-build +export GOPATH=/tmp/gopath export GOROOT_BOOTSTRAP={{.GoBootPath}} override_dh_auto_clean: @@ -19,10 +20,11 @@ override_dh_auto_build: # We can't download external go modules within Launchpad, so we're shipping the # entire dependency source cache with go-ethereum. - (mkdir -p build/_workspace/pkg/mod && mv .mod/* build/_workspace/pkg/mod) + mkdir -p $(GOPATH)/pkg + mv .mod $(GOPATH)/pkg/mod # A fresh Go was built, all dependency downloads faked, hope build works now - build/env.sh ../.go/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}} + ../.go/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}} override_dh_auto_test: diff --git a/build/env.sh b/build/env.sh deleted file mode 100755 index 3914555d1bbd..000000000000 --- a/build/env.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/sh - -set -e - -if [ ! -f "build/env.sh" ]; then - echo "$0 must be run from the root of the repository." - exit 2 -fi - -# Create fake Go workspace if it doesn't exist yet. -workspace="$PWD/build/_workspace" -root="$PWD" -ethdir="$workspace/src/github.com/ethereum" -if [ ! 
-L "$ethdir/go-ethereum" ]; then - mkdir -p "$ethdir" - cd "$ethdir" - ln -s ../../../../../. go-ethereum - cd "$root" -fi - -# Set up the environment to use the workspace. -GOPATH="$workspace" -export GOPATH - -# Run the command inside the workspace. -cd "$ethdir/go-ethereum" -PWD="$ethdir/go-ethereum" - -# Launch the arguments with the configured environment. -exec "$@" diff --git a/build/nsis.install.nsh b/build/nsis.install.nsh index 57ef5a37c6a4..9b73148a4497 100644 --- a/build/nsis.install.nsh +++ b/build/nsis.install.nsh @@ -19,9 +19,9 @@ Section "Geth" GETH_IDX # Create start menu launcher createDirectory "$SMPROGRAMS\${APPNAME}" - createShortCut "$SMPROGRAMS\${APPNAME}\${APPNAME}.lnk" "$INSTDIR\geth.exe" "--fast" "--cache=512" - createShortCut "$SMPROGRAMS\${APPNAME}\Attach.lnk" "$INSTDIR\geth.exe" "attach" "" "" - createShortCut "$SMPROGRAMS\${APPNAME}\Uninstall.lnk" "$INSTDIR\uninstall.exe" "" "" "" + createShortCut "$SMPROGRAMS\${APPNAME}\${APPNAME}.lnk" "$INSTDIR\geth.exe" + createShortCut "$SMPROGRAMS\${APPNAME}\Attach.lnk" "$INSTDIR\geth.exe" "attach" + createShortCut "$SMPROGRAMS\${APPNAME}\Uninstall.lnk" "$INSTDIR\uninstall.exe" # Firewall - remove rules (if exists) SimpleFC::AdvRemoveRule "Geth incoming peers (TCP:30303)" diff --git a/cmd/abidump/main.go b/cmd/abidump/main.go new file mode 100644 index 000000000000..35cbcbb0ed2d --- /dev/null +++ b/cmd/abidump/main.go @@ -0,0 +1,74 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package main + +import ( + "encoding/hex" + "flag" + "fmt" + "os" + "strings" + + "github.com/ethereum/go-ethereum/signer/core" + "github.com/ethereum/go-ethereum/signer/fourbyte" +) + +func init() { + flag.Usage = func() { + fmt.Fprintln(os.Stderr, "Usage:", os.Args[0], "") + flag.PrintDefaults() + fmt.Fprintln(os.Stderr, ` +Parses the given ABI data and tries to interpret it from the fourbyte database.`) + } +} + +func parse(data []byte) { + db, err := fourbyte.New() + if err != nil { + die(err) + } + messages := core.ValidationMessages{} + db.ValidateCallData(nil, data, &messages) + for _, m := range messages.Messages { + fmt.Printf("%v: %v\n", m.Typ, m.Message) + } +} + +// Example +// ./abidump a9059cbb000000000000000000000000ea0e2dc7d65a50e77fc7e84bff3fd2a9e781ff5c0000000000000000000000000000000000000000000000015af1d78b58c40000 +func main() { + flag.Parse() + + switch { + case flag.NArg() == 1: + hexdata := flag.Arg(0) + data, err := hex.DecodeString(strings.TrimPrefix(hexdata, "0x")) + if err != nil { + die(err) + } + parse(data) + default: + fmt.Fprintln(os.Stderr, "Error: one argument needed") + flag.Usage() + os.Exit(2) + } +} + +func die(args ...interface{}) { + fmt.Fprintln(os.Stderr, args...) 
+ os.Exit(1) +} diff --git a/cmd/abigen/main.go b/cmd/abigen/main.go index 252d8ef80002..a74b0396d40f 100644 --- a/cmd/abigen/main.go +++ b/cmd/abigen/main.go @@ -21,13 +21,16 @@ import ( "fmt" "io/ioutil" "os" + "path/filepath" "regexp" "strings" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common/compiler" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/log" "gopkg.in/urfave/cli.v1" ) @@ -98,7 +101,7 @@ var ( ) func init() { - app = utils.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool") + app = flags.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool") app.Flags = []cli.Flag{ abiFlag, binFlag, @@ -115,7 +118,7 @@ func init() { aliasFlag, } app.Action = utils.MigrateFlags(abigen) - cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate + cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate } func abigen(c *cli.Context) error { @@ -193,10 +196,22 @@ func abigen(c *cli.Context) error { utils.Fatalf("Failed to build Solidity contract: %v", err) } case c.GlobalIsSet(vyFlag.Name): - contracts, err = compiler.CompileVyper(c.GlobalString(vyperFlag.Name), c.GlobalString(vyFlag.Name)) + output, err := compiler.CompileVyper(c.GlobalString(vyperFlag.Name), c.GlobalString(vyFlag.Name)) if err != nil { utils.Fatalf("Failed to build Vyper contract: %v", err) } + contracts = make(map[string]*compiler.Contract) + for n, contract := range output { + name := n + // Sanitize the combined json names to match the + // format expected by solidity. + if !strings.Contains(n, ":") { + // Remove extra path components + name = abi.ToCamelCase(strings.TrimSuffix(filepath.Base(name), ".vy")) + } + contracts[name] = contract + } + case c.GlobalIsSet(jsonFlag.Name): jsonOutput, err := ioutil.ReadFile(c.GlobalString(jsonFlag.Name)) if err != nil { diff --git a/cmd/bootnode/main.go b/cmd/bootnode/main.go index f6e2a14c3bd2..6c9ff615a18b 100644 --- a/cmd/bootnode/main.go +++ b/cmd/bootnode/main.go @@ -44,7 +44,7 @@ func main() { natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:)") netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)") runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode") - verbosity = flag.Int("verbosity", int(log.LvlInfo), "log verbosity (0-9)") + verbosity = flag.Int("verbosity", int(log.LvlInfo), "log verbosity (0-5)") vmodule = flag.String("vmodule", "", "log verbosity pattern") nodeKey *ecdsa.PrivateKey diff --git a/cmd/checkpoint-admin/README.md b/cmd/checkpoint-admin/README.md new file mode 100644 index 000000000000..43e3785ec2fa --- /dev/null +++ b/cmd/checkpoint-admin/README.md @@ -0,0 +1,103 @@ +## Checkpoint-admin + +Checkpoint-admin is a tool for updating checkpoint oracle status. It provides a series of functions including deploying checkpoint oracle contract, signing for new checkpoints, and updating checkpoints in the checkpoint oracle contract. + +### Checkpoint + +In the LES protocol, there is an important concept called checkpoint. 
In simple terms, whenever a certain number of blocks are generated on the blockchain, a new checkpoint is generated which contains some important information such as + +* Block hash at checkpoint +* Canonical hash trie root at checkpoint +* Bloom trie root at checkpoint + +*For a more detailed introduction to checkpoint, please see the LES [spec](https://github.com/ethereum/devp2p/blob/master/caps/les.md).* + +Using this information, light clients can skip all historical block headers when synchronizing data and start synchronization from this checkpoint. Therefore, as long as the light client can obtain some latest and correct checkpoints, the amount of data and time for synchronization will be greatly reduced. + +However, from a security perspective, the most critical step in a synchronization algorithm based on checkpoints is to determine whether the checkpoint used by the light client is correct. Otherwise, all blockchain data synchronized based on this checkpoint may be wrong. For this we provide two different ways to ensure the correctness of the checkpoint used by the light client. + +#### Hardcoded checkpoint + +There are several hardcoded checkpoints in the [source code](https://github.com/ethereum/go-ethereum/blob/master/params/config.go#L38) of the go-ethereum project. These checkpoints are updated by go-ethereum developers when new versions of software are released. Because light client users trust Geth developers to some extent, hardcoded checkpoints in the code can also be considered correct. + +#### Checkpoint oracle + +Hardcoded checkpoints can solve the problem of verifying the correctness of checkpoints (although this is a more centralized solution). But the pain point of this solution is that developers can only update checkpoints when a new version of software is released. In addition, light client users usually do not keep the Geth version they use always up to date. So hardcoded checkpoints used by users are generally stale. Therefore, it still needs to download a large amount of blockchain data during synchronization. + +Checkpoint oracle is a more flexible solution. In simple terms, this is a smart contract that is deployed on the blockchain. The smart contract records several designated trusted signers. Whenever enough trusted signers have issued their signatures for the same checkpoint, it can be considered that the checkpoint has been authenticated by the signers. Checkpoints authenticated by trusted signers can be considered correct. + +So this way, even without updating the software version, as long as the trusted signers regularly update the checkpoint in oracle on time, the light client can always use the latest and verified checkpoint for data synchronization. + +### Usage + +Checkpoint-admin is a command line tool designed for checkpoint oracle. Users can easily deploy contracts and update checkpoints through this tool. + +#### Install + +```shell +go get github.com/ethereum/go-ethereum/cmd/checkpoint-admin +``` + +#### Deploy + +Deploy checkpoint oracle contract. `--signers` indicates the specified trusted signer, and `--threshold` indicates the minimum number of signatures required by trusted signers to update a checkpoint. + +```shell +checkpoint-admin deploy --rpc --clef --signer --signers --threshold 1 +``` + +It is worth noting that checkpoint-admin only supports clef as a signer for transactions and plain text(checkpoint). For more clef usage, please see the clef [tutorial](https://geth.ethereum.org/docs/clef/tutorial) . 
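
The signing commands in the next section operate on a checkpoint hash. As a rough illustration of how such a digest could be assembled from the checkpoint fields listed earlier (section index, block hash, CHT root and bloom trie root), here is a minimal Go sketch; the authoritative calculation lives in `params/config.go` (linked below), and the field order and big-endian index encoding used here are assumptions.

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// checkpointHash sketches how a checkpoint digest might be derived from its
// fields. Field order and the big-endian section-index encoding are
// assumptions; see params/config.go for the real calculation.
func checkpointHash(sectionIndex uint64, sectionHead, chtRoot, bloomRoot common.Hash) common.Hash {
	var index [8]byte
	binary.BigEndian.PutUint64(index[:], sectionIndex)
	return crypto.Keccak256Hash(index[:], sectionHead[:], chtRoot[:], bloomRoot[:])
}

func main() {
	// Placeholder values for illustration only.
	head := common.HexToHash("0x01")
	cht := common.HexToHash("0x02")
	bloom := common.HexToHash("0x03")
	fmt.Println(checkpointHash(42, head, cht, bloom).Hex())
}
```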
+ +#### Sign + +Checkpoint-admin provides two different modes of signing. You can automatically obtain the current stable checkpoint and sign it interactively, and you can also use the information provided by the command line flags to sign checkpoint offline. + +**Interactive mode** + +```shell +checkpoint-admin sign --clef --signer --rpc +``` + +*It is worth noting that the connected Geth node can be a fullnode or a light client. If it is fullnode, you must enable the LES protocol. E.G. add `--light.serv 50` to the startup command line flags*. + +**Offline mode** + +```shell +checkpoint-admin sign --clef --signer --index --hash --oracle +``` + +*CHECKPOINT_HASH is obtained based on this [calculation method](https://github.com/ethereum/go-ethereum/blob/master/params/config.go#L251).* + +#### Publish + +Collect enough signatures from different trusted signers for the same checkpoint and submit them to oracle to update the "authenticated" checkpoint in the contract. + +```shell +checkpoint-admin publish --clef --rpc --signer --index --signatures +``` + +#### Status query + +Check the latest status of checkpoint oracle. + +```shell +checkpoint-admin status --rpc +``` + +### Enable checkpoint oracle in your private network + +Currently, only the Ethereum mainnet and the default supported test networks (ropsten, rinkeby, goerli) activate this feature. If you want to activate this feature in your private network, you can overwrite the relevant checkpoint oracle settings through the configuration file after deploying the oracle contract. + +* Get your node configuration file `geth dumpconfig OTHER_COMMAND_LINE_OPTIONS > config.toml` +* Edit the configuration file and add the following information + +```toml +[Eth.CheckpointOracle] +Address = CHECKPOINT_ORACLE_ADDRESS +Signers = [TRUSTED_SIGNER_1, ..., TRUSTED_SIGNER_N] +Threshold = THRESHOLD +``` + +* Start geth with the modified configuration file + +*In the private network, all fullnodes and light clients need to be started using the same checkpoint oracle settings.* \ No newline at end of file diff --git a/cmd/checkpoint-admin/main.go b/cmd/checkpoint-admin/main.go index b4d8e0db5acf..0fb553214778 100644 --- a/cmd/checkpoint-admin/main.go +++ b/cmd/checkpoint-admin/main.go @@ -22,8 +22,8 @@ import ( "fmt" "os" - "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common/fdlimit" + "github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/log" "gopkg.in/urfave/cli.v1" ) @@ -37,7 +37,7 @@ var ( var app *cli.App func init() { - app = utils.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool") + app = flags.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool") app.Commands = []cli.Command{ commandStatus, commandDeploy, @@ -48,7 +48,7 @@ func init() { oracleFlag, nodeURLFlag, } - cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate + cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate } // Commonly used command line flags. diff --git a/cmd/clef/README.md b/cmd/clef/README.md index 90afe8c8c8e7..700f0a144b03 100644 --- a/cmd/clef/README.md +++ b/cmd/clef/README.md @@ -9,7 +9,7 @@ Clef can run as a daemon on the same machine, off a usb-stick like [USB armory]( Check out the * [CLI tutorial](tutorial.md) for some concrete examples on how Clef works. -* [Setup docs](docs/setup.md) for infos on how to configure Clef on QubesOS or USB Armory. +* [Setup docs](docs/setup.md) for information on how to configure Clef on QubesOS or USB Armory. 
* [Data types](datatypes.md) for details on the communication messages between Clef and an external UI. ## Command line flags @@ -33,12 +33,12 @@ GLOBAL OPTIONS: --lightkdf Reduce key-derivation RAM & CPU usage at some expense of KDF strength --nousb Disables monitoring for and managing USB hardware wallets --pcscdpath value Path to the smartcard daemon (pcscd) socket file (default: "/run/pcscd/pcscd.comm") - --rpcaddr value HTTP-RPC server listening interface (default: "localhost") - --rpcvhosts value Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. (default: "localhost") + --http.addr value HTTP-RPC server listening interface (default: "localhost") + --http.vhosts value Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. (default: "localhost") --ipcdisable Disable the IPC-RPC server --ipcpath Filename for IPC socket/pipe within the datadir (explicit paths escape it) - --rpc Enable the HTTP-RPC server - --rpcport value HTTP-RPC server listening port (default: 8550) + --http Enable the HTTP-RPC server + --http.port value HTTP-RPC server listening port (default: 8550) --signersecret value A file containing the (encrypted) master seed to encrypt Clef data, e.g. keystore credentials and ruleset hash --4bytedb-custom value File used for writing new 4byte-identifiers submitted via API (default: "./4byte-custom.json") --auditlog value File used to emit audit logs. Set to "" to disable (default: "audit.log") @@ -46,6 +46,7 @@ GLOBAL OPTIONS: --stdio-ui Use STDIN/STDOUT as a channel for an external UI. This means that an STDIN/STDOUT is used for RPC-communication with a e.g. a graphical user interface, and can be used when Clef is started by an external process. --stdio-ui-test Mechanism to test interface between Clef and UI. Requires 'stdio-ui'. --advanced If enabled, issues warnings instead of rejections for suspicious requests. Default off + --suppress-bootwarn If set, does not show the warning during boot --help, -h show help --version, -v print the version ``` @@ -112,11 +113,11 @@ Some snags and todos ### External API -Clef listens to HTTP requests on `rpcaddr`:`rpcport` (or to IPC on `ipcpath`), with the same JSON-RPC standard as Geth. The messages are expected to be [JSON-RPC 2.0 standard](https://www.jsonrpc.org/specification). +Clef listens to HTTP requests on `http.addr`:`http.port` (or to IPC on `ipcpath`), with the same JSON-RPC standard as Geth. The messages are expected to be [JSON-RPC 2.0 standard](https://www.jsonrpc.org/specification). -Some of these call can require user interaction. Clients must be aware that responses may be delayed significantly or may never be received if a users decides to ignore the confirmation request. +Some of these calls can require user interaction. Clients must be aware that responses may be delayed significantly or may never be received if a user decides to ignore the confirmation request. -The External API is **untrusted**: it does not accept credentials over this API, nor does it expect that requests have any authority. +The External API is **untrusted**: it does not accept credentials, nor does it expect that requests have any authority. ### Internal UI API @@ -145,13 +146,11 @@ See the [external API changelog](extapi_changelog.md) for information about chan All hex encoded values must be prefixed with `0x`. 
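
To make the transport concrete: once Clef is started with the HTTP flags above (e.g. `--http --http.addr localhost --http.port 8550`), any JSON-RPC 2.0 client can talk to the external API. Below is a minimal Go sketch using go-ethereum's own `rpc` package to call the `account_list` method documented further down; the endpoint address is an assumption based on the default flag values.

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Assumes Clef was started with --http --http.addr localhost --http.port 8550.
	client, err := rpc.Dial("http://localhost:8550")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// account_list returns the 0x-prefixed addresses of the available accounts.
	// Clef may ask for confirmation on its own console before answering.
	var accounts []string
	if err := client.Call(&accounts, "account_list"); err != nil {
		log.Fatal(err)
	}
	fmt.Println(accounts)
}
```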
-## Methods - ### account_new #### Create new password protected account -The signer will generate a new private key, encrypts it according to [web3 keystore spec](https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition) and stores it in the keystore directory. +The signer will generate a new private key, encrypt it according to [web3 keystore spec](https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition) and store it in the keystore directory. The client is responsible for creating a backup of the keystore. If the keystore is lost there is no method of retrieving lost accounts. #### Arguments @@ -160,7 +159,6 @@ None #### Result - address [string]: account address that is derived from the generated key - - url [string]: location of the keyfile #### Sample call ```json @@ -172,14 +170,11 @@ None } ``` Response -``` +```json { "id": 0, "jsonrpc": "2.0", - "result": { - "address": "0xbea9183f8f4f03d427f6bcea17388bdff1cab133", - "url": "keystore:///my/keystore/UTC--2017-08-24T08-40-15.419655028Z--bea9183f8f4f03d427f6bcea17388bdff1cab133" - } + "result": "0xbea9183f8f4f03d427f6bcea17388bdff1cab133" } ``` @@ -195,8 +190,6 @@ None #### Result - array with account records: - account.address [string]: account address that is derived from the generated key - - account.type [string]: type of the - - account.url [string]: location of the account #### Sample call ```json @@ -207,21 +200,13 @@ None } ``` Response -``` +```json { "id": 1, "jsonrpc": "2.0", "result": [ - { - "address": "0xafb2f771f58513609765698f65d3f2f0224a956f", - "type": "account", - "url": "keystore:///tmp/keystore/UTC--2017-08-24T07-26-47.162109726Z--afb2f771f58513609765698f65d3f2f0224a956f" - }, - { - "address": "0xbea9183f8f4f03d427f6bcea17388bdff1cab133", - "type": "account", - "url": "keystore:///tmp/keystore/UTC--2017-08-24T08-40-15.419655028Z--bea9183f8f4f03d427f6bcea17388bdff1cab133" - } + "0xafb2f771f58513609765698f65d3f2f0224a956f", + "0xbea9183f8f4f03d427f6bcea17388bdff1cab133" ] } ``` @@ -229,10 +214,10 @@ Response ### account_signTransaction #### Sign transactions - Signs a transactions and responds with the signed transaction in RLP encoded form. + Signs a transaction and responds with the signed transaction in RLP-encoded and JSON forms. #### Arguments - 2. transaction object: + 1. transaction object: - `from` [address]: account to send the transaction from - `to` [address]: receiver account. If omitted or `0x`, will cause contract creation. - `gas` [number]: maximum amount of gas to burn @@ -240,12 +225,13 @@ Response - `value` [number:optional]: amount of Wei to send with the transaction - `data` [data:optional]: input data - `nonce` [number]: account nonce - 3. method signature [string:optional] + 1. method signature [string:optional] - The method signature, if present, is to aid decoding the calldata. Should consist of `methodname(paramtype,...)`, e.g. `transfer(uint256,address)`. The signer may use this data to parse the supplied calldata, and show the user. The data, however, is considered totally untrusted, and reliability is not expected. 
#### Result - - signed transaction in RLP encoded form [data] + - raw [data]: signed transaction in RLP encoded form + - tx [json]: signed transaction in JSON form #### Sample call ```json @@ -270,11 +256,22 @@ Response ```json { - "id": 2, "jsonrpc": "2.0", - "error": { - "code": -32000, - "message": "Request denied" + "id": 2, + "result": { + "raw": "0xf88380018203339407a565b7ed7d7a678680a4c162885bedbb695fe080a44401a6e4000000000000000000000000000000000000000000000000000000000000001226a0223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20ea02aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663", + "tx": { + "nonce": "0x0", + "gasPrice": "0x1234", + "gas": "0x55555", + "to": "0x07a565b7ed7d7a678680a4c162885bedbb695fe0", + "value": "0x1234", + "input": "0xabcd", + "v": "0x26", + "r": "0x223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20e", + "s": "0x2aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663", + "hash": "0xeba2df809e7a612a0a0d444ccfa5c839624bdc00dd29e3340d46df3870f8a30e" + } } } ``` @@ -326,7 +323,7 @@ Response Bash example: ```bash -#curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_signTransaction","params":[{"from":"0x694267f14675d7e1b9494fd8d72fefe1755710fa","gas":"0x333","gasPrice":"0x1","nonce":"0x0","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0", "value":"0x0", "data":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012"},"safeSend(address)"],"id":67}' http://localhost:8550/ +> curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_signTransaction","params":[{"from":"0x694267f14675d7e1b9494fd8d72fefe1755710fa","gas":"0x333","gasPrice":"0x1","nonce":"0x0","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0", "value":"0x0", "data":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012"},"safeSend(address)"],"id":67}' http://localhost:8550/ {"jsonrpc":"2.0","id":67,"result":{"raw":"0xf88380018203339407a565b7ed7d7a678680a4c162885bedbb695fe080a44401a6e4000000000000000000000000000000000000000000000000000000000000001226a0223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20ea02aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663","tx":{"nonce":"0x0","gasPrice":"0x1","gas":"0x333","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0","value":"0x0","input":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012","v":"0x26","r":"0x223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20e","s":"0x2aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663","hash":"0xeba2df809e7a612a0a0d444ccfa5c839624bdc00dd29e3340d46df3870f8a30e"}}} ``` @@ -373,7 +370,7 @@ Response ### account_signTypedData #### Sign data - Signs a chunk of structured data conformant to [EIP712]([EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md)) and returns the calculated signature. + Signs a chunk of structured data conformant to [EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md) and returns the calculated signature. #### Arguments - account [address]: account to sign with @@ -469,7 +466,7 @@ Response ### account_ecRecover -#### Sign data +#### Recover the signing address Derive the address from the account that was used to sign data with content type `text/plain` and the signature. 
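
A rough client-side equivalent of this recovery, using the go-ethereum libraries, is sketched below. It assumes the 65-byte `[R || S || V]` signature returned by `account_signData` (with V typically encoded as 27/28) and the `\x19Ethereum Signed Message:\n` prefix that Clef applies to `text/plain` content; both the V normalization and the prefix handling are assumptions about the signer's encoding, not a definitive implementation.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// recoverTextSigner sketches what account_ecRecover does for text/plain data:
// hash the message with the signed-message prefix and recover the address
// from the 65-byte signature. The V normalization (27/28 -> 0/1) is an
// assumption about how the signature is encoded by the signer.
func recoverTextSigner(data, sig []byte) (common.Address, error) {
	if len(sig) != 65 {
		return common.Address{}, errors.New("signature must be 65 bytes")
	}
	s := make([]byte, 65)
	copy(s, sig)
	if s[64] >= 27 {
		s[64] -= 27
	}
	pub, err := crypto.SigToPub(accounts.TextHash(data), s)
	if err != nil {
		return common.Address{}, err
	}
	return crypto.PubkeyToAddress(*pub), nil
}

func main() {
	// Placeholder inputs for illustration; use the real message and the
	// signature returned by account_signData.
	addr, err := recoverTextSigner([]byte("hello"), make([]byte, 65))
	fmt.Println(addr, err)
}
```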
@@ -487,7 +484,6 @@ Derive the address from the account that was used to sign data with content type "jsonrpc": "2.0", "method": "account_ecRecover", "params": [ - "data/plain", "0xaabbccdd", "0x5b6693f153b48ec1c706ba4169960386dbaa6903e249cc79a8e6ddc434451d417e1e57327872c7f538beeb323c300afa9999a3d4a5de6caf3be0d5ef832b67ef1c" ] @@ -503,117 +499,36 @@ Response } ``` -### account_import +### account_version -#### Import account - Import a private key into the keystore. The imported key is expected to be encrypted according to the web3 keystore - format. - -#### Arguments - - account [object]: key in [web3 keystore format](https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition) (retrieved with account_export) +#### Get external API version -#### Result - - imported key [object]: - - key.address [address]: address of the imported key - - key.type [string]: type of the account - - key.url [string]: key URL - -#### Sample call -```json -{ - "id": 6, - "jsonrpc": "2.0", - "method": "account_import", - "params": [ - { - "address": "c7412fc59930fd90099c917a50e5f11d0934b2f5", - "crypto": { - "cipher": "aes-128-ctr", - "cipherparams": { - "iv": "401c39a7c7af0388491c3d3ecb39f532" - }, - "ciphertext": "eb045260b18dd35cd0e6d99ead52f8fa1e63a6b0af2d52a8de198e59ad783204", - "kdf": "scrypt", - "kdfparams": { - "dklen": 32, - "n": 262144, - "p": 1, - "r": 8, - "salt": "9a657e3618527c9b5580ded60c12092e5038922667b7b76b906496f021bb841a" - }, - "mac": "880dc10bc06e9cec78eb9830aeb1e7a4a26b4c2c19615c94acb632992b952806" - }, - "id": "09bccb61-b8d3-4e93-bf4f-205a8194f0b9", - "version": 3 - } - ] -} -``` -Response - -```json -{ - "id": 6, - "jsonrpc": "2.0", - "result": { - "address": "0xc7412fc59930fd90099c917a50e5f11d0934b2f5", - "type": "account", - "url": "keystore:///tmp/keystore/UTC--2017-08-24T11-00-42.032024108Z--c7412fc59930fd90099c917a50e5f11d0934b2f5" - } -} -``` - -### account_export - -#### Export account from keystore - Export a private key from the keystore. The exported private key is encrypted with the original password. When the - key is imported later this password is required. +Get the version of the external API used by Clef. #### Arguments - - account [address]: export private key that is associated with this account + +None #### Result - - exported key, see [web3 keystore format](https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition) for - more information + +* external API version [string] #### Sample call ```json { - "id": 5, + "id": 0, "jsonrpc": "2.0", - "method": "account_export", - "params": [ - "0xc7412fc59930fd90099c917a50e5f11d0934b2f5" - ] + "method": "account_version", + "params": [] } ``` -Response +Response ```json { - "id": 5, - "jsonrpc": "2.0", - "result": { - "address": "c7412fc59930fd90099c917a50e5f11d0934b2f5", - "crypto": { - "cipher": "aes-128-ctr", - "cipherparams": { - "iv": "401c39a7c7af0388491c3d3ecb39f532" - }, - "ciphertext": "eb045260b18dd35cd0e6d99ead52f8fa1e63a6b0af2d52a8de198e59ad783204", - "kdf": "scrypt", - "kdfparams": { - "dklen": 32, - "n": 262144, - "p": 1, - "r": 8, - "salt": "9a657e3618527c9b5580ded60c12092e5038922667b7b76b906496f021bb841a" - }, - "mac": "880dc10bc06e9cec78eb9830aeb1e7a4a26b4c2c19615c94acb632992b952806" - }, - "id": "09bccb61-b8d3-4e93-bf4f-205a8194f0b9", - "version": 3 - } + "id": 0, + "jsonrpc": "2.0", + "result": "6.0.0" } ``` @@ -625,7 +540,7 @@ By starting the signer with the switch `--stdio-ui-test`, the signer will invoke denials. 
This can be used during development to ensure that the API is (at least somewhat) correctly implemented. See `pythonsigner`, which can be invoked via `python3 pythonsigner.py test` to perform the 'denial-handshake-test'. -All methods in this API uses object-based parameters, so that there can be no mixups of parameters: each piece of data is accessed by key. +All methods in this API use object-based parameters, so that there can be no mixup of parameters: each piece of data is accessed by key. See the [ui API changelog](intapi_changelog.md) for information about changes to this API. @@ -784,12 +699,10 @@ Invoked when a request for account listing has been made. { "accounts": [ { - "type": "Account", "url": "keystore:///home/bazonk/.ethereum/keystore/UTC--2017-11-20T14-44-54.089682944Z--123409812340981234098123409812deadbeef42", "address": "0x123409812340981234098123409812deadbeef42" }, { - "type": "Account", "url": "keystore:///home/bazonk/.ethereum/keystore/UTC--2017-11-23T21-59-03.199240693Z--cafebabedeadbeef34098123409812deadbeef42", "address": "0xcafebabedeadbeef34098123409812deadbeef42" } @@ -819,7 +732,13 @@ Invoked when a request for account listing has been made. { "address": "0x123409812340981234098123409812deadbeef42", "raw_data": "0x01020304", - "message": "\u0019Ethereum Signed Message:\n4\u0001\u0002\u0003\u0004", + "messages": [ + { + "name": "message", + "value": "\u0019Ethereum Signed Message:\n4\u0001\u0002\u0003\u0004", + "type": "text/plain" + } + ], "hash": "0x7e3a4e7a9d1744bc5c675c25e1234ca8ed9162bd17f78b9085e48047c15ac310", "meta": { "remote": "signer binary", @@ -829,12 +748,34 @@ Invoked when a request for account listing has been made. } ] } +``` + +### ApproveNewAccount / `ui_approveNewAccount` + +Invoked when a request for creating a new account has been made. +#### Sample call + +```json +{ + "jsonrpc": "2.0", + "id": 4, + "method": "ui_approveNewAccount", + "params": [ + { + "meta": { + "remote": "signer binary", + "local": "main", + "scheme": "in-proc" + } + } + ] +} ``` ### ShowInfo / `ui_showInfo` -The UI should show the info to the user. Does not expect response. +The UI should show the info (a single message) to the user. Does not expect response. #### Sample call @@ -844,9 +785,7 @@ The UI should show the info to the user. Does not expect response. "id": 9, "method": "ui_showInfo", "params": [ - { - "text": "Tests completed" - } + "Tests completed" ] } @@ -854,18 +793,16 @@ The UI should show the info to the user. Does not expect response. ### ShowError / `ui_showError` -The UI should show the info to the user. Does not expect response. +The UI should show the error (a single message) to the user. Does not expect response. ```json { "jsonrpc": "2.0", "id": 2, - "method": "ShowError", + "method": "ui_showError", "params": [ - { - "text": "Testing 'ShowError'" - } + "Something bad happened!" ] } @@ -879,9 +816,36 @@ When implementing rate-limited rules, this callback should be used. TLDR; Use this method to keep track of signed transactions, instead of using the data in `ApproveTx`. 
+Example call: +```json + +{ + "jsonrpc": "2.0", + "id": 1, + "method": "ui_onApprovedTx", + "params": [ + { + "raw": "0xf88380018203339407a565b7ed7d7a678680a4c162885bedbb695fe080a44401a6e4000000000000000000000000000000000000000000000000000000000000001226a0223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20ea02aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663", + "tx": { + "nonce": "0x0", + "gasPrice": "0x1", + "gas": "0x333", + "to": "0x07a565b7ed7d7a678680a4c162885bedbb695fe0", + "value": "0x0", + "input": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012", + "v": "0x26", + "r": "0x223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20e", + "s": "0x2aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663", + "hash": "0xeba2df809e7a612a0a0d444ccfa5c839624bdc00dd29e3340d46df3870f8a30e" + } + } + ] +} +``` + ### OnSignerStartup / `ui_onSignerStartup` -This method provide the UI with information about what API version the signer uses (both internal and external) aswell as build-info and external API, +This method provides the UI with information about what API version the signer uses (both internal and external) as well as build-info and external API, in k/v-form. Example call: @@ -905,6 +869,27 @@ Example call: ``` +### OnInputRequired / `ui_onInputRequired` + +Invoked when Clef requires user input (e.g. a password). + +Example call: +```json + +{ + "jsonrpc": "2.0", + "id": 1, + "method": "ui_onInputRequired", + "params": [ + { + "title": "Account password", + "prompt": "Please enter the password for account 0x694267f14675d7e1b9494fd8d72fefe1755710fa", + "isPassword": true + } + ] +} +``` + ### Rules for UI apis diff --git a/cmd/clef/datatypes.md b/cmd/clef/datatypes.md index 5ebf9adc9712..dd8cda584649 100644 --- a/cmd/clef/datatypes.md +++ b/cmd/clef/datatypes.md @@ -3,7 +3,7 @@ These data types are defined in the channel between clef and the UI ### SignDataRequest -SignDataRequest contains information about a pending request to sign some data. The data to be signed can be of various types, defined by content-type. Clef has done most of the work in canonicalizing and making sense of the data, and it's up to the UI to presentthe user with the contents of the `message` +SignDataRequest contains information about a pending request to sign some data. The data to be signed can be of various types, defined by content-type. Clef has done most of the work in canonicalizing and making sense of the data, and it's up to the UI to present the user with the contents of the `message` Example: ```json diff --git a/cmd/clef/docs/setup.md b/cmd/clef/docs/setup.md index 33d2b0381f06..6cc7a4120d97 100644 --- a/cmd/clef/docs/setup.md +++ b/cmd/clef/docs/setup.md @@ -34,7 +34,7 @@ There are two ways that this can be achieved: integrated via Qubes or integrated #### 1. Qubes Integrated -Qubes provdes a facility for inter-qubes communication via `qrexec`. A qube can request to make a cross-qube RPC request +Qubes provides a facility for inter-qubes communication via `qrexec`. A qube can request to make a cross-qube RPC request to another qube. The OS then asks the user if the call is permitted. ![Example](qubes/qrexec-example.png) @@ -48,7 +48,7 @@ This is how [Split GPG](https://www.qubes-os.org/doc/split-gpg/) is implemented. ![Clef via qrexec](qubes/clef_qubes_qrexec.png) -On the `target` qubes, we need to define the rpc service. +On the `target` qubes, we need to define the RPC service. 
[qubes.Clefsign](qubes/qubes.Clefsign): @@ -94,7 +94,7 @@ with minimal requirements. On the `client` qube, we need to create a listener which will receive the request from the Dapp, and proxy it. -[qubes-client.py](qubes/client/qubes-client.py): +[qubes-client.py](qubes/qubes-client.py): ```python @@ -135,11 +135,11 @@ $ cat newaccnt.json $ cat newaccnt.json| qrexec-client-vm debian-work qubes.Clefsign ``` -This should pop up first a dialog to allow the IPC call: +A dialog should pop up first to allow the IPC call: ![one](qubes/qubes_newaccount-1.png) -Followed by a GTK-dialog to approve the operation +Followed by a GTK-dialog to approve the operation: ![two](qubes/qubes_newaccount-2.png) @@ -169,7 +169,7 @@ However, it comes with a couple of drawbacks: - The `Origin` header must be forwarded - Information about the remote ip must be added as a `X-Forwarded-For`. However, Clef cannot always trust an `XFF` header, since malicious clients may lie about `XFF` in order to fool the http server into believing it comes from another address. -- Even with a policy in place to allow rpc-calls between `caller` and `target`, there will be several popups: +- Even with a policy in place to allow RPC calls between `caller` and `target`, there will be several popups: - One qubes-specific where the user specifies the `target` vm - One clef-specific to approve the transaction @@ -177,7 +177,7 @@ However, it comes with a couple of drawbacks: #### 2. Network integrated The second way to set up Clef on a qubes system is to allow networking, and have Clef listen to a port which is accessible -form other qubes. +from other qubes. ![Clef via http](qubes/clef_qubes_http.png) @@ -186,13 +186,13 @@ form other qubes. ## USBArmory -The [USB armory](https://inversepath.com/usbarmory) is an open source hardware design with an 800 Mhz ARM processor. It is a pocket-size +The [USB armory](https://inversepath.com/usbarmory) is an open source hardware design with an 800 MHz ARM processor. It is a pocket-size computer. When inserted into a laptop, it identifies itself as a USB network interface, basically adding another network to your computer. Over this new network interface, you can SSH into the device. Running Clef off a USB armory means that you can use the armory as a very versatile offline computer, which only ever connects to a local network between your computer and the device itself. -Needless to say, the while this model should be fairly secure against remote attacks, an attacker with physical access +Needless to say, while this model should be fairly secure against remote attacks, an attacker with physical access to the USB Armory would trivially be able to extract the contents of the device filesystem. diff --git a/cmd/clef/extapi_changelog.md b/cmd/clef/extapi_changelog.md index dbc302631bc1..31554f079020 100644 --- a/cmd/clef/extapi_changelog.md +++ b/cmd/clef/extapi_changelog.md @@ -10,6 +10,64 @@ TL;DR: Given a version number MAJOR.MINOR.PATCH, increment the: Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format. +### 6.1.0 + +The API-method `account_signGnosisSafeTx` was added. This method takes two parameters, +`[address, safeTx]`. The latter, `safeTx`, can be copy-pasted from the gnosis relay. 
For example: + +``` +{ + "jsonrpc": "2.0", + "method": "account_signGnosisSafeTx", + "params": ["0xfd1c4226bfD1c436672092F4eCbfC270145b7256", + { + "safe": "0x25a6c4BBd32B2424A9c99aEB0584Ad12045382B3", + "to": "0xB372a646f7F05Cc1785018dBDA7EBc734a2A20E2", + "value": "20000000000000000", + "data": null, + "operation": 0, + "gasToken": "0x0000000000000000000000000000000000000000", + "safeTxGas": 27845, + "baseGas": 0, + "gasPrice": "0", + "refundReceiver": "0x0000000000000000000000000000000000000000", + "nonce": 2, + "executionDate": null, + "submissionDate": "2020-09-15T21:54:49.617634Z", + "modified": "2020-09-15T21:54:49.617634Z", + "blockNumber": null, + "transactionHash": null, + "safeTxHash": "0x2edfbd5bc113ff18c0631595db32eb17182872d88d9bf8ee4d8c2dd5db6d95e2", + "executor": null, + "isExecuted": false, + "isSuccessful": null, + "ethGasPrice": null, + "gasUsed": null, + "fee": null, + "origin": null, + "dataDecoded": null, + "confirmationsRequired": null, + "confirmations": [ + { + "owner": "0xAd2e180019FCa9e55CADe76E4487F126Fd08DA34", + "submissionDate": "2020-09-15T21:54:49.663299Z", + "transactionHash": null, + "confirmationType": "CONFIRMATION", + "signature": "0x95a7250bb645f831c86defc847350e7faff815b2fb586282568e96cc859e39315876db20a2eed5f7a0412906ec5ab57652a6f645ad4833f345bda059b9da2b821c", + "signatureType": "EOA" + } + ], + "signatures": null + } + ], + "id": 67 +} +``` + +Not all fields are required, though. This method is really just a UX helper, which massages the +input to conform to the `EIP-712` [specification](https://docs.gnosis.io/safe/docs/contracts_tx_execution/#transaction-hash) +for the Gnosis Safe, and making the output be directly importable to by a relay service. + ### 6.0.0 diff --git a/cmd/clef/intapi_changelog.md b/cmd/clef/intapi_changelog.md index 38424f06b9fd..eaeb2e68620b 100644 --- a/cmd/clef/intapi_changelog.md +++ b/cmd/clef/intapi_changelog.md @@ -10,6 +10,17 @@ TL;DR: Given a version number MAJOR.MINOR.PATCH, increment the: Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format. +### 7.0.1 + +Added `clef_New` to the internal API callable from a UI. + +> `New` creates a new password protected Account. The private key is protected with +> the given password. Users are responsible to backup the private key that is stored +> in the keystore location that was specified when this API was created. +> This method is the same as New on the external API, the difference being that +> this implementation does not ask for confirmation, since it's initiated by +> the user + ### 7.0.0 - The `message` field was renamed to `messages` in all data signing request methods to better reflect that it's a list, not a value. @@ -150,7 +161,7 @@ UserInputResponse struct { #### 1.2.0 * Add `OnStartup` method, to provide the UI with information about what API version -the signer uses (both internal and external) aswell as build-info and external api. +the signer uses (both internal and external) as well as build-info and external api. 
Example call: ```json diff --git a/cmd/clef/main.go b/cmd/clef/main.go index b2c8812ab269..273919cb410f 100644 --- a/cmd/clef/main.go +++ b/cmd/clef/main.go @@ -29,9 +29,9 @@ import ( "math/big" "os" "os/signal" - "os/user" "path/filepath" "runtime" + "sort" "strings" "time" @@ -40,10 +40,10 @@ import ( "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/console" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" @@ -53,7 +53,8 @@ import ( "github.com/ethereum/go-ethereum/signer/fourbyte" "github.com/ethereum/go-ethereum/signer/rules" "github.com/ethereum/go-ethereum/signer/storage" - colorable "github.com/mattn/go-colorable" + + "github.com/mattn/go-colorable" "github.com/mattn/go-isatty" "gopkg.in/urfave/cli.v1" ) @@ -82,6 +83,10 @@ var ( Name: "advanced", Usage: "If enabled, issues warnings instead of rejections for suspicious requests. Default off", } + acceptFlag = cli.BoolFlag{ + Name: "suppress-bootwarn", + Usage: "If set, does not show the warning during boot", + } keystoreFlag = cli.StringFlag{ Name: "keystore", Value: filepath.Join(node.DefaultDataDir(), "keystore"), @@ -98,10 +103,15 @@ var ( Usage: "Chain id to use for signing (1=mainnet, 3=Ropsten, 4=Rinkeby, 5=Goerli)", } rpcPortFlag = cli.IntFlag{ - Name: "rpcport", + Name: "http.port", Usage: "HTTP-RPC server listening port", Value: node.DefaultHTTPPort + 5, } + legacyRPCPortFlag = cli.IntFlag{ + Name: "rpcport", + Usage: "HTTP-RPC server listening port (Deprecated, please use --http.port).", + Value: node.DefaultHTTPPort + 5, + } signerSecretFlag = cli.StringFlag{ Name: "signersecret", Usage: "A file containing the (encrypted) master seed to encrypt Clef data, e.g. keystore credentials and ruleset hash", @@ -187,6 +197,22 @@ The setpw command stores a password for a given address (keyfile). Description: ` The delpw command removes a password for a given address (keyfile). `} + newAccountCommand = cli.Command{ + Action: utils.MigrateFlags(newAccount), + Name: "newaccount", + Usage: "Create a new account", + ArgsUsage: "", + Flags: []cli.Flag{ + logLevelFlag, + keystoreFlag, + utils.LightKDFFlag, + acceptFlag, + }, + Description: ` +The newaccount command creates a new keystore-backed account. It is a convenience-method +which can be used in lieu of an external UI.`, + } + gendocCommand = cli.Command{ Action: GenDoc, Name: "gendoc", @@ -196,6 +222,42 @@ The gendoc generates example structures of the json-rpc communication types. `} ) +// AppHelpFlagGroups is the application flags, grouped by functionality. 
+var AppHelpFlagGroups = []flags.FlagGroup{ + { + Name: "FLAGS", + Flags: []cli.Flag{ + logLevelFlag, + keystoreFlag, + configdirFlag, + chainIdFlag, + utils.LightKDFFlag, + utils.NoUSBFlag, + utils.SmartCardDaemonPathFlag, + utils.HTTPListenAddrFlag, + utils.HTTPVirtualHostsFlag, + utils.IPCDisabledFlag, + utils.IPCPathFlag, + utils.HTTPEnabledFlag, + rpcPortFlag, + signerSecretFlag, + customDBFlag, + auditLogFlag, + ruleFlag, + stdiouiFlag, + testFlag, + advancedMode, + acceptFlag, + }, + }, + { + Name: "ALIASED (deprecated)", + Flags: []cli.Flag{ + legacyRPCPortFlag, + }, + }, +} + func init() { app.Name = "Clef" app.Usage = "Manage Ethereum account operations" @@ -207,11 +269,11 @@ func init() { utils.LightKDFFlag, utils.NoUSBFlag, utils.SmartCardDaemonPathFlag, - utils.RPCListenAddrFlag, - utils.RPCVirtualHostsFlag, + utils.HTTPListenAddrFlag, + utils.HTTPVirtualHostsFlag, utils.IPCDisabledFlag, utils.IPCPathFlag, - utils.RPCEnabledFlag, + utils.HTTPEnabledFlag, rpcPortFlag, signerSecretFlag, customDBFlag, @@ -220,10 +282,51 @@ func init() { stdiouiFlag, testFlag, advancedMode, + acceptFlag, + legacyRPCPortFlag, } app.Action = signer - app.Commands = []cli.Command{initCommand, attestCommand, setCredentialCommand, delCredentialCommand, gendocCommand} - cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate + app.Commands = []cli.Command{initCommand, + attestCommand, + setCredentialCommand, + delCredentialCommand, + newAccountCommand, + gendocCommand} + cli.CommandHelpTemplate = flags.CommandHelpTemplate + // Override the default app help template + cli.AppHelpTemplate = flags.ClefAppHelpTemplate + + // Override the default app help printer, but only for the global app help + originalHelpPrinter := cli.HelpPrinter + cli.HelpPrinter = func(w io.Writer, tmpl string, data interface{}) { + if tmpl == flags.ClefAppHelpTemplate { + // Render out custom usage screen + originalHelpPrinter(w, tmpl, flags.HelpData{App: data, FlagGroups: AppHelpFlagGroups}) + } else if tmpl == flags.CommandHelpTemplate { + // Iterate over all command specific flags and categorize them + categorized := make(map[string][]cli.Flag) + for _, flag := range data.(cli.Command).Flags { + if _, ok := categorized[flag.String()]; !ok { + categorized[flags.FlagCategory(flag, AppHelpFlagGroups)] = append(categorized[flags.FlagCategory(flag, AppHelpFlagGroups)], flag) + } + } + + // sort to get a stable ordering + sorted := make([]flags.FlagGroup, 0, len(categorized)) + for cat, flgs := range categorized { + sorted = append(sorted, flags.FlagGroup{Name: cat, Flags: flgs}) + } + sort.Sort(flags.ByCategory(sorted)) + + // add sorted array to data and render with default printer + originalHelpPrinter(w, tmpl, map[string]interface{}{ + "cmd": data, + "categorizedFlags": sorted, + }) + } else { + originalHelpPrinter(w, tmpl, data) + } + } } func main() { @@ -263,7 +366,7 @@ func initializeSecrets(c *cli.Context) error { text := "The master seed of clef will be locked with a password.\nPlease specify a password. Do not forget this password!" 
var password string for { - password = getPassPhrase(text, true) + password = utils.GetPassPhrase(text, true) if err := core.ValidatePasswordFormat(password); err != nil { fmt.Printf("invalid password: %v\n", err) } else { @@ -336,7 +439,7 @@ func setCredential(ctx *cli.Context) error { utils.Fatalf("Invalid address specified: %s", addr) } address := common.HexToAddress(addr) - password := getPassPhrase("Please enter a password to store for this address:", true) + password := utils.GetPassPhrase("Please enter a password to store for this address:", true) fmt.Println() stretchedKey, err := readMasterKey(ctx, nil) @@ -382,14 +485,41 @@ func removeCredential(ctx *cli.Context) error { return nil } +func newAccount(c *cli.Context) error { + if err := initialize(c); err != nil { + return err + } + // The newaccount is meant for users using the CLI, since 'real' external + // UIs can use the UI-api instead. So we'll just use the native CLI UI here. + var ( + ui = core.NewCommandlineUI() + pwStorage storage.Storage = &storage.NoStorage{} + ksLoc = c.GlobalString(keystoreFlag.Name) + lightKdf = c.GlobalBool(utils.LightKDFFlag.Name) + ) + log.Info("Starting clef", "keystore", ksLoc, "light-kdf", lightKdf) + am := core.StartClefAccountManager(ksLoc, true, lightKdf, "") + // This gives is us access to the external API + apiImpl := core.NewSignerAPI(am, 0, true, ui, nil, false, pwStorage) + // This gives us access to the internal API + internalApi := core.NewUIServerAPI(apiImpl) + addr, err := internalApi.New(context.Background()) + if err == nil { + fmt.Printf("Generated account %v\n", addr.String()) + } + return err +} + func initialize(c *cli.Context) error { // Set up the logger to print everything logOutput := os.Stdout if c.GlobalBool(stdiouiFlag.Name) { logOutput = os.Stderr // If using the stdioui, we can't do the 'confirm'-flow - fmt.Fprint(logOutput, legalWarning) - } else { + if !c.GlobalBool(acceptFlag.Name) { + fmt.Fprint(logOutput, legalWarning) + } + } else if !c.GlobalBool(acceptFlag.Name) { if !confirm(legalWarning) { return fmt.Errorf("aborted by user") } @@ -457,7 +587,6 @@ func signer(c *cli.Context) error { api core.ExternalAPI pwStorage storage.Storage = &storage.NoStorage{} ) - configDir := c.GlobalString(configdirFlag.Name) if stretchedKey, err := readMasterKey(c, ui); err != nil { log.Warn("Failed to open master, rules disabled", "err", err) @@ -535,22 +664,39 @@ func signer(c *cli.Context) error { Service: api, Version: "1.0"}, } - if c.GlobalBool(utils.RPCEnabledFlag.Name) { - vhosts := splitAndTrim(c.GlobalString(utils.RPCVirtualHostsFlag.Name)) - cors := splitAndTrim(c.GlobalString(utils.RPCCORSDomainFlag.Name)) + if c.GlobalBool(utils.HTTPEnabledFlag.Name) { + vhosts := utils.SplitAndTrim(c.GlobalString(utils.HTTPVirtualHostsFlag.Name)) + cors := utils.SplitAndTrim(c.GlobalString(utils.HTTPCORSDomainFlag.Name)) + + srv := rpc.NewServer() + err := node.RegisterApisFromWhitelist(rpcAPI, []string{"account"}, srv, false) + if err != nil { + utils.Fatalf("Could not register API: %w", err) + } + handler := node.NewHTTPHandlerStack(srv, cors, vhosts) + + // set port + port := c.Int(rpcPortFlag.Name) + if c.GlobalIsSet(legacyRPCPortFlag.Name) { + if !c.GlobalIsSet(rpcPortFlag.Name) { + port = c.Int(legacyRPCPortFlag.Name) + } + log.Warn("The flag --rpcport is deprecated and will be removed in the future, please use --http.port") + } // start http server - httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name)) - listener, _, err 
:= rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"account"}, cors, vhosts, rpc.DefaultHTTPTimeouts) + httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.HTTPListenAddrFlag.Name), port) + httpServer, addr, err := node.StartHTTPEndpoint(httpEndpoint, rpc.DefaultHTTPTimeouts, handler) if err != nil { utils.Fatalf("Could not start RPC api: %v", err) } - extapiURL = fmt.Sprintf("http://%s", httpEndpoint) + extapiURL = fmt.Sprintf("http://%v/", addr) log.Info("HTTP endpoint opened", "url", extapiURL) defer func() { - listener.Close() - log.Info("HTTP endpoint closed", "url", httpEndpoint) + // Don't bother imposing a timeout here. + httpServer.Shutdown(context.Background()) + log.Info("HTTP endpoint closed", "url", extapiURL) }() } if !c.GlobalBool(utils.IPCDisabledFlag.Name) { @@ -589,21 +735,11 @@ func signer(c *cli.Context) error { return nil } -// splitAndTrim splits input separated by a comma -// and trims excessive white space from the substrings. -func splitAndTrim(input string) []string { - result := strings.Split(input, ",") - for i, r := range result { - result[i] = strings.TrimSpace(r) - } - return result -} - // DefaultConfigDir is the default config directory to use for the vaults and other // persistence requirements. func DefaultConfigDir() string { // Try to place the data folder in the user's home dir - home := homeDir() + home := utils.HomeDir() if home != "" { if runtime.GOOS == "darwin" { return filepath.Join(home, "Library", "Signer") @@ -611,26 +747,15 @@ func DefaultConfigDir() string { appdata := os.Getenv("APPDATA") if appdata != "" { return filepath.Join(appdata, "Signer") - } else { - return filepath.Join(home, "AppData", "Roaming", "Signer") } - } else { - return filepath.Join(home, ".clef") + return filepath.Join(home, "AppData", "Roaming", "Signer") } + return filepath.Join(home, ".clef") } // As we cannot guess a stable location, return empty and handle later return "" } -func homeDir() string { - if home := os.Getenv("HOME"); home != "" { - return home - } - if usr, err := user.Current(); err == nil { - return usr.HomeDir - } - return "" -} func readMasterKey(ctx *cli.Context, ui core.UIClientAPI) ([]byte, error) { var ( file string @@ -660,7 +785,7 @@ func readMasterKey(ctx *cli.Context, ui core.UIClientAPI) ([]byte, error) { } password = resp.Text } else { - password = getPassPhrase("Decrypt master seed of clef", false) + password = utils.GetPassPhrase("Decrypt master seed of clef", false) } masterSeed, err := decryptSeed(cipherKey, password) if err != nil { @@ -855,27 +980,6 @@ func testExternalUI(api *core.SignerAPI) { } -// getPassPhrase retrieves the password associated with clef, either fetched -// from a list of preloaded passphrases, or requested interactively from the user. -// TODO: there are many `getPassPhrase` functions, it will be better to abstract them into one. 
-func getPassPhrase(prompt string, confirmation bool) string { - fmt.Println(prompt) - password, err := console.Stdin.PromptPassword("Password: ") - if err != nil { - utils.Fatalf("Failed to read password: %v", err) - } - if confirmation { - confirm, err := console.Stdin.PromptPassword("Repeat password: ") - if err != nil { - utils.Fatalf("Failed to read password confirmation: %v", err) - } - if password != confirm { - utils.Fatalf("Passwords do not match") - } - } - return password -} - type encryptedSeedStorage struct { Description string `json:"description"` Version int `json:"version"` @@ -926,7 +1030,7 @@ func GenDoc(ctx *cli.Context) { if data, err := json.MarshalIndent(v, "", " "); err == nil { output = append(output, fmt.Sprintf("### %s\n\n%s\n\nExample:\n```json\n%s\n```", name, desc, data)) } else { - log.Error("Error generating output", err) + log.Error("Error generating output", "err", err) } } ) diff --git a/cmd/clef/tutorial.md b/cmd/clef/tutorial.md index 4453472e2d1c..3ea662b5d4c7 100644 --- a/cmd/clef/tutorial.md +++ b/cmd/clef/tutorial.md @@ -1,6 +1,6 @@ ## Initializing Clef -First thing's first, Clef needs to store some data itself. Since that data might be sensitive (passwords, signing rules, accounts), Clef's entire storage is encrypted. To support encrypting data, the first step is to initialize Clef with a random master seed, itself too encrypted with your chosen password: +First things first, Clef needs to store some data itself. Since that data might be sensitive (passwords, signing rules, accounts), Clef's entire storage is encrypted. To support encrypting data, the first step is to initialize Clef with a random master seed, itself too encrypted with your chosen password: ```text $ clef init diff --git a/cmd/devp2p/README.md b/cmd/devp2p/README.md new file mode 100644 index 000000000000..e1372d015899 --- /dev/null +++ b/cmd/devp2p/README.md @@ -0,0 +1,105 @@ +# The devp2p command + +The devp2p command line tool is a utility for low-level peer-to-peer debugging and +protocol development purposes. It can do many things. + +### ENR Decoding + +Use `devp2p enrdump ` to verify and display an Ethereum Node Record. + +### Node Key Management + +The `devp2p key ...` command family deals with node key files. + +Run `devp2p key generate mynode.key` to create a new node key in the `mynode.key` file. + +Run `devp2p key to-enode mynode.key -ip 127.0.0.1 -tcp 30303` to create an enode:// URL +corresponding to the given node key and address information. + +### Maintaining DNS Discovery Node Lists + +The devp2p command can create and publish DNS discovery node lists. + +Run `devp2p dns sign ` to update the signature of a DNS discovery tree. + +Run `devp2p dns sync ` to download a complete DNS discovery tree. + +Run `devp2p dns to-cloudflare ` to publish a tree to CloudFlare DNS. + +Run `devp2p dns to-route53 ` to publish a tree to Amazon Route53. + +You can find more information about these commands in the [DNS Discovery Setup Guide][dns-tutorial]. + +### Discovery v4 Utilities + +The `devp2p discv4 ...` command family deals with the [Node Discovery v4][discv4] +protocol. + +Run `devp2p discv4 ping ` to ping a node. + +Run `devp2p discv4 resolve ` to find the most recent node record of a node in +the DHT. + +Run `devp2p discv4 crawl ` to create or update a JSON node set. + +### Discovery v5 Utilities + +The `devp2p discv5 ...` command family deals with the [Node Discovery v5][discv5] +protocol. This protocol is currently under active development. 
+ +Run `devp2p discv5 ping ` to ping a node. + +Run `devp2p discv5 resolve ` to find the most recent node record of a node in +the discv5 DHT. + +Run `devp2p discv5 listen` to run a Discovery v5 node. + +Run `devp2p discv5 crawl ` to create or update a JSON node set containing +discv5 nodes. + +### Discovery Test Suites + +The devp2p command also contains interactive test suites for Discovery v4 and Discovery +v5. + +To run these tests against your implementation, you need to set up a networking +environment where two separate UDP listening addresses are available on the same machine. +The two listening addresses must also be routed such that they are able to reach the node +you want to test. + +For example, if you want to run the test on your local host, and the node under test is +also on the local host, you need to assign two IP addresses (or a larger range) to your +loopback interface. On macOS, this can be done by executing the following command: + + sudo ifconfig lo0 add 127.0.0.2 + +You can now run either test suite as follows: Start the node under test first, ensuring +that it won't talk to the Internet (i.e. disable bootstrapping). An easy way to prevent +unintended connections to the global DHT is listening on `127.0.0.1`. + +Now get the ENR of your node and store it in the `NODE` environment variable. + +Start the test by running `devp2p discv5 test -listen1 127.0.0.1 -listen2 127.0.0.2 $NODE`. + +### Eth Protocol Test Suite + +The Eth Protocol test suite is a conformance test suite for the [eth protocol][eth]. + +To run the eth protocol test suite against your implementation, the node needs to be initialized as such: + +1. initialize the geth node with the `genesis.json` file contained in the `testdata` directory +2. import the `halfchain.rlp` file in the `testdata` directory +3. 
run geth with the following flags: +``` +geth --datadir --nodiscover --nat=none --networkid 19763 --verbosity 5 +``` + +Then, run the following command, replacing `` with the enode of the geth node: + ``` + devp2p rlpx eth-test cmd/devp2p/internal/ethtest/testdata/fullchain.rlp cmd/devp2p/internal/ethtest/testdata/genesis.json +``` + +[eth]: https://github.com/ethereum/devp2p/blob/master/caps/eth.md +[dns-tutorial]: https://geth.ethereum.org/docs/developers/dns-discovery-setup +[discv4]: https://github.com/ethereum/devp2p/tree/master/discv4.md +[discv5]: https://github.com/ethereum/devp2p/tree/master/discv5/discv5.md diff --git a/cmd/devp2p/crawl.go b/cmd/devp2p/crawl.go index 92aaad72a372..9259b4894c9a 100644 --- a/cmd/devp2p/crawl.go +++ b/cmd/devp2p/crawl.go @@ -20,14 +20,13 @@ import ( "time" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode" ) type crawler struct { input nodeSet output nodeSet - disc *discover.UDPv4 + disc resolver iters []enode.Iterator inputIter enode.Iterator ch chan *enode.Node @@ -37,7 +36,11 @@ type crawler struct { revalidateInterval time.Duration } -func newCrawler(input nodeSet, disc *discover.UDPv4, iters ...enode.Iterator) *crawler { +type resolver interface { + RequestENR(*enode.Node) (*enode.Node, error) +} + +func newCrawler(input nodeSet, disc resolver, iters ...enode.Iterator) *crawler { c := &crawler{ input: input, output: make(nodeSet, len(input)), @@ -63,6 +66,7 @@ func (c *crawler) run(timeout time.Duration) nodeSet { doneCh = make(chan enode.Iterator, len(c.iters)) liveIters = len(c.iters) ) + defer timeoutTimer.Stop() for _, it := range c.iters { go c.runIterator(doneCh, it) } diff --git a/cmd/devp2p/discv4cmd.go b/cmd/devp2p/discv4cmd.go index 9525bec66817..3b6dc09a1cc8 100644 --- a/cmd/devp2p/discv4cmd.go +++ b/cmd/devp2p/discv4cmd.go @@ -22,6 +22,7 @@ import ( "strings" "time" + "github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/p2p/discover" @@ -40,6 +41,7 @@ var ( discv4ResolveCommand, discv4ResolveJSONCommand, discv4CrawlCommand, + discv4TestCommand, }, } discv4PingCommand = cli.Command{ @@ -74,6 +76,18 @@ var ( Action: discv4Crawl, Flags: []cli.Flag{bootnodesFlag, crawlTimeoutFlag}, } + discv4TestCommand = cli.Command{ + Name: "test", + Usage: "Runs tests against a node", + Action: discv4Test, + Flags: []cli.Flag{ + remoteEnodeFlag, + testPatternFlag, + testTAPFlag, + testListen1Flag, + testListen2Flag, + }, + } ) var ( @@ -81,11 +95,28 @@ var ( Name: "bootnodes", Usage: "Comma separated nodes used for bootstrapping", } + nodekeyFlag = cli.StringFlag{ + Name: "nodekey", + Usage: "Hex-encoded node key", + } + nodedbFlag = cli.StringFlag{ + Name: "nodedb", + Usage: "Nodes database location", + } + listenAddrFlag = cli.StringFlag{ + Name: "addr", + Usage: "Listening address", + } crawlTimeoutFlag = cli.DurationFlag{ Name: "timeout", Usage: "Time limit for the crawl.", Value: 30 * time.Minute, } + remoteEnodeFlag = cli.StringFlag{ + Name: "remote", + Usage: "Enode of the remote node under test", + EnvVar: "REMOTE_ENODE", + } ) func discv4Ping(ctx *cli.Context) error { @@ -172,28 +203,42 @@ func discv4Crawl(ctx *cli.Context) error { return nil } -func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) { - s := params.RinkebyBootnodes - if ctx.IsSet(bootnodesFlag.Name) { - s = strings.Split(ctx.String(bootnodesFlag.Name), ",") - } - nodes 
:= make([]*enode.Node, len(s)) - var err error - for i, record := range s { - nodes[i], err = parseNode(record) - if err != nil { - return nil, fmt.Errorf("invalid bootstrap node: %v", err) - } +// discv4Test runs the protocol test suite. +func discv4Test(ctx *cli.Context) error { + // Configure test package globals. + if !ctx.IsSet(remoteEnodeFlag.Name) { + return fmt.Errorf("Missing -%v", remoteEnodeFlag.Name) } - return nodes, nil + v4test.Remote = ctx.String(remoteEnodeFlag.Name) + v4test.Listen1 = ctx.String(testListen1Flag.Name) + v4test.Listen2 = ctx.String(testListen2Flag.Name) + return runTests(ctx, v4test.AllTests) } // startV4 starts an ephemeral discovery V4 node. func startV4(ctx *cli.Context) *discover.UDPv4 { - socket, ln, cfg, err := listen() + ln, config := makeDiscoveryConfig(ctx) + socket := listen(ln, ctx.String(listenAddrFlag.Name)) + disc, err := discover.ListenV4(socket, ln, config) if err != nil { exit(err) } + return disc +} + +func makeDiscoveryConfig(ctx *cli.Context) (*enode.LocalNode, discover.Config) { + var cfg discover.Config + + if ctx.IsSet(nodekeyFlag.Name) { + key, err := crypto.HexToECDSA(ctx.String(nodekeyFlag.Name)) + if err != nil { + exit(fmt.Errorf("-%s: %v", nodekeyFlag.Name, err)) + } + cfg.PrivateKey = key + } else { + cfg.PrivateKey, _ = crypto.GenerateKey() + } + if commandHasFlag(ctx, bootnodesFlag) { bn, err := parseBootnodes(ctx) if err != nil { @@ -201,26 +246,51 @@ func startV4(ctx *cli.Context) *discover.UDPv4 { } cfg.Bootnodes = bn } - disc, err := discover.ListenV4(socket, ln, cfg) + + dbpath := ctx.String(nodedbFlag.Name) + db, err := enode.OpenDB(dbpath) if err != nil { exit(err) } - return disc -} - -func listen() (*net.UDPConn, *enode.LocalNode, discover.Config, error) { - var cfg discover.Config - cfg.PrivateKey, _ = crypto.GenerateKey() - db, _ := enode.OpenDB("") ln := enode.NewLocalNode(db, cfg.PrivateKey) + return ln, cfg +} - socket, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IP{0, 0, 0, 0}}) +func listen(ln *enode.LocalNode, addr string) *net.UDPConn { + if addr == "" { + addr = "0.0.0.0:0" + } + socket, err := net.ListenPacket("udp4", addr) if err != nil { - db.Close() - return nil, nil, cfg, err + exit(err) + } + usocket := socket.(*net.UDPConn) + uaddr := socket.LocalAddr().(*net.UDPAddr) + if uaddr.IP.IsUnspecified() { + ln.SetFallbackIP(net.IP{127, 0, 0, 1}) + } else { + ln.SetFallbackIP(uaddr.IP) + } + ln.SetFallbackUDP(uaddr.Port) + return usocket +} + +func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) { + s := params.RinkebyBootnodes + if ctx.IsSet(bootnodesFlag.Name) { + input := ctx.String(bootnodesFlag.Name) + if input == "" { + return nil, nil + } + s = strings.Split(input, ",") } - addr := socket.LocalAddr().(*net.UDPAddr) - ln.SetFallbackIP(net.IP{127, 0, 0, 1}) - ln.SetFallbackUDP(addr.Port) - return socket, ln, cfg, nil + nodes := make([]*enode.Node, len(s)) + var err error + for i, record := range s { + nodes[i], err = parseNode(record) + if err != nil { + return nil, fmt.Errorf("invalid bootstrap node: %v", err) + } + } + return nodes, nil } diff --git a/cmd/devp2p/discv5cmd.go b/cmd/devp2p/discv5cmd.go new file mode 100644 index 000000000000..e20d7c9cfae6 --- /dev/null +++ b/cmd/devp2p/discv5cmd.go @@ -0,0 +1,146 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of go-ethereum. 
+// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "fmt" + "time" + + "github.com/ethereum/go-ethereum/cmd/devp2p/internal/v5test" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/p2p/discover" + "gopkg.in/urfave/cli.v1" +) + +var ( + discv5Command = cli.Command{ + Name: "discv5", + Usage: "Node Discovery v5 tools", + Subcommands: []cli.Command{ + discv5PingCommand, + discv5ResolveCommand, + discv5CrawlCommand, + discv5TestCommand, + discv5ListenCommand, + }, + } + discv5PingCommand = cli.Command{ + Name: "ping", + Usage: "Sends ping to a node", + Action: discv5Ping, + } + discv5ResolveCommand = cli.Command{ + Name: "resolve", + Usage: "Finds a node in the DHT", + Action: discv5Resolve, + Flags: []cli.Flag{bootnodesFlag}, + } + discv5CrawlCommand = cli.Command{ + Name: "crawl", + Usage: "Updates a nodes.json file with random nodes found in the DHT", + Action: discv5Crawl, + Flags: []cli.Flag{bootnodesFlag, crawlTimeoutFlag}, + } + discv5TestCommand = cli.Command{ + Name: "test", + Usage: "Runs protocol tests against a node", + Action: discv5Test, + Flags: []cli.Flag{ + testPatternFlag, + testTAPFlag, + testListen1Flag, + testListen2Flag, + }, + } + discv5ListenCommand = cli.Command{ + Name: "listen", + Usage: "Runs a node", + Action: discv5Listen, + Flags: []cli.Flag{ + bootnodesFlag, + nodekeyFlag, + nodedbFlag, + listenAddrFlag, + }, + } +) + +func discv5Ping(ctx *cli.Context) error { + n := getNodeArg(ctx) + disc := startV5(ctx) + defer disc.Close() + + fmt.Println(disc.Ping(n)) + return nil +} + +func discv5Resolve(ctx *cli.Context) error { + n := getNodeArg(ctx) + disc := startV5(ctx) + defer disc.Close() + + fmt.Println(disc.Resolve(n)) + return nil +} + +func discv5Crawl(ctx *cli.Context) error { + if ctx.NArg() < 1 { + return fmt.Errorf("need nodes file as argument") + } + nodesFile := ctx.Args().First() + var inputSet nodeSet + if common.FileExist(nodesFile) { + inputSet = loadNodesJSON(nodesFile) + } + + disc := startV5(ctx) + defer disc.Close() + c := newCrawler(inputSet, disc, disc.RandomNodes()) + c.revalidateInterval = 10 * time.Minute + output := c.run(ctx.Duration(crawlTimeoutFlag.Name)) + writeNodesJSON(nodesFile, output) + return nil +} + +// discv5Test runs the protocol test suite. +func discv5Test(ctx *cli.Context) error { + suite := &v5test.Suite{ + Dest: getNodeArg(ctx), + Listen1: ctx.String(testListen1Flag.Name), + Listen2: ctx.String(testListen2Flag.Name), + } + return runTests(ctx, suite.AllTests()) +} + +func discv5Listen(ctx *cli.Context) error { + disc := startV5(ctx) + defer disc.Close() + + fmt.Println(disc.Self()) + select {} +} + +// startV5 starts an ephemeral discovery v5 node. 
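+// It shares the node key, node DB, bootnode and listen-address flag handling with the v4 command.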
+func startV5(ctx *cli.Context) *discover.UDPv5 { + ln, config := makeDiscoveryConfig(ctx) + socket := listen(ln, ctx.String(listenAddrFlag.Name)) + disc, err := discover.ListenV5(socket, ln, config) + if err != nil { + exit(err) + } + return disc +} diff --git a/cmd/devp2p/dns_cloudflare.go b/cmd/devp2p/dns_cloudflare.go index 83279168ccae..a4d10dcfdd3c 100644 --- a/cmd/devp2p/dns_cloudflare.go +++ b/cmd/devp2p/dns_cloudflare.go @@ -130,9 +130,9 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string) if !exists { // Entry is unknown, push a new one to Cloudflare. log.Info(fmt.Sprintf("Creating %s = %q", path, val)) - ttl := 1 + ttl := rootTTL if path != name { - ttl = 2147483647 // Max TTL permitted by Cloudflare + ttl = treeNodeTTL // Max TTL permitted by Cloudflare } _, err = c.CreateDNSRecord(c.zoneID, cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl}) } else if old.Content != val { diff --git a/cmd/devp2p/dns_route53.go b/cmd/devp2p/dns_route53.go new file mode 100644 index 000000000000..c5f99529b853 --- /dev/null +++ b/cmd/devp2p/dns_route53.go @@ -0,0 +1,322 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "errors" + "fmt" + "sort" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/route53" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/p2p/dnsdisc" + "gopkg.in/urfave/cli.v1" +) + +const ( + // Route53 limits change sets to 32k of 'RDATA size'. Change sets are also limited to + // 1000 items. UPSERTs count double. + // https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html#limits-api-requests-changeresourcerecordsets + route53ChangeSizeLimit = 32000 + route53ChangeCountLimit = 1000 +) + +var ( + route53AccessKeyFlag = cli.StringFlag{ + Name: "access-key-id", + Usage: "AWS Access Key ID", + EnvVar: "AWS_ACCESS_KEY_ID", + } + route53AccessSecretFlag = cli.StringFlag{ + Name: "access-key-secret", + Usage: "AWS Access Key Secret", + EnvVar: "AWS_SECRET_ACCESS_KEY", + } + route53ZoneIDFlag = cli.StringFlag{ + Name: "zone-id", + Usage: "Route53 Zone ID", + } +) + +type route53Client struct { + api *route53.Route53 + zoneID string +} + +type recordSet struct { + values []string + ttl int64 +} + +// newRoute53Client sets up a Route53 API client from command line flags. 
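+// The credentials come from the -access-key-id and -access-key-secret flags (or the AWS_ACCESS_KEY_ID
+// and AWS_SECRET_ACCESS_KEY environment variables); if -zone-id is empty, the zone is looked up by name.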
+func newRoute53Client(ctx *cli.Context) *route53Client { + akey := ctx.String(route53AccessKeyFlag.Name) + asec := ctx.String(route53AccessSecretFlag.Name) + if akey == "" || asec == "" { + exit(fmt.Errorf("need Route53 Access Key ID and secret proceed")) + } + config := &aws.Config{Credentials: credentials.NewStaticCredentials(akey, asec, "")} + session, err := session.NewSession(config) + if err != nil { + exit(fmt.Errorf("can't create AWS session: %v", err)) + } + return &route53Client{ + api: route53.New(session), + zoneID: ctx.String(route53ZoneIDFlag.Name), + } +} + +// deploy uploads the given tree to Route53. +func (c *route53Client) deploy(name string, t *dnsdisc.Tree) error { + if err := c.checkZone(name); err != nil { + return err + } + + // Compute DNS changes. + existing, err := c.collectRecords(name) + if err != nil { + return err + } + log.Info(fmt.Sprintf("Found %d TXT records", len(existing))) + + records := t.ToTXT(name) + changes := c.computeChanges(name, records, existing) + if len(changes) == 0 { + log.Info("No DNS changes needed") + return nil + } + + // Submit change batches. + batches := splitChanges(changes, route53ChangeSizeLimit, route53ChangeCountLimit) + for i, changes := range batches { + log.Info(fmt.Sprintf("Submitting %d changes to Route53", len(changes))) + batch := new(route53.ChangeBatch) + batch.SetChanges(changes) + batch.SetComment(fmt.Sprintf("enrtree update %d/%d of %s at seq %d", i+1, len(batches), name, t.Seq())) + req := &route53.ChangeResourceRecordSetsInput{HostedZoneId: &c.zoneID, ChangeBatch: batch} + resp, err := c.api.ChangeResourceRecordSets(req) + if err != nil { + return err + } + + log.Info(fmt.Sprintf("Waiting for change request %s", *resp.ChangeInfo.Id)) + wreq := &route53.GetChangeInput{Id: resp.ChangeInfo.Id} + if err := c.api.WaitUntilResourceRecordSetsChanged(wreq); err != nil { + return err + } + } + return nil +} + +// checkZone verifies zone information for the given domain. +func (c *route53Client) checkZone(name string) (err error) { + if c.zoneID == "" { + c.zoneID, err = c.findZoneID(name) + } + return err +} + +// findZoneID searches for the Zone ID containing the given domain. +func (c *route53Client) findZoneID(name string) (string, error) { + log.Info(fmt.Sprintf("Finding Route53 Zone ID for %s", name)) + var req route53.ListHostedZonesByNameInput + for { + resp, err := c.api.ListHostedZonesByName(&req) + if err != nil { + return "", err + } + for _, zone := range resp.HostedZones { + if isSubdomain(name, *zone.Name) { + return *zone.Id, nil + } + } + if !*resp.IsTruncated { + break + } + req.DNSName = resp.NextDNSName + req.HostedZoneId = resp.NextHostedZoneId + } + return "", errors.New("can't find zone ID for " + name) +} + +// computeChanges creates DNS changes for the given record. +func (c *route53Client) computeChanges(name string, records map[string]string, existing map[string]recordSet) []*route53.Change { + // Convert all names to lowercase. 
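+	// Comparing lowercased names avoids spurious changes when only the casing differs.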
+ lrecords := make(map[string]string, len(records)) + for name, r := range records { + lrecords[strings.ToLower(name)] = r + } + records = lrecords + + var changes []*route53.Change + for path, val := range records { + ttl := int64(rootTTL) + if path != name { + ttl = int64(treeNodeTTL) + } + + prevRecords, exists := existing[path] + prevValue := strings.Join(prevRecords.values, "") + if !exists { + // Entry is unknown, push a new one + log.Info(fmt.Sprintf("Creating %s = %q", path, val)) + changes = append(changes, newTXTChange("CREATE", path, ttl, splitTXT(val))) + } else if prevValue != val || prevRecords.ttl != ttl { + // Entry already exists, only change its content. + log.Info(fmt.Sprintf("Updating %s from %q to %q", path, prevValue, val)) + changes = append(changes, newTXTChange("UPSERT", path, ttl, splitTXT(val))) + } else { + log.Info(fmt.Sprintf("Skipping %s = %q", path, val)) + } + } + + // Iterate over the old records and delete anything stale. + for path, set := range existing { + if _, ok := records[path]; ok { + continue + } + // Stale entry, nuke it. + log.Info(fmt.Sprintf("Deleting %s = %q", path, strings.Join(set.values, ""))) + changes = append(changes, newTXTChange("DELETE", path, set.ttl, set.values...)) + } + + sortChanges(changes) + return changes +} + +// sortChanges ensures DNS changes are in leaf-added -> root-changed -> leaf-deleted order. +func sortChanges(changes []*route53.Change) { + score := map[string]int{"CREATE": 1, "UPSERT": 2, "DELETE": 3} + sort.Slice(changes, func(i, j int) bool { + if *changes[i].Action == *changes[j].Action { + return *changes[i].ResourceRecordSet.Name < *changes[j].ResourceRecordSet.Name + } + return score[*changes[i].Action] < score[*changes[j].Action] + }) +} + +// splitChanges splits up DNS changes such that each change batch +// is smaller than the given RDATA limit. +func splitChanges(changes []*route53.Change, sizeLimit, countLimit int) [][]*route53.Change { + var ( + batches [][]*route53.Change + batchSize int + batchCount int + ) + for _, ch := range changes { + // Start new batch if this change pushes the current one over the limit. + count := changeCount(ch) + size := changeSize(ch) * count + overSize := batchSize+size > sizeLimit + overCount := batchCount+count > countLimit + if len(batches) == 0 || overSize || overCount { + batches = append(batches, nil) + batchSize = 0 + batchCount = 0 + } + batches[len(batches)-1] = append(batches[len(batches)-1], ch) + batchSize += size + batchCount += count + } + return batches +} + +// changeSize returns the RDATA size of a DNS change. +func changeSize(ch *route53.Change) int { + size := 0 + for _, rr := range ch.ResourceRecordSet.ResourceRecords { + if rr.Value != nil { + size += len(*rr.Value) + } + } + return size +} + +func changeCount(ch *route53.Change) int { + if *ch.Action == "UPSERT" { + return 2 + } + return 1 +} + +// collectRecords collects all TXT records below the given name. 
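+// The result maps each record name (with the trailing dot stripped) to its values and TTL.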
+func (c *route53Client) collectRecords(name string) (map[string]recordSet, error) { + log.Info(fmt.Sprintf("Retrieving existing TXT records on %s (%s)", name, c.zoneID)) + var req route53.ListResourceRecordSetsInput + req.SetHostedZoneId(c.zoneID) + existing := make(map[string]recordSet) + err := c.api.ListResourceRecordSetsPages(&req, func(resp *route53.ListResourceRecordSetsOutput, last bool) bool { + for _, set := range resp.ResourceRecordSets { + if !isSubdomain(*set.Name, name) || *set.Type != "TXT" { + continue + } + s := recordSet{ttl: *set.TTL} + for _, rec := range set.ResourceRecords { + s.values = append(s.values, *rec.Value) + } + name := strings.TrimSuffix(*set.Name, ".") + existing[name] = s + } + return true + }) + return existing, err +} + +// newTXTChange creates a change to a TXT record. +func newTXTChange(action, name string, ttl int64, values ...string) *route53.Change { + var c route53.Change + var r route53.ResourceRecordSet + var rrs []*route53.ResourceRecord + for _, val := range values { + rr := new(route53.ResourceRecord) + rr.SetValue(val) + rrs = append(rrs, rr) + } + r.SetType("TXT") + r.SetName(name) + r.SetTTL(ttl) + r.SetResourceRecords(rrs) + c.SetAction(action) + c.SetResourceRecordSet(&r) + return &c +} + +// isSubdomain returns true if name is a subdomain of domain. +func isSubdomain(name, domain string) bool { + domain = strings.TrimSuffix(domain, ".") + name = strings.TrimSuffix(name, ".") + return strings.HasSuffix("."+name, "."+domain) +} + +// splitTXT splits value into a list of quoted 255-character strings. +func splitTXT(value string) string { + var result strings.Builder + for len(value) > 0 { + rlen := len(value) + if rlen > 253 { + rlen = 253 + } + result.WriteString(strconv.Quote(value[:rlen])) + value = value[rlen:] + } + return result.String() +} diff --git a/cmd/devp2p/dns_route53_test.go b/cmd/devp2p/dns_route53_test.go new file mode 100644 index 000000000000..a2ef3791f687 --- /dev/null +++ b/cmd/devp2p/dns_route53_test.go @@ -0,0 +1,166 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "reflect" + "testing" + + "github.com/aws/aws-sdk-go/service/route53" +) + +// This test checks that computeChanges/splitChanges create DNS changes in +// leaf-added -> root-changed -> leaf-deleted order. 
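+// testTree0 below is the record set already present in the zone, testTree1 the desired state.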
+func TestRoute53ChangeSort(t *testing.T) { + testTree0 := map[string]recordSet{ + "2kfjogvxdqtxxugbh7gs7naaai.n": {ttl: 3333, values: []string{ + `"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`, + }}, + "fdxn3sn67na5dka4j2gok7bvqi.n": {ttl: treeNodeTTL, values: []string{`"enrtree-branch:"`}}, + "n": {ttl: rootTTL, values: []string{`"enrtree-root:v1 e=2KFJOGVXDQTXXUGBH7GS7NAAAI l=FDXN3SN67NA5DKA4J2GOK7BVQI seq=0 sig=v_-J_q_9ICQg5ztExFvLQhDBGMb0lZPJLhe3ts9LAcgqhOhtT3YFJsl8BWNDSwGtamUdR-9xl88_w-X42SVpjwE"`}}, + } + + testTree1 := map[string]string{ + "n": "enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA", + "C7HRFPF3BLGF3YR4DY5KX3SMBE.n": "enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org", + "JWXYDBPXYWG6FX3GMDIBFA6CJ4.n": "enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24", + "2XS2367YHAXJFGLZHVAWLQD4ZY.n": "enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA", + "H4FHT4B454P6UXFD7JCYQ5PWDY.n": "enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI", + "MHTDO6TMUBRIA2XWG5LUDACK24.n": "enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o", + } + + wantChanges := []*route53.Change{ + { + Action: sp("CREATE"), + ResourceRecordSet: &route53.ResourceRecordSet{ + Name: sp("2xs2367yhaxjfglzhvawlqd4zy.n"), + ResourceRecords: []*route53.ResourceRecord{{ + Value: sp(`"enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA"`), + }}, + TTL: ip(treeNodeTTL), + Type: sp("TXT"), + }, + }, + { + Action: sp("CREATE"), + ResourceRecordSet: &route53.ResourceRecordSet{ + Name: sp("c7hrfpf3blgf3yr4dy5kx3smbe.n"), + ResourceRecords: []*route53.ResourceRecord{{ + Value: sp(`"enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org"`), + }}, + TTL: ip(treeNodeTTL), + Type: sp("TXT"), + }, + }, + { + Action: sp("CREATE"), + ResourceRecordSet: &route53.ResourceRecordSet{ + Name: sp("h4fht4b454p6uxfd7jcyq5pwdy.n"), + ResourceRecords: []*route53.ResourceRecord{{ + Value: sp(`"enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI"`), + }}, + TTL: ip(treeNodeTTL), + Type: sp("TXT"), + }, + }, + { + Action: sp("CREATE"), + ResourceRecordSet: &route53.ResourceRecordSet{ + Name: sp("jwxydbpxywg6fx3gmdibfa6cj4.n"), + ResourceRecords: []*route53.ResourceRecord{{ + Value: sp(`"enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24"`), + }}, + TTL: ip(treeNodeTTL), + Type: sp("TXT"), + }, + }, + { + Action: sp("CREATE"), + ResourceRecordSet: &route53.ResourceRecordSet{ + Name: sp("mhtdo6tmubria2xwg5ludack24.n"), + ResourceRecords: []*route53.ResourceRecord{{ + Value: 
sp(`"enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o"`), + }}, + TTL: ip(treeNodeTTL), + Type: sp("TXT"), + }, + }, + { + Action: sp("UPSERT"), + ResourceRecordSet: &route53.ResourceRecordSet{ + Name: sp("n"), + ResourceRecords: []*route53.ResourceRecord{{ + Value: sp(`"enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA"`), + }}, + TTL: ip(rootTTL), + Type: sp("TXT"), + }, + }, + { + Action: sp("DELETE"), + ResourceRecordSet: &route53.ResourceRecordSet{ + Name: sp("2kfjogvxdqtxxugbh7gs7naaai.n"), + ResourceRecords: []*route53.ResourceRecord{ + {Value: sp(`"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`)}, + }, + TTL: ip(3333), + Type: sp("TXT"), + }, + }, + { + Action: sp("DELETE"), + ResourceRecordSet: &route53.ResourceRecordSet{ + Name: sp("fdxn3sn67na5dka4j2gok7bvqi.n"), + ResourceRecords: []*route53.ResourceRecord{{ + Value: sp(`"enrtree-branch:"`), + }}, + TTL: ip(treeNodeTTL), + Type: sp("TXT"), + }, + }, + } + + var client route53Client + changes := client.computeChanges("n", testTree1, testTree0) + if !reflect.DeepEqual(changes, wantChanges) { + t.Fatalf("wrong changes (got %d, want %d)", len(changes), len(wantChanges)) + } + + // Check splitting according to size. + wantSplit := [][]*route53.Change{ + wantChanges[:4], + wantChanges[4:6], + wantChanges[6:], + } + split := splitChanges(changes, 600, 4000) + if !reflect.DeepEqual(split, wantSplit) { + t.Fatalf("wrong split batches: got %d, want %d", len(split), len(wantSplit)) + } + + // Check splitting according to count. + wantSplit = [][]*route53.Change{ + wantChanges[:5], + wantChanges[5:], + } + split = splitChanges(changes, 10000, 6) + if !reflect.DeepEqual(split, wantSplit) { + t.Fatalf("wrong split batches: got %d, want %d", len(split), len(wantSplit)) + } +} + +func sp(s string) *string { return &s } +func ip(i int64) *int64 { return &i } diff --git a/cmd/devp2p/dnscmd.go b/cmd/devp2p/dnscmd.go index eb15764b04e2..f56f0f34e456 100644 --- a/cmd/devp2p/dnscmd.go +++ b/cmd/devp2p/dnscmd.go @@ -27,10 +27,10 @@ import ( "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/console" + "github.com/ethereum/go-ethereum/console/prompt" "github.com/ethereum/go-ethereum/p2p/dnsdisc" "github.com/ethereum/go-ethereum/p2p/enode" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var ( @@ -42,6 +42,7 @@ var ( dnsSignCommand, dnsTXTCommand, dnsCloudflareCommand, + dnsRoute53Command, }, } dnsSyncCommand = cli.Command{ @@ -66,11 +67,18 @@ var ( } dnsCloudflareCommand = cli.Command{ Name: "to-cloudflare", - Usage: "Deploy DNS TXT records to cloudflare", + Usage: "Deploy DNS TXT records to CloudFlare", ArgsUsage: "", Action: dnsToCloudflare, Flags: []cli.Flag{cloudflareTokenFlag, cloudflareZoneIDFlag}, } + dnsRoute53Command = cli.Command{ + Name: "to-route53", + Usage: "Deploy DNS TXT records to Amazon Route53", + ArgsUsage: "", + Action: dnsToRoute53, + Flags: []cli.Flag{route53AccessKeyFlag, route53AccessSecretFlag, route53ZoneIDFlag}, + } ) var ( @@ -88,6 +96,11 @@ var ( } ) +const ( + rootTTL = 30 * 60 // 30 min + treeNodeTTL = 4 * 7 * 24 * 60 * 60 // 4 weeks +) + // dnsSync performs dnsSyncCommand. 
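+// It downloads the complete DNS discovery tree at the given URL.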
func dnsSync(ctx *cli.Context) error { var ( @@ -194,13 +207,26 @@ func dnsToCloudflare(ctx *cli.Context) error { return client.deploy(domain, t) } +// dnsToRoute53 peforms dnsRoute53Command. +func dnsToRoute53(ctx *cli.Context) error { + if ctx.NArg() < 1 { + return fmt.Errorf("need tree definition directory as argument") + } + domain, t, err := loadTreeDefinitionForExport(ctx.Args().Get(0)) + if err != nil { + return err + } + client := newRoute53Client(ctx) + return client.deploy(domain, t) +} + // loadSigningKey loads a private key in Ethereum keystore format. func loadSigningKey(keyfile string) *ecdsa.PrivateKey { keyjson, err := ioutil.ReadFile(keyfile) if err != nil { exit(fmt.Errorf("failed to read the keyfile at '%s': %v", keyfile, err)) } - password, _ := console.Stdin.PromptPassword("Please enter the password for '" + keyfile + "': ") + password, _ := prompt.Stdin.PromptPassword("Please enter the password for '" + keyfile + "': ") key, err := keystore.DecryptKey(keyjson, password) if err != nil { exit(fmt.Errorf("error decrypting key: %v", err)) @@ -214,8 +240,7 @@ func dnsClient(ctx *cli.Context) *dnsdisc.Client { if commandHasFlag(ctx, dnsTimeoutFlag) { cfg.Timeout = ctx.Duration(dnsTimeoutFlag.Name) } - c, _ := dnsdisc.NewClient(cfg) // cannot fail because no URLs given - return c + return dnsdisc.NewClient(cfg) } // There are two file formats for DNS node trees on disk: diff --git a/cmd/devp2p/enrcmd.go b/cmd/devp2p/enrcmd.go index 15d77dd011a8..48ede616ee16 100644 --- a/cmd/devp2p/enrcmd.go +++ b/cmd/devp2p/enrcmd.go @@ -21,6 +21,7 @@ import ( "encoding/base64" "encoding/hex" "fmt" + "io" "io/ioutil" "net" "os" @@ -69,22 +70,30 @@ func enrdump(ctx *cli.Context) error { if err != nil { return fmt.Errorf("INVALID: %v", err) } - fmt.Print(dumpRecord(r)) + dumpRecord(os.Stdout, r) return nil } // dumpRecord creates a human-readable description of the given node record. -func dumpRecord(r *enr.Record) string { - out := new(bytes.Buffer) - if n, err := enode.New(enode.ValidSchemes, r); err != nil { +func dumpRecord(out io.Writer, r *enr.Record) { + n, err := enode.New(enode.ValidSchemes, r) + if err != nil { fmt.Fprintf(out, "INVALID: %v\n", err) } else { fmt.Fprintf(out, "Node ID: %v\n", n.ID()) + dumpNodeURL(out, n) } kv := r.AppendElements(nil)[1:] fmt.Fprintf(out, "Record has sequence number %d and %d key/value pairs.\n", r.Seq(), len(kv)/2) fmt.Fprint(out, dumpRecordKV(kv, 2)) - return out.String() +} + +func dumpNodeURL(out io.Writer, n *enode.Node) { + var key enode.Secp256k1 + if n.Load(&key) != nil { + return // no secp256k1 public key + } + fmt.Fprintf(out, "URLv4: %s\n", n.URLv4()) } func dumpRecordKV(kv []interface{}, indent int) string { diff --git a/cmd/devp2p/internal/ethtest/chain.go b/cmd/devp2p/internal/ethtest/chain.go new file mode 100644 index 000000000000..d67387e80bf4 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/chain.go @@ -0,0 +1,167 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "math/big" + "os" + "strings" + + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/forkid" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" +) + +type Chain struct { + blocks []*types.Block + chainConfig *params.ChainConfig +} + +func (c *Chain) WriteTo(writer io.Writer) error { + for _, block := range c.blocks { + if err := rlp.Encode(writer, block); err != nil { + return err + } + } + + return nil +} + +// Len returns the length of the chain. +func (c *Chain) Len() int { + return len(c.blocks) +} + +// TD calculates the total difficulty of the chain. +func (c *Chain) TD(height int) *big.Int { // TODO later on channge scheme so that the height is included in range + sum := big.NewInt(0) + for _, block := range c.blocks[:height] { + sum.Add(sum, block.Difficulty()) + } + return sum +} + +// ForkID gets the fork id of the chain. +func (c *Chain) ForkID() forkid.ID { + return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len())) +} + +// Shorten returns a copy chain of a desired height from the imported +func (c *Chain) Shorten(height int) *Chain { + blocks := make([]*types.Block, height) + copy(blocks, c.blocks[:height]) + + config := *c.chainConfig + return &Chain{ + blocks: blocks, + chainConfig: &config, + } +} + +// Head returns the chain head. +func (c *Chain) Head() *types.Block { + return c.blocks[c.Len()-1] +} + +func (c *Chain) GetHeaders(req GetBlockHeaders) (BlockHeaders, error) { + if req.Amount < 1 { + return nil, fmt.Errorf("no block headers requested") + } + + headers := make(BlockHeaders, req.Amount) + var blockNumber uint64 + + // range over blocks to check if our chain has the requested header + for _, block := range c.blocks { + if block.Hash() == req.Origin.Hash || block.Number().Uint64() == req.Origin.Number { + headers[0] = block.Header() + blockNumber = block.Number().Uint64() + } + } + if headers[0] == nil { + return nil, fmt.Errorf("no headers found for given origin number %v, hash %v", req.Origin.Number, req.Origin.Hash) + } + + if req.Reverse { + for i := 1; i < int(req.Amount); i++ { + blockNumber -= (1 - req.Skip) + headers[i] = c.blocks[blockNumber].Header() + + } + + return headers, nil + } + + for i := 1; i < int(req.Amount); i++ { + blockNumber += (1 + req.Skip) + headers[i] = c.blocks[blockNumber].Header() + } + + return headers, nil +} + +// loadChain takes the given chain.rlp file, and decodes and returns +// the blocks from the file. +func loadChain(chainfile string, genesis string) (*Chain, error) { + chainConfig, err := ioutil.ReadFile(genesis) + if err != nil { + return nil, err + } + var gen core.Genesis + if err := json.Unmarshal(chainConfig, &gen); err != nil { + return nil, err + } + gblock := gen.ToBlock(nil) + + // Load chain.rlp. 
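+	// A ".gz" suffix on the file name enables transparent gzip decompression.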
+ fh, err := os.Open(chainfile) + if err != nil { + return nil, err + } + defer fh.Close() + var reader io.Reader = fh + if strings.HasSuffix(chainfile, ".gz") { + if reader, err = gzip.NewReader(reader); err != nil { + return nil, err + } + } + stream := rlp.NewStream(reader, 0) + var blocks = make([]*types.Block, 1) + blocks[0] = gblock + for i := 0; ; i++ { + var b types.Block + if err := stream.Decode(&b); err == io.EOF { + break + } else if err != nil { + return nil, fmt.Errorf("at block index %d: %v", i, err) + } + if b.NumberU64() != uint64(i+1) { + return nil, fmt.Errorf("block at index %d has wrong number %d", i, b.NumberU64()) + } + blocks = append(blocks, &b) + } + + c := &Chain{blocks: blocks, chainConfig: gen.Config} + return c, nil +} diff --git a/cmd/devp2p/internal/ethtest/chain_test.go b/cmd/devp2p/internal/ethtest/chain_test.go new file mode 100644 index 000000000000..ac3907ce8dc7 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/chain_test.go @@ -0,0 +1,150 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "path/filepath" + "strconv" + "testing" + + "github.com/ethereum/go-ethereum/p2p" + "github.com/stretchr/testify/assert" +) + +// TestEthProtocolNegotiation tests whether the test suite +// can negotiate the highest eth protocol in a status message exchange +func TestEthProtocolNegotiation(t *testing.T) { + var tests = []struct { + conn *Conn + caps []p2p.Cap + expected uint32 + }{ + { + conn: &Conn{}, + caps: []p2p.Cap{ + {Name: "eth", Version: 63}, + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + expected: uint32(65), + }, + { + conn: &Conn{}, + caps: []p2p.Cap{ + {Name: "eth", Version: 0}, + {Name: "eth", Version: 89}, + {Name: "eth", Version: 65}, + }, + expected: uint32(65), + }, + { + conn: &Conn{}, + caps: []p2p.Cap{ + {Name: "eth", Version: 63}, + {Name: "eth", Version: 64}, + {Name: "wrongProto", Version: 65}, + }, + expected: uint32(64), + }, + } + + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + tt.conn.negotiateEthProtocol(tt.caps) + assert.Equal(t, tt.expected, uint32(tt.conn.ethProtocolVersion)) + }) + } +} + +// TestChain_GetHeaders tests whether the test suite can correctly +// respond to a GetBlockHeaders request from a node. 
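+// The test chain is loaded from chain.rlp and genesis.json in the testdata directory.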
+func TestChain_GetHeaders(t *testing.T) { + chainFile, err := filepath.Abs("./testdata/chain.rlp") + if err != nil { + t.Fatal(err) + } + genesisFile, err := filepath.Abs("./testdata/genesis.json") + if err != nil { + t.Fatal(err) + } + + chain, err := loadChain(chainFile, genesisFile) + if err != nil { + t.Fatal(err) + } + + var tests = []struct { + req GetBlockHeaders + expected BlockHeaders + }{ + { + req: GetBlockHeaders{ + Origin: hashOrNumber{ + Number: uint64(2), + }, + Amount: uint64(5), + Skip: 1, + Reverse: false, + }, + expected: BlockHeaders{ + chain.blocks[2].Header(), + chain.blocks[4].Header(), + chain.blocks[6].Header(), + chain.blocks[8].Header(), + chain.blocks[10].Header(), + }, + }, + { + req: GetBlockHeaders{ + Origin: hashOrNumber{ + Number: uint64(chain.Len() - 1), + }, + Amount: uint64(3), + Skip: 0, + Reverse: true, + }, + expected: BlockHeaders{ + chain.blocks[chain.Len()-1].Header(), + chain.blocks[chain.Len()-2].Header(), + chain.blocks[chain.Len()-3].Header(), + }, + }, + { + req: GetBlockHeaders{ + Origin: hashOrNumber{ + Hash: chain.Head().Hash(), + }, + Amount: uint64(1), + Skip: 0, + Reverse: false, + }, + expected: BlockHeaders{ + chain.Head().Header(), + }, + }, + } + + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + headers, err := chain.GetHeaders(tt.req) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, headers, tt.expected) + }) + } +} diff --git a/cmd/devp2p/internal/ethtest/large.go b/cmd/devp2p/internal/ethtest/large.go new file mode 100644 index 000000000000..deca00be5384 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/large.go @@ -0,0 +1,80 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "crypto/rand" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" +) + +// largeNumber returns a very large big.Int. +func largeNumber(megabytes int) *big.Int { + buf := make([]byte, megabytes*1024*1024) + rand.Read(buf) + bigint := new(big.Int) + bigint.SetBytes(buf) + return bigint +} + +// largeBuffer returns a very large buffer. +func largeBuffer(megabytes int) []byte { + buf := make([]byte, megabytes*1024*1024) + rand.Read(buf) + return buf +} + +// largeString returns a very large string. 
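+// The result is the hex encoding of a random buffer of the requested number of megabytes.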
+func largeString(megabytes int) string { + buf := make([]byte, megabytes*1024*1024) + rand.Read(buf) + return hexutil.Encode(buf) +} + +func largeBlock() *types.Block { + return types.NewBlockWithHeader(largeHeader()) +} + +// Returns a random hash +func randHash() common.Hash { + var h common.Hash + rand.Read(h[:]) + return h +} + +func largeHeader() *types.Header { + return &types.Header{ + MixDigest: randHash(), + ReceiptHash: randHash(), + TxHash: randHash(), + Nonce: types.BlockNonce{}, + Extra: []byte{}, + Bloom: types.Bloom{}, + GasUsed: 0, + Coinbase: common.Address{}, + GasLimit: 0, + UncleHash: randHash(), + Time: 1337, + ParentHash: randHash(), + Root: randHash(), + Number: largeNumber(2), + Difficulty: largeNumber(2), + } +} diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go new file mode 100644 index 000000000000..5d0cdda720ab --- /dev/null +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -0,0 +1,426 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "fmt" + "net" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/rlpx" + "github.com/stretchr/testify/assert" +) + +var pretty = spew.ConfigState{ + Indent: " ", + DisableCapacities: true, + DisablePointerAddresses: true, + SortKeys: true, +} + +var timeout = 20 * time.Second + +// Suite represents a structure used to test the eth +// protocol of a node(s). +type Suite struct { + Dest *enode.Node + + chain *Chain + fullChain *Chain +} + +// NewSuite creates and returns a new eth-test suite that can +// be used to test the given node against the given blockchain +// data. 
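+// The suite keeps a shortened chain (the first 1000 blocks) as the assumed shared history and the
+// full chain as a source of blocks and transactions to announce to the node under test.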
+func NewSuite(dest *enode.Node, chainfile string, genesisfile string) *Suite { + chain, err := loadChain(chainfile, genesisfile) + if err != nil { + panic(err) + } + return &Suite{ + Dest: dest, + chain: chain.Shorten(1000), + fullChain: chain, + } +} + +func (s *Suite) AllTests() []utesting.Test { + return []utesting.Test{ + {Name: "Status", Fn: s.TestStatus}, + {Name: "GetBlockHeaders", Fn: s.TestGetBlockHeaders}, + {Name: "Broadcast", Fn: s.TestBroadcast}, + {Name: "GetBlockBodies", Fn: s.TestGetBlockBodies}, + {Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce}, + {Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake}, + {Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus}, + {Name: "TestTransactions", Fn: s.TestTransaction}, + {Name: "TestMaliciousTransactions", Fn: s.TestMaliciousTx}, + } +} + +// TestStatus attempts to connect to the given node and exchange +// a status message with it, and then check to make sure +// the chain head is correct. +func (s *Suite) TestStatus(t *utesting.T) { + conn, err := s.dial() + if err != nil { + t.Fatalf("could not dial: %v", err) + } + // get protoHandshake + conn.handshake(t) + // get status + switch msg := conn.statusExchange(t, s.chain, nil).(type) { + case *Status: + t.Logf("got status message: %s", pretty.Sdump(msg)) + default: + t.Fatalf("unexpected: %s", pretty.Sdump(msg)) + } +} + +// TestMaliciousStatus sends a status package with a large total difficulty. +func (s *Suite) TestMaliciousStatus(t *utesting.T) { + conn, err := s.dial() + if err != nil { + t.Fatalf("could not dial: %v", err) + } + // get protoHandshake + conn.handshake(t) + status := &Status{ + ProtocolVersion: uint32(conn.ethProtocolVersion), + NetworkID: s.chain.chainConfig.ChainID.Uint64(), + TD: largeNumber(2), + Head: s.chain.blocks[s.chain.Len()-1].Hash(), + Genesis: s.chain.blocks[0].Hash(), + ForkID: s.chain.ForkID(), + } + // get status + switch msg := conn.statusExchange(t, s.chain, status).(type) { + case *Status: + t.Logf("%+v\n", msg) + default: + t.Fatalf("expected status, got: %#v ", msg) + } + // wait for disconnect + switch msg := conn.ReadAndServe(s.chain, timeout).(type) { + case *Disconnect: + case *Error: + return + default: + t.Fatalf("expected disconnect, got: %s", pretty.Sdump(msg)) + } +} + +// TestGetBlockHeaders tests whether the given node can respond to +// a `GetBlockHeaders` request and that the response is accurate. +func (s *Suite) TestGetBlockHeaders(t *utesting.T) { + conn, err := s.dial() + if err != nil { + t.Fatalf("could not dial: %v", err) + } + + conn.handshake(t) + conn.statusExchange(t, s.chain, nil) + + // get block headers + req := &GetBlockHeaders{ + Origin: hashOrNumber{ + Hash: s.chain.blocks[1].Hash(), + }, + Amount: 2, + Skip: 1, + Reverse: false, + } + + if err := conn.Write(req); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + + switch msg := conn.ReadAndServe(s.chain, timeout).(type) { + case *BlockHeaders: + headers := msg + for _, header := range *headers { + num := header.Number.Uint64() + t.Logf("received header (%d): %s", num, pretty.Sdump(header)) + assert.Equal(t, s.chain.blocks[int(num)].Header(), header) + } + default: + t.Fatalf("unexpected: %s", pretty.Sdump(msg)) + } +} + +// TestGetBlockBodies tests whether the given node can respond to +// a `GetBlockBodies` request and that the response is accurate. 
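+// The request asks for the bodies of two blocks of the shared chain, selected by hash.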
+func (s *Suite) TestGetBlockBodies(t *utesting.T) { + conn, err := s.dial() + if err != nil { + t.Fatalf("could not dial: %v", err) + } + + conn.handshake(t) + conn.statusExchange(t, s.chain, nil) + // create block bodies request + req := &GetBlockBodies{s.chain.blocks[54].Hash(), s.chain.blocks[75].Hash()} + if err := conn.Write(req); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + + switch msg := conn.ReadAndServe(s.chain, timeout).(type) { + case *BlockBodies: + t.Logf("received %d block bodies", len(*msg)) + default: + t.Fatalf("unexpected: %s", pretty.Sdump(msg)) + } +} + +// TestBroadcast tests whether a block announcement is correctly +// propagated to the given node's peer(s). +func (s *Suite) TestBroadcast(t *utesting.T) { + sendConn, receiveConn := s.setupConnection(t), s.setupConnection(t) + nextBlock := len(s.chain.blocks) + blockAnnouncement := &NewBlock{ + Block: s.fullChain.blocks[nextBlock], + TD: s.fullChain.TD(nextBlock + 1), + } + s.testAnnounce(t, sendConn, receiveConn, blockAnnouncement) + // update test suite chain + s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[nextBlock]) + // wait for client to update its chain + if err := receiveConn.waitForBlock(s.chain.Head()); err != nil { + t.Fatal(err) + } +} + +// TestMaliciousHandshake tries to send malicious data during the handshake. +func (s *Suite) TestMaliciousHandshake(t *utesting.T) { + conn, err := s.dial() + if err != nil { + t.Fatalf("could not dial: %v", err) + } + // write hello to client + pub0 := crypto.FromECDSAPub(&conn.ourKey.PublicKey)[1:] + handshakes := []*Hello{ + { + Version: 5, + Caps: []p2p.Cap{ + {Name: largeString(2), Version: 64}, + }, + ID: pub0, + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: append(pub0, byte(0)), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: append(pub0, pub0...), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: largeBuffer(2), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: largeString(2), Version: 64}, + }, + ID: largeBuffer(2), + }, + } + for i, handshake := range handshakes { + t.Logf("Testing malicious handshake %v\n", i) + // Init the handshake + if err := conn.Write(handshake); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + // check that the peer disconnected + timeout := 20 * time.Second + // Discard one hello + for i := 0; i < 2; i++ { + switch msg := conn.ReadAndServe(s.chain, timeout).(type) { + case *Disconnect: + case *Error: + case *Hello: + // Hello's are send concurrently, so ignore them + continue + default: + t.Fatalf("unexpected: %s", pretty.Sdump(msg)) + } + } + // Dial for the next round + conn, err = s.dial() + if err != nil { + t.Fatalf("could not dial: %v", err) + } + } +} + +// TestLargeAnnounce tests the announcement mechanism with a large block. 
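+// Announcements carrying an oversized block or total difficulty must get the peer disconnected;
+// the final, valid announcement must still propagate to the receiving connection.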
+func (s *Suite) TestLargeAnnounce(t *utesting.T) { + nextBlock := len(s.chain.blocks) + blocks := []*NewBlock{ + { + Block: largeBlock(), + TD: s.fullChain.TD(nextBlock + 1), + }, + { + Block: s.fullChain.blocks[nextBlock], + TD: largeNumber(2), + }, + { + Block: largeBlock(), + TD: largeNumber(2), + }, + { + Block: s.fullChain.blocks[nextBlock], + TD: s.fullChain.TD(nextBlock + 1), + }, + } + + for i, blockAnnouncement := range blocks[0:3] { + t.Logf("Testing malicious announcement: %v\n", i) + sendConn := s.setupConnection(t) + if err := sendConn.Write(blockAnnouncement); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + // Invalid announcement, check that peer disconnected + switch msg := sendConn.ReadAndServe(s.chain, timeout).(type) { + case *Disconnect: + case *Error: + break + default: + t.Fatalf("unexpected: %s wanted disconnect", pretty.Sdump(msg)) + } + } + // Test the last block as a valid block + sendConn := s.setupConnection(t) + receiveConn := s.setupConnection(t) + s.testAnnounce(t, sendConn, receiveConn, blocks[3]) + // update test suite chain + s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[nextBlock]) + // wait for client to update its chain + if err := receiveConn.waitForBlock(s.fullChain.blocks[nextBlock]); err != nil { + t.Fatal(err) + } +} + +func (s *Suite) testAnnounce(t *utesting.T, sendConn, receiveConn *Conn, blockAnnouncement *NewBlock) { + // Announce the block. + if err := sendConn.Write(blockAnnouncement); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + s.waitAnnounce(t, receiveConn, blockAnnouncement) +} + +func (s *Suite) waitAnnounce(t *utesting.T, conn *Conn, blockAnnouncement *NewBlock) { + timeout := 20 * time.Second + switch msg := conn.ReadAndServe(s.chain, timeout).(type) { + case *NewBlock: + t.Logf("received NewBlock message: %s", pretty.Sdump(msg.Block)) + assert.Equal(t, + blockAnnouncement.Block.Header(), msg.Block.Header(), + "wrong block header in announcement", + ) + assert.Equal(t, + blockAnnouncement.TD, msg.TD, + "wrong TD in announcement", + ) + case *NewBlockHashes: + hashes := *msg + t.Logf("received NewBlockHashes message: %s", pretty.Sdump(hashes)) + assert.Equal(t, + blockAnnouncement.Block.Hash(), hashes[0].Hash, + "wrong block hash in announcement", + ) + default: + t.Fatalf("unexpected: %s", pretty.Sdump(msg)) + } +} + +func (s *Suite) setupConnection(t *utesting.T) *Conn { + // create conn + sendConn, err := s.dial() + if err != nil { + t.Fatalf("could not dial: %v", err) + } + sendConn.handshake(t) + sendConn.statusExchange(t, s.chain, nil) + return sendConn +} + +// dial attempts to dial the given node and perform a handshake, +// returning the created Conn if successful. 
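+// The TCP connection is wrapped in an RLPx connection keyed with a freshly generated ephemeral key.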
+func (s *Suite) dial() (*Conn, error) { + var conn Conn + + fd, err := net.Dial("tcp", fmt.Sprintf("%v:%d", s.Dest.IP(), s.Dest.TCP())) + if err != nil { + return nil, err + } + conn.Conn = rlpx.NewConn(fd, s.Dest.Pubkey()) + + // do encHandshake + conn.ourKey, _ = crypto.GenerateKey() + _, err = conn.Handshake(conn.ourKey) + if err != nil { + return nil, err + } + + return &conn, nil +} + +func (s *Suite) TestTransaction(t *utesting.T) { + tests := []*types.Transaction{ + getNextTxFromChain(t, s), + unknownTx(t, s), + } + for i, tx := range tests { + t.Logf("Testing tx propagation: %v\n", i) + sendSuccessfulTx(t, s, tx) + } +} + +func (s *Suite) TestMaliciousTx(t *utesting.T) { + tests := []*types.Transaction{ + getOldTxFromChain(t, s), + invalidNonceTx(t, s), + hugeAmount(t, s), + hugeGasPrice(t, s), + hugeData(t, s), + } + for i, tx := range tests { + t.Logf("Testing malicious tx propagation: %v\n", i) + sendFailingTx(t, s, tx) + } +} diff --git a/cmd/devp2p/internal/ethtest/testdata/chain.rlp b/cmd/devp2p/internal/ethtest/testdata/chain.rlp new file mode 100644 index 000000000000..5ebc2f3bb788 Binary files /dev/null and b/cmd/devp2p/internal/ethtest/testdata/chain.rlp differ diff --git a/cmd/devp2p/internal/ethtest/testdata/genesis.json b/cmd/devp2p/internal/ethtest/testdata/genesis.json new file mode 100644 index 000000000000..d8b5d225024e --- /dev/null +++ b/cmd/devp2p/internal/ethtest/testdata/genesis.json @@ -0,0 +1,26 @@ +{ + "config": { + "chainId": 19763, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "ethash": {} + }, + "nonce": "0xdeadbeefdeadbeef", + "timestamp": "0x0", + "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000", + "gasLimit": "0x80000000", + "difficulty": "0x20000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "alloc": { + "71562b71999873db5b286df957af199ec94617f7": { + "balance": "0xffffffffffffffffffffffffff" + } + }, + "number": "0x0", + "gasUsed": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000" +} diff --git a/cmd/devp2p/internal/ethtest/testdata/halfchain.rlp b/cmd/devp2p/internal/ethtest/testdata/halfchain.rlp new file mode 100644 index 000000000000..1a820734e10c Binary files /dev/null and b/cmd/devp2p/internal/ethtest/testdata/halfchain.rlp differ diff --git a/cmd/devp2p/internal/ethtest/transaction.go b/cmd/devp2p/internal/ethtest/transaction.go new file mode 100644 index 000000000000..4aaab8bf97d4 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/transaction.go @@ -0,0 +1,180 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package ethtest + +import ( + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/utesting" +) + +//var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") +var faucetKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + +func sendSuccessfulTx(t *utesting.T, s *Suite, tx *types.Transaction) { + sendConn := s.setupConnection(t) + t.Logf("sending tx: %v %v %v\n", tx.Hash().String(), tx.GasPrice(), tx.Gas()) + // Send the transaction + if err := sendConn.Write(Transactions([]*types.Transaction{tx})); err != nil { + t.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + recvConn := s.setupConnection(t) + // Wait for the transaction announcement + switch msg := recvConn.ReadAndServe(s.chain, timeout).(type) { + case *Transactions: + recTxs := *msg + if len(recTxs) < 1 { + t.Fatalf("received transactions do not match send: %v", recTxs) + } + if tx.Hash() != recTxs[len(recTxs)-1].Hash() { + t.Fatalf("received transactions do not match send: got %v want %v", recTxs, tx) + } + case *NewPooledTransactionHashes: + txHashes := *msg + if len(txHashes) < 1 { + t.Fatalf("received transactions do not match send: %v", txHashes) + } + if tx.Hash() != txHashes[len(txHashes)-1] { + t.Fatalf("wrong announcement received, wanted %v got %v", tx, txHashes) + } + default: + t.Fatalf("unexpected message in sendSuccessfulTx: %s", pretty.Sdump(msg)) + } +} + +func sendFailingTx(t *utesting.T, s *Suite, tx *types.Transaction) { + sendConn, recvConn := s.setupConnection(t), s.setupConnection(t) + // Wait for a transaction announcement + switch msg := recvConn.ReadAndServe(s.chain, timeout).(type) { + case *NewPooledTransactionHashes: + break + default: + t.Logf("unexpected message, logging: %v", pretty.Sdump(msg)) + } + // Send the transaction + if err := sendConn.Write(Transactions([]*types.Transaction{tx})); err != nil { + t.Fatal(err) + } + // Wait for another transaction announcement + switch msg := recvConn.ReadAndServe(s.chain, timeout).(type) { + case *Transactions: + t.Fatalf("Received unexpected transaction announcement: %v", msg) + case *NewPooledTransactionHashes: + t.Fatalf("Received unexpected pooledTx announcement: %v", msg) + case *Error: + // Transaction should not be announced -> wait for timeout + return + default: + t.Fatalf("unexpected message in sendFailingTx: %s", pretty.Sdump(msg)) + } +} + +func unknownTx(t *utesting.T, s *Suite) *types.Transaction { + tx := getNextTxFromChain(t, s) + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce()+1, to, tx.Value(), tx.Gas(), tx.GasPrice(), tx.Data()) + return signWithFaucet(t, txNew) +} + +func getNextTxFromChain(t *utesting.T, s *Suite) *types.Transaction { + // Get a new transaction + var tx *types.Transaction + for _, blocks := range s.fullChain.blocks[s.chain.Len():] { + txs := blocks.Transactions() + if txs.Len() != 0 { + tx = txs[0] + break + } + } + if tx == nil { + t.Fatal("could not find transaction") + } + return tx +} + +func getOldTxFromChain(t *utesting.T, s *Suite) *types.Transaction { + var tx *types.Transaction + for _, blocks := range s.fullChain.blocks[:s.chain.Len()-1] { + txs := blocks.Transactions() + if txs.Len() != 0 { + tx = txs[0] + break + } + } + if tx == nil { + t.Fatal("could not find transaction") + } + return tx +} + +func invalidNonceTx(t *utesting.T, s *Suite) 
*types.Transaction { + tx := getNextTxFromChain(t, s) + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce()-2, to, tx.Value(), tx.Gas(), tx.GasPrice(), tx.Data()) + return signWithFaucet(t, txNew) +} + +func hugeAmount(t *utesting.T, s *Suite) *types.Transaction { + tx := getNextTxFromChain(t, s) + amount := largeNumber(2) + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce(), to, amount, tx.Gas(), tx.GasPrice(), tx.Data()) + return signWithFaucet(t, txNew) +} + +func hugeGasPrice(t *utesting.T, s *Suite) *types.Transaction { + tx := getNextTxFromChain(t, s) + gasPrice := largeNumber(2) + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce(), to, tx.Value(), tx.Gas(), gasPrice, tx.Data()) + return signWithFaucet(t, txNew) +} + +func hugeData(t *utesting.T, s *Suite) *types.Transaction { + tx := getNextTxFromChain(t, s) + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce(), to, tx.Value(), tx.Gas(), tx.GasPrice(), largeBuffer(2)) + return signWithFaucet(t, txNew) +} + +func signWithFaucet(t *utesting.T, tx *types.Transaction) *types.Transaction { + signer := types.HomesteadSigner{} + signedTx, err := types.SignTx(tx, signer, faucetKey) + if err != nil { + t.Fatalf("could not sign tx: %v\n", err) + } + return signedTx +} diff --git a/cmd/devp2p/internal/ethtest/types.go b/cmd/devp2p/internal/ethtest/types.go new file mode 100644 index 000000000000..f768d61d5ae7 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/types.go @@ -0,0 +1,395 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "crypto/ecdsa" + "fmt" + "io" + "math/big" + "reflect" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/forkid" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/rlpx" + "github.com/ethereum/go-ethereum/rlp" +) + +type Message interface { + Code() int +} + +type Error struct { + err error +} + +func (e *Error) Unwrap() error { return e.err } +func (e *Error) Error() string { return e.err.Error() } +func (e *Error) Code() int { return -1 } +func (e *Error) String() string { return e.Error() } + +func errorf(format string, args ...interface{}) *Error { + return &Error{fmt.Errorf(format, args...)} +} + +// Hello is the RLP structure of the protocol handshake. +type Hello struct { + Version uint64 + Name string + Caps []p2p.Cap + ListenPort uint64 + ID []byte // secp256k1 public key + + // Ignore additional fields (for forward compatibility). 
+ Rest []rlp.RawValue `rlp:"tail"` +} + +func (h Hello) Code() int { return 0x00 } + +// Disconnect is the RLP structure for a disconnect message. +type Disconnect struct { + Reason p2p.DiscReason +} + +func (d Disconnect) Code() int { return 0x01 } + +type Ping struct{} + +func (p Ping) Code() int { return 0x02 } + +type Pong struct{} + +func (p Pong) Code() int { return 0x03 } + +// Status is the network packet for the status message for eth/64 and later. +type Status struct { + ProtocolVersion uint32 + NetworkID uint64 + TD *big.Int + Head common.Hash + Genesis common.Hash + ForkID forkid.ID +} + +func (s Status) Code() int { return 16 } + +// NewBlockHashes is the network packet for the block announcements. +type NewBlockHashes []struct { + Hash common.Hash // Hash of one particular block being announced + Number uint64 // Number of one particular block being announced +} + +func (nbh NewBlockHashes) Code() int { return 17 } + +type Transactions []*types.Transaction + +func (t Transactions) Code() int { return 18 } + +// GetBlockHeaders represents a block header query. +type GetBlockHeaders struct { + Origin hashOrNumber // Block from which to retrieve headers + Amount uint64 // Maximum number of headers to retrieve + Skip uint64 // Blocks to skip between consecutive headers + Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis) +} + +func (g GetBlockHeaders) Code() int { return 19 } + +type BlockHeaders []*types.Header + +func (bh BlockHeaders) Code() int { return 20 } + +// GetBlockBodies represents a GetBlockBodies request +type GetBlockBodies []common.Hash + +func (gbb GetBlockBodies) Code() int { return 21 } + +// BlockBodies is the network packet for block content distribution. +type BlockBodies []*types.Body + +func (bb BlockBodies) Code() int { return 22 } + +// NewBlock is the network packet for the block propagation message. +type NewBlock struct { + Block *types.Block + TD *big.Int +} + +func (nb NewBlock) Code() int { return 23 } + +// NewPooledTransactionHashes is the network packet for the tx hash propagation message. +type NewPooledTransactionHashes [][32]byte + +func (nb NewPooledTransactionHashes) Code() int { return 24 } + +// HashOrNumber is a combined field for specifying an origin block. +type hashOrNumber struct { + Hash common.Hash // Block hash from which to retrieve headers (excludes Number) + Number uint64 // Block hash from which to retrieve headers (excludes Hash) +} + +// EncodeRLP is a specialized encoder for hashOrNumber to encode only one of the +// two contained union fields. +func (hn *hashOrNumber) EncodeRLP(w io.Writer) error { + if hn.Hash == (common.Hash{}) { + return rlp.Encode(w, hn.Number) + } + if hn.Number != 0 { + return fmt.Errorf("both origin hash (%x) and number (%d) provided", hn.Hash, hn.Number) + } + return rlp.Encode(w, hn.Hash) +} + +// DecodeRLP is a specialized decoder for hashOrNumber to decode the contents +// into either a block hash or a block number. 
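+// The decision is based on the RLP size: a 32-byte payload is decoded as a
+// block hash, anything up to 8 bytes as a block number.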
+func (hn *hashOrNumber) DecodeRLP(s *rlp.Stream) error { + _, size, _ := s.Kind() + origin, err := s.Raw() + if err == nil { + switch { + case size == 32: + err = rlp.DecodeBytes(origin, &hn.Hash) + case size <= 8: + err = rlp.DecodeBytes(origin, &hn.Number) + default: + err = fmt.Errorf("invalid input size %d for origin", size) + } + } + return err +} + +// Conn represents an individual connection with a peer +type Conn struct { + *rlpx.Conn + ourKey *ecdsa.PrivateKey + ethProtocolVersion uint +} + +func (c *Conn) Read() Message { + code, rawData, _, err := c.Conn.Read() + if err != nil { + return errorf("could not read from connection: %v", err) + } + + var msg Message + switch int(code) { + case (Hello{}).Code(): + msg = new(Hello) + case (Ping{}).Code(): + msg = new(Ping) + case (Pong{}).Code(): + msg = new(Pong) + case (Disconnect{}).Code(): + msg = new(Disconnect) + case (Status{}).Code(): + msg = new(Status) + case (GetBlockHeaders{}).Code(): + msg = new(GetBlockHeaders) + case (BlockHeaders{}).Code(): + msg = new(BlockHeaders) + case (GetBlockBodies{}).Code(): + msg = new(GetBlockBodies) + case (BlockBodies{}).Code(): + msg = new(BlockBodies) + case (NewBlock{}).Code(): + msg = new(NewBlock) + case (NewBlockHashes{}).Code(): + msg = new(NewBlockHashes) + case (Transactions{}).Code(): + msg = new(Transactions) + case (NewPooledTransactionHashes{}).Code(): + msg = new(NewPooledTransactionHashes) + default: + return errorf("invalid message code: %d", code) + } + + if err := rlp.DecodeBytes(rawData, msg); err != nil { + return errorf("could not rlp decode message: %v", err) + } + return msg +} + +// ReadAndServe serves GetBlockHeaders requests while waiting +// on another message from the node. +func (c *Conn) ReadAndServe(chain *Chain, timeout time.Duration) Message { + start := time.Now() + for time.Since(start) < timeout { + timeout := time.Now().Add(10 * time.Second) + c.SetReadDeadline(timeout) + switch msg := c.Read().(type) { + case *Ping: + c.Write(&Pong{}) + case *GetBlockHeaders: + req := *msg + headers, err := chain.GetHeaders(req) + if err != nil { + return errorf("could not get headers for inbound header request: %v", err) + } + + if err := c.Write(headers); err != nil { + return errorf("could not write to connection: %v", err) + } + default: + return msg + } + } + return errorf("no message received within %v", timeout) +} + +func (c *Conn) Write(msg Message) error { + payload, err := rlp.EncodeToBytes(msg) + if err != nil { + return err + } + _, err = c.Conn.Write(uint64(msg.Code()), payload) + return err +} + +// handshake checks to make sure a `HELLO` is received. 
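+// It also enables snappy compression when the peer advertises p2p version >= 5
+// and records the highest shared eth protocol version (capped at eth/65).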
+func (c *Conn) handshake(t *utesting.T) Message { + defer c.SetDeadline(time.Time{}) + c.SetDeadline(time.Now().Add(10 * time.Second)) + + // write hello to client + pub0 := crypto.FromECDSAPub(&c.ourKey.PublicKey)[1:] + ourHandshake := &Hello{ + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: pub0, + } + if err := c.Write(ourHandshake); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + // read hello from client + switch msg := c.Read().(type) { + case *Hello: + // set snappy if version is at least 5 + if msg.Version >= 5 { + c.SetSnappy(true) + } + c.negotiateEthProtocol(msg.Caps) + if c.ethProtocolVersion == 0 { + t.Fatalf("unexpected eth protocol version") + } + return msg + default: + t.Fatalf("bad handshake: %#v", msg) + return nil + } +} + +// negotiateEthProtocol sets the Conn's eth protocol version +// to highest advertised capability from peer +func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) { + var highestEthVersion uint + for _, capability := range caps { + if capability.Name != "eth" { + continue + } + if capability.Version > highestEthVersion && capability.Version <= 65 { + highestEthVersion = capability.Version + } + } + c.ethProtocolVersion = highestEthVersion +} + +// statusExchange performs a `Status` message exchange with the given +// node. +func (c *Conn) statusExchange(t *utesting.T, chain *Chain, status *Status) Message { + defer c.SetDeadline(time.Time{}) + c.SetDeadline(time.Now().Add(20 * time.Second)) + + // read status message from client + var message Message +loop: + for { + switch msg := c.Read().(type) { + case *Status: + if msg.Head != chain.blocks[chain.Len()-1].Hash() { + t.Fatalf("wrong head block in status: %s", msg.Head.String()) + } + if msg.TD.Cmp(chain.TD(chain.Len())) != 0 { + t.Fatalf("wrong TD in status: %v", msg.TD) + } + if !reflect.DeepEqual(msg.ForkID, chain.ForkID()) { + t.Fatalf("wrong fork ID in status: %v", msg.ForkID) + } + message = msg + break loop + case *Disconnect: + t.Fatalf("disconnect received: %v", msg.Reason) + case *Ping: + c.Write(&Pong{}) // TODO (renaynay): in the future, this should be an error + // (PINGs should not be a response upon fresh connection) + default: + t.Fatalf("bad status message: %s", pretty.Sdump(msg)) + } + } + // make sure eth protocol version is set for negotiation + if c.ethProtocolVersion == 0 { + t.Fatalf("eth protocol version must be set in Conn") + } + if status == nil { + // write status message to client + status = &Status{ + ProtocolVersion: uint32(c.ethProtocolVersion), + NetworkID: chain.chainConfig.ChainID.Uint64(), + TD: chain.TD(chain.Len()), + Head: chain.blocks[chain.Len()-1].Hash(), + Genesis: chain.blocks[0].Hash(), + ForkID: chain.ForkID(), + } + } + + if err := c.Write(*status); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + + return message +} + +// waitForBlock waits for confirmation from the client that it has +// imported the given block. 
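+// It polls the node with single-header GetBlockHeaders requests for the block
+// hash until a non-empty BlockHeaders response is returned.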
+func (c *Conn) waitForBlock(block *types.Block) error { + defer c.SetReadDeadline(time.Time{}) + + timeout := time.Now().Add(20 * time.Second) + c.SetReadDeadline(timeout) + for { + req := &GetBlockHeaders{Origin: hashOrNumber{Hash: block.Hash()}, Amount: 1} + if err := c.Write(req); err != nil { + return err + } + switch msg := c.Read().(type) { + case *BlockHeaders: + if len(*msg) > 0 { + return nil + } + time.Sleep(100 * time.Millisecond) + default: + return fmt.Errorf("invalid message: %s", pretty.Sdump(msg)) + } + } +} diff --git a/cmd/devp2p/internal/v4test/discv4tests.go b/cmd/devp2p/internal/v4test/discv4tests.go new file mode 100644 index 000000000000..140b96bfa568 --- /dev/null +++ b/cmd/devp2p/internal/v4test/discv4tests.go @@ -0,0 +1,467 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package v4test + +import ( + "bytes" + "crypto/rand" + "fmt" + "net" + "reflect" + "time" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/p2p/discover/v4wire" +) + +const ( + expiration = 20 * time.Second + wrongPacket = 66 + macSize = 256 / 8 +) + +var ( + // Remote node under test + Remote string + // IP where the first tester is listening, port will be assigned + Listen1 string = "127.0.0.1" + // IP where the second tester is listening, port will be assigned + // Before running the test, you may have to `sudo ifconfig lo0 add 127.0.0.2` (on MacOS at least) + Listen2 string = "127.0.0.2" +) + +type pingWithJunk struct { + Version uint + From, To v4wire.Endpoint + Expiration uint64 + JunkData1 uint + JunkData2 []byte +} + +func (req *pingWithJunk) Name() string { return "PING/v4" } +func (req *pingWithJunk) Kind() byte { return v4wire.PingPacket } + +type pingWrongType struct { + Version uint + From, To v4wire.Endpoint + Expiration uint64 +} + +func (req *pingWrongType) Name() string { return "WRONG/v4" } +func (req *pingWrongType) Kind() byte { return wrongPacket } + +func futureExpiration() uint64 { + return uint64(time.Now().Add(expiration).Unix()) +} + +// This test just sends a PING packet and expects a response. +func BasicPing(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + pingHash := te.send(te.l1, &v4wire.Ping{ + Version: 4, + From: te.localEndpoint(te.l1), + To: te.remoteEndpoint(), + Expiration: futureExpiration(), + }) + + reply, _, _ := te.read(te.l1) + if err := te.checkPong(reply, pingHash); err != nil { + t.Fatal(err) + } +} + +// checkPong verifies that reply is a valid PONG matching the given ping hash. 
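+// It checks the packet kind, the reply token against the PING hash, the 'to'
+// endpoint, and that the expiration timestamp is not in the past.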
+func (te *testenv) checkPong(reply v4wire.Packet, pingHash []byte) error { + if reply == nil || reply.Kind() != v4wire.PongPacket { + return fmt.Errorf("expected PONG reply, got %v", reply) + } + pong := reply.(*v4wire.Pong) + if !bytes.Equal(pong.ReplyTok, pingHash) { + return fmt.Errorf("PONG reply token mismatch: got %x, want %x", pong.ReplyTok, pingHash) + } + wantEndpoint := te.localEndpoint(te.l1) + if !reflect.DeepEqual(pong.To, wantEndpoint) { + return fmt.Errorf("PONG 'to' endpoint mismatch: got %+v, want %+v", pong.To, wantEndpoint) + } + if v4wire.Expired(pong.Expiration) { + return fmt.Errorf("PONG is expired (%v)", pong.Expiration) + } + return nil +} + +// This test sends a PING packet with wrong 'to' field and expects a PONG response. +func PingWrongTo(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + wrongEndpoint := v4wire.Endpoint{IP: net.ParseIP("192.0.2.0")} + pingHash := te.send(te.l1, &v4wire.Ping{ + Version: 4, + From: te.localEndpoint(te.l1), + To: wrongEndpoint, + Expiration: futureExpiration(), + }) + + reply, _, _ := te.read(te.l1) + if err := te.checkPong(reply, pingHash); err != nil { + t.Fatal(err) + } +} + +// This test sends a PING packet with wrong 'from' field and expects a PONG response. +func PingWrongFrom(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + wrongEndpoint := v4wire.Endpoint{IP: net.ParseIP("192.0.2.0")} + pingHash := te.send(te.l1, &v4wire.Ping{ + Version: 4, + From: wrongEndpoint, + To: te.remoteEndpoint(), + Expiration: futureExpiration(), + }) + + reply, _, _ := te.read(te.l1) + if err := te.checkPong(reply, pingHash); err != nil { + t.Fatal(err) + } +} + +// This test sends a PING packet with additional data at the end and expects a PONG +// response. The remote node should respond because EIP-8 mandates ignoring additional +// trailing data. +func PingExtraData(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + pingHash := te.send(te.l1, &pingWithJunk{ + Version: 4, + From: te.localEndpoint(te.l1), + To: te.remoteEndpoint(), + Expiration: futureExpiration(), + JunkData1: 42, + JunkData2: []byte{9, 8, 7, 6, 5, 4, 3, 2, 1}, + }) + + reply, _, _ := te.read(te.l1) + if err := te.checkPong(reply, pingHash); err != nil { + t.Fatal(err) + } +} + +// This test sends a PING packet with additional data and wrong 'from' field +// and expects a PONG response. +func PingExtraDataWrongFrom(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + wrongEndpoint := v4wire.Endpoint{IP: net.ParseIP("192.0.2.0")} + req := pingWithJunk{ + Version: 4, + From: wrongEndpoint, + To: te.remoteEndpoint(), + Expiration: futureExpiration(), + JunkData1: 42, + JunkData2: []byte{9, 8, 7, 6, 5, 4, 3, 2, 1}, + } + pingHash := te.send(te.l1, &req) + reply, _, _ := te.read(te.l1) + if err := te.checkPong(reply, pingHash); err != nil { + t.Fatal(err) + } +} + +// This test sends a PING packet with an expiration in the past. +// The remote node should not respond. +func PingPastExpiration(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + te.send(te.l1, &v4wire.Ping{ + Version: 4, + From: te.localEndpoint(te.l1), + To: te.remoteEndpoint(), + Expiration: -futureExpiration(), + }) + + reply, _, _ := te.read(te.l1) + if reply != nil { + t.Fatal("Expected no reply, got", reply) + } +} + +// This test sends an invalid packet. The remote node should not respond. 
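+// The packet carries a valid PING body but an unassigned packet type (66),
+// so the node cannot interpret it.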
+func WrongPacketType(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + te.send(te.l1, &pingWrongType{ + Version: 4, + From: te.localEndpoint(te.l1), + To: te.remoteEndpoint(), + Expiration: futureExpiration(), + }) + + reply, _, _ := te.read(te.l1) + if reply != nil { + t.Fatal("Expected no reply, got", reply) + } +} + +// This test verifies that the default behaviour of ignoring 'from' fields is unaffected by +// the bonding process. After bonding, it pings the target with a different from endpoint. +func BondThenPingWithWrongFrom(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + bond(t, te) + + wrongEndpoint := v4wire.Endpoint{IP: net.ParseIP("192.0.2.0")} + pingHash := te.send(te.l1, &v4wire.Ping{ + Version: 4, + From: wrongEndpoint, + To: te.remoteEndpoint(), + Expiration: futureExpiration(), + }) + + reply, _, _ := te.read(te.l1) + if err := te.checkPong(reply, pingHash); err != nil { + t.Fatal(err) + } +} + +// This test just sends FINDNODE. The remote node should not reply +// because the endpoint proof has not completed. +func FindnodeWithoutEndpointProof(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + req := v4wire.Findnode{Expiration: futureExpiration()} + rand.Read(req.Target[:]) + te.send(te.l1, &req) + + reply, _, _ := te.read(te.l1) + if reply != nil { + t.Fatal("Expected no response, got", reply) + } +} + +// BasicFindnode sends a FINDNODE request after performing the endpoint +// proof. The remote node should respond. +func BasicFindnode(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + bond(t, te) + + findnode := v4wire.Findnode{Expiration: futureExpiration()} + rand.Read(findnode.Target[:]) + te.send(te.l1, &findnode) + + reply, _, err := te.read(te.l1) + if err != nil { + t.Fatal("read find nodes", err) + } + if reply.Kind() != v4wire.NeighborsPacket { + t.Fatal("Expected neighbors, got", reply.Name()) + } +} + +// This test sends an unsolicited NEIGHBORS packet after the endpoint proof, then sends +// FINDNODE to read the remote table. The remote node should not return the node contained +// in the unsolicited NEIGHBORS packet. +func UnsolicitedNeighbors(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + bond(t, te) + + // Send unsolicited NEIGHBORS response. + fakeKey, _ := crypto.GenerateKey() + encFakeKey := v4wire.EncodePubkey(&fakeKey.PublicKey) + neighbors := v4wire.Neighbors{ + Expiration: futureExpiration(), + Nodes: []v4wire.Node{{ + ID: encFakeKey, + IP: net.IP{1, 2, 3, 4}, + UDP: 30303, + TCP: 30303, + }}, + } + te.send(te.l1, &neighbors) + + // Check if the remote node included the fake node. + te.send(te.l1, &v4wire.Findnode{ + Expiration: futureExpiration(), + Target: encFakeKey, + }) + + reply, _, err := te.read(te.l1) + if err != nil { + t.Fatal("read find nodes", err) + } + if reply.Kind() != v4wire.NeighborsPacket { + t.Fatal("Expected neighbors, got", reply.Name()) + } + nodes := reply.(*v4wire.Neighbors).Nodes + if contains(nodes, encFakeKey) { + t.Fatal("neighbors response contains node from earlier unsolicited neighbors response") + } +} + +// This test sends FINDNODE with an expiration timestamp in the past. +// The remote node should not respond. 
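+// Any NEIGHBORS packet received before the read deadline expires fails the test.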
+func FindnodePastExpiration(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + bond(t, te) + + findnode := v4wire.Findnode{Expiration: -futureExpiration()} + rand.Read(findnode.Target[:]) + te.send(te.l1, &findnode) + + for { + reply, _, _ := te.read(te.l1) + if reply == nil { + return + } else if reply.Kind() == v4wire.NeighborsPacket { + t.Fatal("Unexpected NEIGHBORS response for expired FINDNODE request") + } + } +} + +// bond performs the endpoint proof with the remote node. +func bond(t *utesting.T, te *testenv) { + te.send(te.l1, &v4wire.Ping{ + Version: 4, + From: te.localEndpoint(te.l1), + To: te.remoteEndpoint(), + Expiration: futureExpiration(), + }) + + var gotPing, gotPong bool + for !gotPing || !gotPong { + req, hash, err := te.read(te.l1) + if err != nil { + t.Fatal(err) + } + switch req.(type) { + case *v4wire.Ping: + te.send(te.l1, &v4wire.Pong{ + To: te.remoteEndpoint(), + ReplyTok: hash, + Expiration: futureExpiration(), + }) + gotPing = true + case *v4wire.Pong: + // TODO: maybe verify pong data here + gotPong = true + } + } +} + +// This test attempts to perform a traffic amplification attack against a +// 'victim' endpoint using FINDNODE. In this attack scenario, the attacker +// attempts to complete the endpoint proof non-interactively by sending a PONG +// with mismatching reply token from the 'victim' endpoint. The attack works if +// the remote node does not verify the PONG reply token field correctly. The +// attacker could then perform traffic amplification by sending many FINDNODE +// requests to the discovery node, which would reply to the 'victim' address. +func FindnodeAmplificationInvalidPongHash(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + // Send PING to start endpoint verification. + te.send(te.l1, &v4wire.Ping{ + Version: 4, + From: te.localEndpoint(te.l1), + To: te.remoteEndpoint(), + Expiration: futureExpiration(), + }) + + var gotPing, gotPong bool + for !gotPing || !gotPong { + req, _, err := te.read(te.l1) + if err != nil { + t.Fatal(err) + } + switch req.(type) { + case *v4wire.Ping: + // Send PONG from this node ID, but with invalid ReplyTok. + te.send(te.l1, &v4wire.Pong{ + To: te.remoteEndpoint(), + ReplyTok: make([]byte, macSize), + Expiration: futureExpiration(), + }) + gotPing = true + case *v4wire.Pong: + gotPong = true + } + } + + // Now send FINDNODE. The remote node should not respond because our + // PONG did not reference the PING hash. + findnode := v4wire.Findnode{Expiration: futureExpiration()} + rand.Read(findnode.Target[:]) + te.send(te.l1, &findnode) + + // If we receive a NEIGHBORS response, the attack worked and the test fails. + reply, _, _ := te.read(te.l1) + if reply != nil && reply.Kind() == v4wire.NeighborsPacket { + t.Error("Got neighbors") + } +} + +// This test attempts to perform a traffic amplification attack using FINDNODE. +// The attack works if the remote node does not verify the IP address of FINDNODE +// against the endpoint verification proof done by PING/PONG. +func FindnodeAmplificationWrongIP(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + // Do the endpoint proof from the l1 IP. + bond(t, te) + + // Now send FINDNODE from the same node ID, but different IP address. + // The remote node should not respond. 
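+	// A reply here would let an attacker bounce FINDNODE traffic towards an
+	// address that never completed the endpoint proof.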
+	findnode := v4wire.Findnode{Expiration: futureExpiration()}
+	rand.Read(findnode.Target[:])
+	te.send(te.l2, &findnode)
+
+	// If we receive a NEIGHBORS response, the attack worked and the test fails.
+	reply, _, _ := te.read(te.l2)
+	if reply != nil {
+		t.Error("Got NEIGHBORS response for FINDNODE from wrong IP")
+	}
+}
+
+var AllTests = []utesting.Test{
+	{Name: "Ping/Basic", Fn: BasicPing},
+	{Name: "Ping/WrongTo", Fn: PingWrongTo},
+	{Name: "Ping/WrongFrom", Fn: PingWrongFrom},
+	{Name: "Ping/ExtraData", Fn: PingExtraData},
+	{Name: "Ping/ExtraDataWrongFrom", Fn: PingExtraDataWrongFrom},
+	{Name: "Ping/PastExpiration", Fn: PingPastExpiration},
+	{Name: "Ping/WrongPacketType", Fn: WrongPacketType},
+	{Name: "Ping/BondThenPingWithWrongFrom", Fn: BondThenPingWithWrongFrom},
+	{Name: "Findnode/WithoutEndpointProof", Fn: FindnodeWithoutEndpointProof},
+	{Name: "Findnode/BasicFindnode", Fn: BasicFindnode},
+	{Name: "Findnode/UnsolicitedNeighbors", Fn: UnsolicitedNeighbors},
+	{Name: "Findnode/PastExpiration", Fn: FindnodePastExpiration},
+	{Name: "Amplification/InvalidPongHash", Fn: FindnodeAmplificationInvalidPongHash},
+	{Name: "Amplification/WrongIP", Fn: FindnodeAmplificationWrongIP},
+}
diff --git a/cmd/devp2p/internal/v4test/framework.go b/cmd/devp2p/internal/v4test/framework.go
new file mode 100644
index 000000000000..92865941810b
--- /dev/null
+++ b/cmd/devp2p/internal/v4test/framework.go
@@ -0,0 +1,123 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package v4test
+
+import (
+	"crypto/ecdsa"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/p2p/discover/v4wire"
+	"github.com/ethereum/go-ethereum/p2p/enode"
+)
+
+const waitTime = 300 * time.Millisecond
+
+type testenv struct {
+	l1, l2     net.PacketConn
+	key        *ecdsa.PrivateKey
+	remote     *enode.Node
+	remoteAddr *net.UDPAddr
+}
+
+func newTestEnv(remote string, listen1, listen2 string) *testenv {
+	l1, err := net.ListenPacket("udp", fmt.Sprintf("%v:0", listen1))
+	if err != nil {
+		panic(err)
+	}
+	l2, err := net.ListenPacket("udp", fmt.Sprintf("%v:0", listen2))
+	if err != nil {
+		panic(err)
+	}
+	key, err := crypto.GenerateKey()
+	if err != nil {
+		panic(err)
+	}
+	node, err := enode.Parse(enode.ValidSchemes, remote)
+	if err != nil {
+		panic(err)
+	}
+	if node.IP() == nil || node.UDP() == 0 {
+		var ip net.IP
+		var tcpPort, udpPort int
+		if ip = node.IP(); ip == nil {
+			ip = net.ParseIP("127.0.0.1")
+		}
+		if tcpPort = node.TCP(); tcpPort == 0 {
+			tcpPort = 30303
+		}
+		if udpPort = node.UDP(); udpPort == 0 {
+			udpPort = 30303
+		}
+		node = enode.NewV4(node.Pubkey(), ip, tcpPort, udpPort)
+	}
+	addr := &net.UDPAddr{IP: node.IP(), Port: node.UDP()}
+	return &testenv{l1, l2, key, node, addr}
+}
+
+func (te *testenv) close() {
+	te.l1.Close()
+	te.l2.Close()
+}
+
+func (te *testenv) send(c net.PacketConn, req v4wire.Packet) []byte {
+	packet, hash, err := v4wire.Encode(te.key, req)
+	if err != nil {
+		panic(fmt.Errorf("can't encode %v packet: %v", req.Name(), err))
+	}
+	if _, err := c.WriteTo(packet, te.remoteAddr); err != nil {
+		panic(fmt.Errorf("can't send %v: %v", req.Name(), err))
+	}
+	return hash
+}
+
+func (te *testenv) read(c net.PacketConn) (v4wire.Packet, []byte, error) {
+	buf := make([]byte, 2048)
+	if err := c.SetReadDeadline(time.Now().Add(waitTime)); err != nil {
+		return nil, nil, err
+	}
+	n, _, err := c.ReadFrom(buf)
+	if err != nil {
+		return nil, nil, err
+	}
+	p, _, hash, err := v4wire.Decode(buf[:n])
+	return p, hash, err
+}
+
+func (te *testenv) localEndpoint(c net.PacketConn) v4wire.Endpoint {
+	addr := c.LocalAddr().(*net.UDPAddr)
+	return v4wire.Endpoint{
+		IP:  addr.IP.To4(),
+		UDP: uint16(addr.Port),
+		TCP: 0,
+	}
+}
+
+func (te *testenv) remoteEndpoint() v4wire.Endpoint {
+	return v4wire.NewEndpoint(te.remoteAddr, 0)
+}
+
+func contains(ns []v4wire.Node, key v4wire.Pubkey) bool {
+	for _, n := range ns {
+		if n.ID == key {
+			return true
+		}
+	}
+	return false
+}
diff --git a/cmd/devp2p/internal/v5test/discv5tests.go b/cmd/devp2p/internal/v5test/discv5tests.go
new file mode 100644
index 000000000000..7866498f7376
--- /dev/null
+++ b/cmd/devp2p/internal/v5test/discv5tests.go
@@ -0,0 +1,377 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+ +package v5test + +import ( + "bytes" + "net" + "sync" + "time" + + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/p2p/discover/v5wire" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/netutil" +) + +// Suite is the discv5 test suite. +type Suite struct { + Dest *enode.Node + Listen1, Listen2 string // listening addresses +} + +func (s *Suite) listen1(log logger) (*conn, net.PacketConn) { + c := newConn(s.Dest, log) + l := c.listen(s.Listen1) + return c, l +} + +func (s *Suite) listen2(log logger) (*conn, net.PacketConn, net.PacketConn) { + c := newConn(s.Dest, log) + l1, l2 := c.listen(s.Listen1), c.listen(s.Listen2) + return c, l1, l2 +} + +func (s *Suite) AllTests() []utesting.Test { + return []utesting.Test{ + {Name: "Ping", Fn: s.TestPing}, + {Name: "PingLargeRequestID", Fn: s.TestPingLargeRequestID}, + {Name: "PingMultiIP", Fn: s.TestPingMultiIP}, + {Name: "PingHandshakeInterrupted", Fn: s.TestPingHandshakeInterrupted}, + {Name: "TalkRequest", Fn: s.TestTalkRequest}, + {Name: "FindnodeZeroDistance", Fn: s.TestFindnodeZeroDistance}, + {Name: "FindnodeResults", Fn: s.TestFindnodeResults}, + } +} + +// This test sends PING and expects a PONG response. +func (s *Suite) TestPing(t *utesting.T) { + conn, l1 := s.listen1(t) + defer conn.close() + + ping := &v5wire.Ping{ReqID: conn.nextReqID()} + switch resp := conn.reqresp(l1, ping).(type) { + case *v5wire.Pong: + checkPong(t, resp, ping, l1) + default: + t.Fatal("expected PONG, got", resp.Name()) + } +} + +func checkPong(t *utesting.T, pong *v5wire.Pong, ping *v5wire.Ping, c net.PacketConn) { + if !bytes.Equal(pong.ReqID, ping.ReqID) { + t.Fatalf("wrong request ID %x in PONG, want %x", pong.ReqID, ping.ReqID) + } + if !pong.ToIP.Equal(laddr(c).IP) { + t.Fatalf("wrong destination IP %v in PONG, want %v", pong.ToIP, laddr(c).IP) + } + if int(pong.ToPort) != laddr(c).Port { + t.Fatalf("wrong destination port %v in PONG, want %v", pong.ToPort, laddr(c).Port) + } +} + +// This test sends PING with a 9-byte request ID, which isn't allowed by the spec. +// The remote node should not respond. +func (s *Suite) TestPingLargeRequestID(t *utesting.T) { + conn, l1 := s.listen1(t) + defer conn.close() + + ping := &v5wire.Ping{ReqID: make([]byte, 9)} + switch resp := conn.reqresp(l1, ping).(type) { + case *v5wire.Pong: + t.Errorf("PONG response with unknown request ID %x", resp.ReqID) + case *readError: + if resp.err == v5wire.ErrInvalidReqID { + t.Error("response with oversized request ID") + } else if !netutil.IsTimeout(resp.err) { + t.Error(resp) + } + } +} + +// In this test, a session is established from one IP as usual. The session is then reused +// on another IP, which shouldn't work. The remote node should respond with WHOAREYOU for +// the attempt from a different IP. +func (s *Suite) TestPingMultiIP(t *utesting.T) { + conn, l1, l2 := s.listen2(t) + defer conn.close() + + // Create the session on l1. + ping := &v5wire.Ping{ReqID: conn.nextReqID()} + resp := conn.reqresp(l1, ping) + if resp.Kind() != v5wire.PongMsg { + t.Fatal("expected PONG, got", resp) + } + checkPong(t, resp.(*v5wire.Pong), ping, l1) + + // Send on l2. This reuses the session because there is only one codec. 
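+	// The node under test should reject the reused session and answer the
+	// packet from the new IP with WHOAREYOU instead of PONG.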
+ ping2 := &v5wire.Ping{ReqID: conn.nextReqID()} + conn.write(l2, ping2, nil) + switch resp := conn.read(l2).(type) { + case *v5wire.Pong: + t.Fatalf("remote responded to PING from %v for session on IP %v", laddr(l2).IP, laddr(l1).IP) + case *v5wire.Whoareyou: + t.Logf("got WHOAREYOU for new session as expected") + resp.Node = s.Dest + conn.write(l2, ping2, resp) + default: + t.Fatal("expected WHOAREYOU, got", resp) + } + + // Catch the PONG on l2. + switch resp := conn.read(l2).(type) { + case *v5wire.Pong: + checkPong(t, resp, ping2, l2) + default: + t.Fatal("expected PONG, got", resp) + } + + // Try on l1 again. + ping3 := &v5wire.Ping{ReqID: conn.nextReqID()} + conn.write(l1, ping3, nil) + switch resp := conn.read(l1).(type) { + case *v5wire.Pong: + t.Fatalf("remote responded to PING from %v for session on IP %v", laddr(l1).IP, laddr(l2).IP) + case *v5wire.Whoareyou: + t.Logf("got WHOAREYOU for new session as expected") + default: + t.Fatal("expected WHOAREYOU, got", resp) + } +} + +// This test starts a handshake, but doesn't finish it and sends a second ordinary message +// packet instead of a handshake message packet. The remote node should respond with +// another WHOAREYOU challenge for the second packet. +func (s *Suite) TestPingHandshakeInterrupted(t *utesting.T) { + conn, l1 := s.listen1(t) + defer conn.close() + + // First PING triggers challenge. + ping := &v5wire.Ping{ReqID: conn.nextReqID()} + conn.write(l1, ping, nil) + switch resp := conn.read(l1).(type) { + case *v5wire.Whoareyou: + t.Logf("got WHOAREYOU for PING") + default: + t.Fatal("expected WHOAREYOU, got", resp) + } + + // Send second PING. + ping2 := &v5wire.Ping{ReqID: conn.nextReqID()} + switch resp := conn.reqresp(l1, ping2).(type) { + case *v5wire.Pong: + checkPong(t, resp, ping2, l1) + default: + t.Fatal("expected WHOAREYOU, got", resp) + } +} + +// This test sends TALKREQ and expects an empty TALKRESP response. +func (s *Suite) TestTalkRequest(t *utesting.T) { + conn, l1 := s.listen1(t) + defer conn.close() + + // Non-empty request ID. + id := conn.nextReqID() + resp := conn.reqresp(l1, &v5wire.TalkRequest{ReqID: id, Protocol: "test-protocol"}) + switch resp := resp.(type) { + case *v5wire.TalkResponse: + if !bytes.Equal(resp.ReqID, id) { + t.Fatalf("wrong request ID %x in TALKRESP, want %x", resp.ReqID, id) + } + if len(resp.Message) > 0 { + t.Fatalf("non-empty message %x in TALKRESP", resp.Message) + } + default: + t.Fatal("expected TALKRESP, got", resp.Name()) + } + + // Empty request ID. + resp = conn.reqresp(l1, &v5wire.TalkRequest{Protocol: "test-protocol"}) + switch resp := resp.(type) { + case *v5wire.TalkResponse: + if len(resp.ReqID) > 0 { + t.Fatalf("wrong request ID %x in TALKRESP, want empty byte array", resp.ReqID) + } + if len(resp.Message) > 0 { + t.Fatalf("non-empty message %x in TALKRESP", resp.Message) + } + default: + t.Fatal("expected TALKRESP, got", resp.Name()) + } +} + +// This test checks that the remote node returns itself for FINDNODE with distance zero. +func (s *Suite) TestFindnodeZeroDistance(t *utesting.T) { + conn, l1 := s.listen1(t) + defer conn.close() + + nodes, err := conn.findnode(l1, []uint{0}) + if err != nil { + t.Fatal(err) + } + if len(nodes) != 1 { + t.Fatalf("remote returned more than one node for FINDNODE [0]") + } + if nodes[0].ID() != conn.remote.ID() { + t.Errorf("ID of response node is %v, want %v", nodes[0].ID(), conn.remote.ID()) + } +} + +// In this test, multiple nodes ping the node under test. 
After waiting for them to be +// accepted into the remote table, the test checks that they are returned by FINDNODE. +func (s *Suite) TestFindnodeResults(t *utesting.T) { + // Create bystanders. + nodes := make([]*bystander, 5) + added := make(chan enode.ID, len(nodes)) + for i := range nodes { + nodes[i] = newBystander(t, s, added) + defer nodes[i].close() + } + + // Get them added to the remote table. + timeout := 60 * time.Second + timeoutCh := time.After(timeout) + for count := 0; count < len(nodes); { + select { + case id := <-added: + t.Logf("bystander node %v added to remote table", id) + count++ + case <-timeoutCh: + t.Errorf("remote added %d bystander nodes in %v, need %d to continue", count, timeout, len(nodes)) + t.Logf("this can happen if the node has a non-empty table from previous runs") + return + } + } + t.Logf("all %d bystander nodes were added", len(nodes)) + + // Collect our nodes by distance. + var dists []uint + expect := make(map[enode.ID]*enode.Node) + for _, bn := range nodes { + n := bn.conn.localNode.Node() + expect[n.ID()] = n + d := uint(enode.LogDist(n.ID(), s.Dest.ID())) + if !containsUint(dists, d) { + dists = append(dists, d) + } + } + + // Send FINDNODE for all distances. + conn, l1 := s.listen1(t) + defer conn.close() + foundNodes, err := conn.findnode(l1, dists) + if err != nil { + t.Fatal(err) + } + t.Logf("remote returned %d nodes for distance list %v", len(foundNodes), dists) + for _, n := range foundNodes { + delete(expect, n.ID()) + } + if len(expect) > 0 { + t.Errorf("missing %d nodes in FINDNODE result", len(expect)) + t.Logf("this can happen if the test is run multiple times in quick succession") + t.Logf("and the remote node hasn't removed dead nodes from previous runs yet") + } else { + t.Logf("all %d expected nodes were returned", len(nodes)) + } +} + +// A bystander is a node whose only purpose is filling a spot in the remote table. +type bystander struct { + dest *enode.Node + conn *conn + l net.PacketConn + + addedCh chan enode.ID + done sync.WaitGroup +} + +func newBystander(t *utesting.T, s *Suite, added chan enode.ID) *bystander { + conn, l := s.listen1(t) + conn.setEndpoint(l) // bystander nodes need IP/port to get pinged + bn := &bystander{ + conn: conn, + l: l, + dest: s.Dest, + addedCh: added, + } + bn.done.Add(1) + go bn.loop() + return bn +} + +// id returns the node ID of the bystander. +func (bn *bystander) id() enode.ID { + return bn.conn.localNode.ID() +} + +// close shuts down loop. +func (bn *bystander) close() { + bn.conn.close() + bn.done.Wait() +} + +// loop answers packets from the remote node until quit. +func (bn *bystander) loop() { + defer bn.done.Done() + + var ( + lastPing time.Time + wasAdded bool + ) + for { + // Ping the remote node. + if !wasAdded && time.Since(lastPing) > 10*time.Second { + bn.conn.reqresp(bn.l, &v5wire.Ping{ + ReqID: bn.conn.nextReqID(), + ENRSeq: bn.dest.Seq(), + }) + lastPing = time.Now() + } + // Answer packets. 
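+		// Responding to PING completes the endpoint proof with the remote node;
+		// a permanent read error ends the loop.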
+ switch p := bn.conn.read(bn.l).(type) { + case *v5wire.Ping: + bn.conn.write(bn.l, &v5wire.Pong{ + ReqID: p.ReqID, + ENRSeq: bn.conn.localNode.Seq(), + ToIP: bn.dest.IP(), + ToPort: uint16(bn.dest.UDP()), + }, nil) + wasAdded = true + bn.notifyAdded() + case *v5wire.Findnode: + bn.conn.write(bn.l, &v5wire.Nodes{ReqID: p.ReqID, Total: 1}, nil) + wasAdded = true + bn.notifyAdded() + case *v5wire.TalkRequest: + bn.conn.write(bn.l, &v5wire.TalkResponse{ReqID: p.ReqID}, nil) + case *readError: + if !netutil.IsTemporaryError(p.err) { + bn.conn.logf("shutting down: %v", p.err) + return + } + } + } +} + +func (bn *bystander) notifyAdded() { + if bn.addedCh != nil { + bn.addedCh <- bn.id() + bn.addedCh = nil + } +} diff --git a/cmd/devp2p/internal/v5test/framework.go b/cmd/devp2p/internal/v5test/framework.go new file mode 100644 index 000000000000..9eac37520f7b --- /dev/null +++ b/cmd/devp2p/internal/v5test/framework.go @@ -0,0 +1,263 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package v5test + +import ( + "bytes" + "crypto/ecdsa" + "encoding/binary" + "fmt" + "net" + "time" + + "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/p2p/discover/v5wire" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/enr" +) + +// readError represents an error during packet reading. +// This exists to facilitate type-switching on the result of conn.read. +type readError struct { + err error +} + +func (p *readError) Kind() byte { return 99 } +func (p *readError) Name() string { return fmt.Sprintf("error: %v", p.err) } +func (p *readError) Error() string { return p.err.Error() } +func (p *readError) Unwrap() error { return p.err } +func (p *readError) RequestID() []byte { return nil } +func (p *readError) SetRequestID([]byte) {} + +// readErrorf creates a readError with the given text. +func readErrorf(format string, args ...interface{}) *readError { + return &readError{fmt.Errorf(format, args...)} +} + +// This is the response timeout used in tests. +const waitTime = 300 * time.Millisecond + +// conn is a connection to the node under test. +type conn struct { + localNode *enode.LocalNode + localKey *ecdsa.PrivateKey + remote *enode.Node + remoteAddr *net.UDPAddr + listeners []net.PacketConn + + log logger + codec *v5wire.Codec + lastRequest v5wire.Packet + lastChallenge *v5wire.Whoareyou + idCounter uint32 +} + +type logger interface { + Logf(string, ...interface{}) +} + +// newConn sets up a connection to the given node. 
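+// It generates a fresh private key and an in-memory LocalNode to represent
+// the test side of the connection.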
+func newConn(dest *enode.Node, log logger) *conn { + key, err := crypto.GenerateKey() + if err != nil { + panic(err) + } + db, err := enode.OpenDB("") + if err != nil { + panic(err) + } + ln := enode.NewLocalNode(db, key) + + return &conn{ + localKey: key, + localNode: ln, + remote: dest, + remoteAddr: &net.UDPAddr{IP: dest.IP(), Port: dest.UDP()}, + codec: v5wire.NewCodec(ln, key, mclock.System{}), + log: log, + } +} + +func (tc *conn) setEndpoint(c net.PacketConn) { + tc.localNode.SetStaticIP(laddr(c).IP) + tc.localNode.SetFallbackUDP(laddr(c).Port) +} + +func (tc *conn) listen(ip string) net.PacketConn { + l, err := net.ListenPacket("udp", fmt.Sprintf("%v:0", ip)) + if err != nil { + panic(err) + } + tc.listeners = append(tc.listeners, l) + return l +} + +// close shuts down all listeners and the local node. +func (tc *conn) close() { + for _, l := range tc.listeners { + l.Close() + } + tc.localNode.Database().Close() +} + +// nextReqID creates a request id. +func (tc *conn) nextReqID() []byte { + id := make([]byte, 4) + tc.idCounter++ + binary.BigEndian.PutUint32(id, tc.idCounter) + return id +} + +// reqresp performs a request/response interaction on the given connection. +// The request is retried if a handshake is requested. +func (tc *conn) reqresp(c net.PacketConn, req v5wire.Packet) v5wire.Packet { + reqnonce := tc.write(c, req, nil) + switch resp := tc.read(c).(type) { + case *v5wire.Whoareyou: + if resp.Nonce != reqnonce { + return readErrorf("wrong nonce %x in WHOAREYOU (want %x)", resp.Nonce[:], reqnonce[:]) + } + resp.Node = tc.remote + tc.write(c, req, resp) + return tc.read(c) + default: + return resp + } +} + +// findnode sends a FINDNODE request and waits for its responses. +func (tc *conn) findnode(c net.PacketConn, dists []uint) ([]*enode.Node, error) { + var ( + findnode = &v5wire.Findnode{ReqID: tc.nextReqID(), Distances: dists} + reqnonce = tc.write(c, findnode, nil) + first = true + total uint8 + results []*enode.Node + ) + for n := 1; n > 0; { + switch resp := tc.read(c).(type) { + case *v5wire.Whoareyou: + // Handle handshake. + if resp.Nonce == reqnonce { + resp.Node = tc.remote + tc.write(c, findnode, resp) + } else { + return nil, fmt.Errorf("unexpected WHOAREYOU (nonce %x), waiting for NODES", resp.Nonce[:]) + } + case *v5wire.Ping: + // Handle ping from remote. + tc.write(c, &v5wire.Pong{ + ReqID: resp.ReqID, + ENRSeq: tc.localNode.Seq(), + }, nil) + case *v5wire.Nodes: + // Got NODES! Check request ID. + if !bytes.Equal(resp.ReqID, findnode.ReqID) { + return nil, fmt.Errorf("NODES response has wrong request id %x", resp.ReqID) + } + // Check total count. It should be greater than one + // and needs to be the same across all responses. + if first { + if resp.Total == 0 || resp.Total > 6 { + return nil, fmt.Errorf("invalid NODES response 'total' %d (not in (0,7))", resp.Total) + } + total = resp.Total + n = int(total) - 1 + first = false + } else { + n-- + if resp.Total != total { + return nil, fmt.Errorf("invalid NODES response 'total' %d (!= %d)", resp.Total, total) + } + } + // Check nodes. + nodes, err := checkRecords(resp.Nodes) + if err != nil { + return nil, fmt.Errorf("invalid node in NODES response: %v", err) + } + results = append(results, nodes...) + default: + return nil, fmt.Errorf("expected NODES, got %v", resp) + } + } + return results, nil +} + +// write sends a packet on the given connection. 
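+// The returned nonce identifies the outgoing packet and is used to match a
+// WHOAREYOU challenge against the request that triggered it.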
+func (tc *conn) write(c net.PacketConn, p v5wire.Packet, challenge *v5wire.Whoareyou) v5wire.Nonce { + packet, nonce, err := tc.codec.Encode(tc.remote.ID(), tc.remoteAddr.String(), p, challenge) + if err != nil { + panic(fmt.Errorf("can't encode %v packet: %v", p.Name(), err)) + } + if _, err := c.WriteTo(packet, tc.remoteAddr); err != nil { + tc.logf("Can't send %s: %v", p.Name(), err) + } else { + tc.logf(">> %s", p.Name()) + } + return nonce +} + +// read waits for an incoming packet on the given connection. +func (tc *conn) read(c net.PacketConn) v5wire.Packet { + buf := make([]byte, 1280) + if err := c.SetReadDeadline(time.Now().Add(waitTime)); err != nil { + return &readError{err} + } + n, fromAddr, err := c.ReadFrom(buf) + if err != nil { + return &readError{err} + } + _, _, p, err := tc.codec.Decode(buf[:n], fromAddr.String()) + if err != nil { + return &readError{err} + } + tc.logf("<< %s", p.Name()) + return p +} + +// logf prints to the test log. +func (tc *conn) logf(format string, args ...interface{}) { + if tc.log != nil { + tc.log.Logf("(%s) %s", tc.localNode.ID().TerminalString(), fmt.Sprintf(format, args...)) + } +} + +func laddr(c net.PacketConn) *net.UDPAddr { + return c.LocalAddr().(*net.UDPAddr) +} + +func checkRecords(records []*enr.Record) ([]*enode.Node, error) { + nodes := make([]*enode.Node, len(records)) + for i := range records { + n, err := enode.New(enode.ValidSchemes, records[i]) + if err != nil { + return nil, err + } + nodes[i] = n + } + return nodes, nil +} + +func containsUint(ints []uint, x uint) bool { + for i := range ints { + if ints[i] == x { + return true + } + } + return false +} diff --git a/cmd/devp2p/keycmd.go b/cmd/devp2p/keycmd.go new file mode 100644 index 000000000000..869b8c2a44f0 --- /dev/null +++ b/cmd/devp2p/keycmd.go @@ -0,0 +1,105 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
+ +package main + +import ( + "fmt" + "net" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/p2p/enode" + "gopkg.in/urfave/cli.v1" +) + +var ( + keyCommand = cli.Command{ + Name: "key", + Usage: "Operations on node keys", + Subcommands: []cli.Command{ + keyGenerateCommand, + keyToNodeCommand, + }, + } + keyGenerateCommand = cli.Command{ + Name: "generate", + Usage: "Generates node key files", + ArgsUsage: "keyfile", + Action: genkey, + } + keyToNodeCommand = cli.Command{ + Name: "to-enode", + Usage: "Creates an enode URL from a node key file", + ArgsUsage: "keyfile", + Action: keyToURL, + Flags: []cli.Flag{hostFlag, tcpPortFlag, udpPortFlag}, + } +) + +var ( + hostFlag = cli.StringFlag{ + Name: "ip", + Usage: "IP address of the node", + Value: "127.0.0.1", + } + tcpPortFlag = cli.IntFlag{ + Name: "tcp", + Usage: "TCP port of the node", + Value: 30303, + } + udpPortFlag = cli.IntFlag{ + Name: "udp", + Usage: "UDP port of the node", + Value: 30303, + } +) + +func genkey(ctx *cli.Context) error { + if ctx.NArg() != 1 { + return fmt.Errorf("need key file as argument") + } + file := ctx.Args().Get(0) + + key, err := crypto.GenerateKey() + if err != nil { + return fmt.Errorf("could not generate key: %v", err) + } + return crypto.SaveECDSA(file, key) +} + +func keyToURL(ctx *cli.Context) error { + if ctx.NArg() != 1 { + return fmt.Errorf("need key file as argument") + } + + var ( + file = ctx.Args().Get(0) + host = ctx.String(hostFlag.Name) + tcp = ctx.Int(tcpPortFlag.Name) + udp = ctx.Int(udpPortFlag.Name) + ) + key, err := crypto.LoadECDSA(file) + if err != nil { + return err + } + ip := net.ParseIP(host) + if ip == nil { + return fmt.Errorf("invalid IP address %q", host) + } + node := enode.NewV4(&key.PublicKey, ip, tcp, udp) + fmt.Println(node.URLv4()) + return nil +} diff --git a/cmd/devp2p/main.go b/cmd/devp2p/main.go index 6faa65093737..4a4e905a424e 100644 --- a/cmd/devp2p/main.go +++ b/cmd/devp2p/main.go @@ -45,7 +45,7 @@ func init() { // Set up the CLI app. app.Flags = append(app.Flags, debug.Flags...) app.Before = func(ctx *cli.Context) error { - return debug.Setup(ctx, "") + return debug.Setup(ctx) } app.After = func(ctx *cli.Context) error { debug.Exit() @@ -58,9 +58,12 @@ func init() { // Add subcommands. app.Commands = []cli.Command{ enrdumpCommand, + keyCommand, discv4Command, + discv5Command, dnsCommand, nodesetCommand, + rlpxCommand, } } @@ -78,7 +81,7 @@ func commandHasFlag(ctx *cli.Context, flag cli.Flag) bool { // getNodeArg handles the common case of a single node descriptor argument. 
func getNodeArg(ctx *cli.Context) *enode.Node { - if ctx.NArg() != 1 { + if ctx.NArg() < 1 { exit("missing node as command-line argument") } n, err := parseNode(ctx.Args()[0]) diff --git a/cmd/devp2p/nodesetcmd.go b/cmd/devp2p/nodesetcmd.go index de8e6d45ee6c..ba97405abcd6 100644 --- a/cmd/devp2p/nodesetcmd.go +++ b/cmd/devp2p/nodesetcmd.go @@ -95,6 +95,7 @@ var filterFlags = map[string]nodeFilterC{ "-min-age": {1, minAgeFilter}, "-eth-network": {1, ethFilter}, "-les-server": {0, lesFilter}, + "-snap": {0, snapFilter}, } func parseFilters(args []string) ([]nodeFilter, error) { @@ -104,15 +105,15 @@ func parseFilters(args []string) ([]nodeFilter, error) { if !ok { return nil, fmt.Errorf("invalid filter %q", args[0]) } - if len(args) < fc.narg { - return nil, fmt.Errorf("filter %q wants %d arguments, have %d", args[0], fc.narg, len(args)) + if len(args)-1 < fc.narg { + return nil, fmt.Errorf("filter %q wants %d arguments, have %d", args[0], fc.narg, len(args)-1) } - filter, err := fc.fn(args[1:]) + filter, err := fc.fn(args[1 : 1+fc.narg]) if err != nil { return nil, fmt.Errorf("%s: %v", args[0], err) } filters = append(filters, filter) - args = args[fc.narg+1:] + args = args[1+fc.narg:] } return filters, nil } @@ -164,7 +165,7 @@ func ethFilter(args []string) (nodeFilter, error) { case "goerli": filter = forkid.NewStaticFilter(params.GoerliChainConfig, params.GoerliGenesisHash) case "ropsten": - filter = forkid.NewStaticFilter(params.TestnetChainConfig, params.TestnetGenesisHash) + filter = forkid.NewStaticFilter(params.RopstenChainConfig, params.RopstenGenesisHash) default: return nil, fmt.Errorf("unknown network %q", args[0]) } @@ -191,3 +192,13 @@ func lesFilter(args []string) (nodeFilter, error) { } return f, nil } + +func snapFilter(args []string) (nodeFilter, error) { + f := func(n nodeJSON) bool { + var snap struct { + _ []rlp.RawValue `rlp:"tail"` + } + return n.N.Load(enr.WithEntry("snap", &snap)) == nil + } + return f, nil +} diff --git a/cmd/devp2p/rlpxcmd.go b/cmd/devp2p/rlpxcmd.go new file mode 100644 index 000000000000..d90eb4687cad --- /dev/null +++ b/cmd/devp2p/rlpxcmd.go @@ -0,0 +1,99 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
+ +package main + +import ( + "fmt" + "net" + + "github.com/ethereum/go-ethereum/cmd/devp2p/internal/ethtest" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/rlpx" + "github.com/ethereum/go-ethereum/rlp" + "gopkg.in/urfave/cli.v1" +) + +var ( + rlpxCommand = cli.Command{ + Name: "rlpx", + Usage: "RLPx Commands", + Subcommands: []cli.Command{ + rlpxPingCommand, + rlpxEthTestCommand, + }, + } + rlpxPingCommand = cli.Command{ + Name: "ping", + Usage: "ping ", + Action: rlpxPing, + } + rlpxEthTestCommand = cli.Command{ + Name: "eth-test", + Usage: "Runs tests against a node", + ArgsUsage: " ", + Action: rlpxEthTest, + Flags: []cli.Flag{ + testPatternFlag, + testTAPFlag, + }, + } +) + +func rlpxPing(ctx *cli.Context) error { + n := getNodeArg(ctx) + fd, err := net.Dial("tcp", fmt.Sprintf("%v:%d", n.IP(), n.TCP())) + if err != nil { + return err + } + conn := rlpx.NewConn(fd, n.Pubkey()) + ourKey, _ := crypto.GenerateKey() + _, err = conn.Handshake(ourKey) + if err != nil { + return err + } + code, data, _, err := conn.Read() + if err != nil { + return err + } + switch code { + case 0: + var h ethtest.Hello + if err := rlp.DecodeBytes(data, &h); err != nil { + return fmt.Errorf("invalid handshake: %v", err) + } + fmt.Printf("%+v\n", h) + case 1: + var msg []p2p.DiscReason + if rlp.DecodeBytes(data, &msg); len(msg) == 0 { + return fmt.Errorf("invalid disconnect message") + } + return fmt.Errorf("received disconnect message: %v", msg[0]) + default: + return fmt.Errorf("invalid message code %d, expected handshake (code zero)", code) + } + return nil +} + +// rlpxEthTest runs the eth protocol test suite. +func rlpxEthTest(ctx *cli.Context) error { + if ctx.NArg() < 3 { + exit("missing path to chain.rlp as command-line argument") + } + suite := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2]) + return runTests(ctx, suite.AllTests()) +} diff --git a/cmd/devp2p/runtest.go b/cmd/devp2p/runtest.go new file mode 100644 index 000000000000..4168f8555bfb --- /dev/null +++ b/cmd/devp2p/runtest.go @@ -0,0 +1,69 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "os" + + "github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/log" + "gopkg.in/urfave/cli.v1" +) + +var ( + testPatternFlag = cli.StringFlag{ + Name: "run", + Usage: "Pattern of test suite(s) to run", + } + testTAPFlag = cli.BoolFlag{ + Name: "tap", + Usage: "Output TAP", + } + // These two are specific to the discovery tests. 
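The `--tap` flag above switches the test runner to TAP ("Test Anything Protocol") output. Independent of the `utesting` package, a TAP consumer essentially expects a plan line followed by one `ok`/`not ok` line per test, roughly as in this sketch:

```go
package main

import "fmt"

type result struct {
	name   string
	failed bool
}

func main() {
	results := []result{{"Ping", false}, {"Findnode", true}}

	fmt.Printf("1..%d\n", len(results)) // the plan: how many tests will be reported
	for i, r := range results {
		status := "ok"
		if r.failed {
			status = "not ok"
		}
		fmt.Printf("%s %d - %s\n", status, i+1, r.name)
	}
}
```

Here that prints `1..2`, `ok 1 - Ping`, `not ok 2 - Findnode`.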
+ testListen1Flag = cli.StringFlag{ + Name: "listen1", + Usage: "IP address of the first tester", + Value: v4test.Listen1, + } + testListen2Flag = cli.StringFlag{ + Name: "listen2", + Usage: "IP address of the second tester", + Value: v4test.Listen2, + } +) + +func runTests(ctx *cli.Context, tests []utesting.Test) error { + // Filter test cases. + if ctx.IsSet(testPatternFlag.Name) { + tests = utesting.MatchTests(tests, ctx.String(testPatternFlag.Name)) + } + // Disable logging unless explicitly enabled. + if !ctx.GlobalIsSet("verbosity") && !ctx.GlobalIsSet("vmodule") { + log.Root().SetHandler(log.DiscardHandler()) + } + // Run the tests. + var run = utesting.RunTests + if ctx.Bool(testTAPFlag.Name) { + run = utesting.RunTAP + } + results := run(tests, os.Stdout) + if utesting.CountFailures(results) > 0 { + os.Exit(1) + } + return nil +} diff --git a/cmd/ethkey/changepassword.go b/cmd/ethkey/changepassword.go index 32fde4ed6daf..b9402c2f96da 100644 --- a/cmd/ethkey/changepassword.go +++ b/cmd/ethkey/changepassword.go @@ -51,7 +51,7 @@ Change the password of a keyfile.`, } // Decrypt key with passphrase. - passphrase := getPassphrase(ctx) + passphrase := getPassphrase(ctx, false) key, err := keystore.DecryptKey(keyjson, passphrase) if err != nil { utils.Fatalf("Error decrypting key: %v", err) @@ -67,7 +67,7 @@ Change the password of a keyfile.`, } newPhrase = strings.TrimRight(string(content), "\r\n") } else { - newPhrase = promptPassphrase(true) + newPhrase = utils.GetPassPhrase("", true) } // Encrypt the key with the new passphrase. diff --git a/cmd/ethkey/generate.go b/cmd/ethkey/generate.go index fe9a0c15192e..c2aa1c6fb4a9 100644 --- a/cmd/ethkey/generate.go +++ b/cmd/ethkey/generate.go @@ -52,6 +52,10 @@ If you want to encrypt an existing private key, it can be specified by setting Name: "privatekey", Usage: "file containing a raw private key to encrypt", }, + cli.BoolFlag{ + Name: "lightkdf", + Usage: "use less secure scrypt parameters", + }, }, Action: func(ctx *cli.Context) error { // Check if keyfile path given and make sure it doesn't already exist. @@ -90,8 +94,12 @@ If you want to encrypt an existing private key, it can be specified by setting } // Encrypt key with passphrase. - passphrase := promptPassphrase(true) - keyjson, err := keystore.EncryptKey(key, passphrase, keystore.StandardScryptN, keystore.StandardScryptP) + passphrase := getPassphrase(ctx, true) + scryptN, scryptP := keystore.StandardScryptN, keystore.StandardScryptP + if ctx.Bool("lightkdf") { + scryptN, scryptP = keystore.LightScryptN, keystore.LightScryptP + } + keyjson, err := keystore.EncryptKey(key, passphrase, scryptN, scryptP) if err != nil { utils.Fatalf("Error encrypting key: %v", err) } diff --git a/cmd/ethkey/inspect.go b/cmd/ethkey/inspect.go index ba03d4d93692..b646e43aa576 100644 --- a/cmd/ethkey/inspect.go +++ b/cmd/ethkey/inspect.go @@ -60,7 +60,7 @@ make sure to use this feature with great caution!`, } // Decrypt key with passphrase. 
- passphrase := getPassphrase(ctx) + passphrase := getPassphrase(ctx, false) key, err := keystore.DecryptKey(keyjson, passphrase) if err != nil { utils.Fatalf("Error decrypting key: %v", err) diff --git a/cmd/ethkey/main.go b/cmd/ethkey/main.go index dbc49605888a..6db39174c461 100644 --- a/cmd/ethkey/main.go +++ b/cmd/ethkey/main.go @@ -20,7 +20,7 @@ import ( "fmt" "os" - "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/internal/flags" "gopkg.in/urfave/cli.v1" ) @@ -35,7 +35,7 @@ var gitDate = "" var app *cli.App func init() { - app = utils.NewApp(gitCommit, gitDate, "an Ethereum key manager") + app = flags.NewApp(gitCommit, gitDate, "an Ethereum key manager") app.Commands = []cli.Command{ commandGenerate, commandInspect, @@ -43,7 +43,7 @@ func init() { commandSignMessage, commandVerifyMessage, } - cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate + cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate } // Commonly used command line flags. diff --git a/cmd/ethkey/message.go b/cmd/ethkey/message.go index 5caea69ff653..69c8cf092392 100644 --- a/cmd/ethkey/message.go +++ b/cmd/ethkey/message.go @@ -62,7 +62,7 @@ To sign a message contained in a file, use the --msgfile flag. } // Decrypt key with passphrase. - passphrase := getPassphrase(ctx) + passphrase := getPassphrase(ctx, false) key, err := keystore.DecryptKey(keyjson, passphrase) if err != nil { utils.Fatalf("Error decrypting key: %v", err) diff --git a/cmd/ethkey/message_test.go b/cmd/ethkey/message_test.go index e9e8eeeafb88..9d242ac00244 100644 --- a/cmd/ethkey/message_test.go +++ b/cmd/ethkey/message_test.go @@ -34,7 +34,7 @@ func TestMessageSignVerify(t *testing.T) { message := "test message" // Create the key. - generate := runEthkey(t, "generate", keyfile) + generate := runEthkey(t, "generate", "--lightkdf", keyfile) generate.Expect(` !! Unsupported terminal, password will be echoed. Password: {{.InputLine "foobar"}} diff --git a/cmd/ethkey/utils.go b/cmd/ethkey/utils.go index c6cf5c25a3ba..f2986e8ee91b 100644 --- a/cmd/ethkey/utils.go +++ b/cmd/ethkey/utils.go @@ -23,36 +23,14 @@ import ( "strings" "github.com/ethereum/go-ethereum/cmd/utils" - "github.com/ethereum/go-ethereum/console" "github.com/ethereum/go-ethereum/crypto" "gopkg.in/urfave/cli.v1" ) -// promptPassphrase prompts the user for a passphrase. Set confirmation to true -// to require the user to confirm the passphrase. -func promptPassphrase(confirmation bool) string { - passphrase, err := console.Stdin.PromptPassword("Password: ") - if err != nil { - utils.Fatalf("Failed to read password: %v", err) - } - - if confirmation { - confirm, err := console.Stdin.PromptPassword("Repeat password: ") - if err != nil { - utils.Fatalf("Failed to read password confirmation: %v", err) - } - if passphrase != confirm { - utils.Fatalf("Passwords do not match") - } - } - - return passphrase -} - // getPassphrase obtains a passphrase given by the user. It first checks the // --passfile command line flag and ultimately prompts the user for a // passphrase. -func getPassphrase(ctx *cli.Context) string { +func getPassphrase(ctx *cli.Context, confirmation bool) string { // Look for the --passwordfile flag. passphraseFile := ctx.String(passphraseFlag.Name) if passphraseFile != "" { @@ -65,7 +43,7 @@ func getPassphrase(ctx *cli.Context) string { } // Otherwise prompt the user for the passphrase. 
-   return promptPassphrase(false)
+   return utils.GetPassPhrase("", confirmation)
 }
 
 // signHash is a helper function that calculates a hash for the given message
diff --git a/cmd/evm/README.md b/cmd/evm/README.md
new file mode 100644
index 000000000000..8f0848bde80d
--- /dev/null
+++ b/cmd/evm/README.md
@@ -0,0 +1,270 @@
+## EVM state transition tool
+
+The `evm t8n` tool is a stateless state transition utility. It can
+
+1. Take a prestate, including
+   - Accounts,
+   - Block context information,
+   - Previous block hashes (*optional)
+2. Apply a set of transactions,
+3. Apply a mining-reward (*optional),
+4. And generate a post-state, including
+   - State root, transaction root, receipt root,
+   - Information about rejected transactions,
+   - Optionally: a full or partial post-state dump
+
+## Specification
+
+The idea is to specify the behaviour of this binary very _strictly_, so that other
+node implementors can build replicas based on their own state-machines, and the
+state generators can swap between a `geth`-based implementation and a `parityvm`-based
+implementation.
+
+### Command line params
+
+Command line params that have to be supported are
+```
+
+    --trace                   Output full trace logs to files <txhash>.jsonl
+    --trace.nomemory          Disable full memory dump in traces
+    --trace.nostack           Disable stack output in traces
+    --trace.noreturndata      Disable return data output in traces
+    --output.basedir value    Specifies where output files are placed. Will be created if it does not exist. (default: ".")
+    --output.alloc alloc      Determines where to put the alloc of the post-state.
+                              `stdout` - into the stdout output
+                              `stderr` - into the stderr output
+    --output.result result    Determines where to put the result (stateroot, txroot etc) of the post-state.
+                              `stdout` - into the stdout output
+                              `stderr` - into the stderr output
+    --state.fork value        Name of ruleset to use.
+    --state.chainid value     ChainID to use (default: 1)
+    --state.reward value      Mining reward. Set to -1 to disable (default: 0)
+
+```
+
+### Error codes and output
+
+All logging should happen against `stderr`.
+There are a few (not many) errors that can occur; those are defined below.
+
+#### EVM-based errors (`2` to `9`)
+
+- Other EVM error. Exit code `2`
+- Failed configuration: when a non-supported or invalid fork was specified. Exit code `3`.
+- Block history is not supplied, but needed for a `BLOCKHASH` operation. If `BLOCKHASH`
+  is invoked targeting a block whose history has not been provided, the program will
+  exit with code `4`.
+
+#### IO errors (`10`-`20`)
+
+- Invalid input json: the supplied data could not be marshalled.
+ The program will exit with code `10` +- IO problems: failure to load or save files, the program will exit with code `11` + +## Examples +### Basic usage + +Invoking it with the provided example files +``` +./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json +``` +Two resulting files: + +`alloc.json`: +```json +{ + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { + "balance": "0xfeed1a9d", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "nonce": "0xac" + }, + "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xa410" + } +} +``` +`result.json`: +```json +{ + "stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13", + "txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d", + "receiptRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x5208", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x5208", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + } + ], + "rejected": [ + 1 + ] +} +``` + +We can make them spit out the data to e.g. 
`stdout` like this: +``` +./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.result=stdout --output.alloc=stdout +``` +Output: +```json +{ + "alloc": { + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { + "balance": "0xfeed1a9d", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "nonce": "0xac" + }, + "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xa410" + } + }, + "result": { + "stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13", + "txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d", + "receiptRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x5208", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x5208", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + } + ], + "rejected": [ + 1 + ] + } +} +``` + +## About Ommers + +Mining rewards and ommer rewards might need to be added. This is how those are applied: + +- `block_reward` is the block mining reward for the miner (`0xaa`), of a block at height `N`. +- For each ommer (mined by `0xbb`), with blocknumber `N-delta` + - (where `delta` is the difference between the current block and the ommer) + - The account `0xbb` (ommer miner) is awarded `(8-delta)/ 8 * block_reward` + - The account `0xaa` (block miner) is awarded `block_reward / 32` + +To make `state_t8n` apply these, the following inputs are required: + +- `state.reward` + - For ethash, it is `5000000000000000000` `wei`, + - If this is not defined, mining rewards are not applied, + - A value of `0` is valid, and causes accounts to be 'touched'. +- For each ommer, the tool needs to be given an `address` and a `delta`. This + is done via the `env`. + +Note: the tool does not verify that e.g. the normal uncle rules apply, +and allows e.g two uncles at the same height, or the uncle-distance. 
This means that +the tool allows for negative uncle reward (distance > 8) + +Example: +`./testdata/5/env.json`: +```json +{ + "currentCoinbase": "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "currentDifficulty": "0x20000", + "currentGasLimit": "0x750a163df65e8a", + "currentNumber": "1", + "currentTimestamp": "1000", + "ommers": [ + {"delta": 1, "address": "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" }, + {"delta": 2, "address": "0xcccccccccccccccccccccccccccccccccccccccc" } + ] +} +``` +When applying this, using a reward of `0x08` +Output: +```json +{ + "alloc": { + "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": { + "balance": "0x88" + }, + "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb": { + "balance": "0x70" + }, + "0xcccccccccccccccccccccccccccccccccccccccc": { + "balance": "0x60" + } + } +} +``` +### Future EIPS + +It is also possible to experiment with future eips that are not yet defined in a hard fork. +Example, putting EIP-1344 into Frontier: +``` +./evm t8n --state.fork=Frontier+1344 --input.pre=./testdata/1/pre.json --input.txs=./testdata/1/txs.json --input.env=/testdata/1/env.json +``` + +### Block history + +The `BLOCKHASH` opcode requires blockhashes to be provided by the caller, inside the `env`. +If a required blockhash is not provided, the exit code should be `4`: +Example where blockhashes are provided: +``` +./evm t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace +``` +``` +cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2 +``` +``` +{"pc":0,"op":96,"gas":"0x5f58ef8","gasCost":"0x3","memory":"0x","memSize":0,"stack":[],"returnStack":[],"returnData":null,"depth":1,"refund":0,"opName":"PUSH1","error":""} +{"pc":2,"op":64,"gas":"0x5f58ef5","gasCost":"0x14","memory":"0x","memSize":0,"stack":["0x1"],"returnStack":[],"returnData":null,"depth":1,"refund":0,"opName":"BLOCKHASH","error":""} +{"pc":3,"op":0,"gas":"0x5f58ee1","gasCost":"0x0","memory":"0x","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"returnStack":[],"returnData":null,"depth":1,"refund":0,"opName":"STOP","error":""} +{"output":"","gasUsed":"0x17","time":112885} +``` + +In this example, the caller has not provided the required blockhash: +``` +./evm t8n --input.alloc=./testdata/4/alloc.json --input.txs=./testdata/4/txs.json --input.env=./testdata/4/env.json --trace +``` +``` +ERROR(4): getHash(3) invoked, blockhash for that block not provided +``` +Error code: 4 +### Chaining + +Another thing that can be done, is to chain invocations: +``` +./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.alloc=stdout | ./evm t8n --input.alloc=stdin --input.env=./testdata/1/env.json --input.txs=./testdata/1/txs.json +INFO [08-03|15:25:15.168] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" +INFO [08-03|15:25:15.169] rejected tx index=0 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" +INFO [08-03|15:25:15.169] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" + +``` +What happened here, is that we first applied two identical transactions, so the second one was rejected. +Then, taking the poststate alloc as the input for the next state, we tried again to include +the same two transactions: this time, both failed due to too low nonce. 
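As a cross-check of the reward rules from the "About Ommers" section, here is a minimal, self-contained sketch of the arithmetic using only `math/big`. Note that the balances printed in that example output correspond to a block reward of 128 wei (`0x80`): with the two ommers from `./testdata/5/env.json`, the block miner ends up with `0x88` and the ommer miners with `0x70` and `0x60`.

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	blockReward := big.NewInt(128) // 0x80 wei, matching the balances shown above
	ommerDeltas := []uint64{1, 2}  // from ./testdata/5/env.json

	minerReward := new(big.Int).Set(blockReward)
	for _, delta := range ommerDeltas {
		// The block miner gets an extra blockReward/32 per included ommer.
		minerReward.Add(minerReward, new(big.Int).Div(blockReward, big.NewInt(32)))

		// The ommer miner gets (8-delta)/8 * blockReward.
		ommerReward := big.NewInt(8 - int64(delta))
		ommerReward.Mul(ommerReward, blockReward)
		ommerReward.Div(ommerReward, big.NewInt(8))
		fmt.Printf("ommer (delta %d): %d wei (%#x)\n", delta, ommerReward, ommerReward)
	}
	fmt.Printf("block miner: %d wei (%#x)\n", minerReward, minerReward)
}
```

A delta larger than 8 makes `(8-delta)` negative, which is the negative-ommer-reward case mentioned in the note above.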
+ +In order to meaningfully chain invocations, one would need to provide meaningful new `env`, otherwise the +actual blocknumber (exposed to the EVM) would not increase. + diff --git a/cmd/evm/compiler.go b/cmd/evm/compiler.go index c019a2fe70b7..40ad9313c514 100644 --- a/cmd/evm/compiler.go +++ b/cmd/evm/compiler.go @@ -23,7 +23,7 @@ import ( "github.com/ethereum/go-ethereum/cmd/evm/internal/compiler" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var compileCommand = cli.Command{ diff --git a/cmd/evm/disasm.go b/cmd/evm/disasm.go index 69f611e39b11..68a09cbf50cd 100644 --- a/cmd/evm/disasm.go +++ b/cmd/evm/disasm.go @@ -23,7 +23,7 @@ import ( "strings" "github.com/ethereum/go-ethereum/core/asm" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var disasmCommand = cli.Command{ @@ -34,17 +34,22 @@ var disasmCommand = cli.Command{ } func disasmCmd(ctx *cli.Context) error { - if len(ctx.Args().First()) == 0 { - return errors.New("filename required") + var in string + switch { + case len(ctx.Args().First()) > 0: + fn := ctx.Args().First() + input, err := ioutil.ReadFile(fn) + if err != nil { + return err + } + in = string(input) + case ctx.GlobalIsSet(InputFlag.Name): + in = ctx.GlobalString(InputFlag.Name) + default: + return errors.New("Missing filename or --input value") } - fn := ctx.Args().First() - in, err := ioutil.ReadFile(fn) - if err != nil { - return err - } - - code := strings.TrimSpace(string(in)) + code := strings.TrimSpace(in) fmt.Printf("%v\n", code) return asm.PrintDisassembled(code) } diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go new file mode 100644 index 000000000000..171443e15625 --- /dev/null +++ b/cmd/evm/internal/t8ntool/execution.go @@ -0,0 +1,264 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package t8ntool + +import ( + "fmt" + "math/big" + "os" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + "golang.org/x/crypto/sha3" +) + +type Prestate struct { + Env stEnv `json:"env"` + Pre core.GenesisAlloc `json:"pre"` +} + +// ExecutionResult contains the execution status after running a state test, any +// error that might have occurred and a dump of the final state if requested. 
+type ExecutionResult struct { + StateRoot common.Hash `json:"stateRoot"` + TxRoot common.Hash `json:"txRoot"` + ReceiptRoot common.Hash `json:"receiptRoot"` + LogsHash common.Hash `json:"logsHash"` + Bloom types.Bloom `json:"logsBloom" gencodec:"required"` + Receipts types.Receipts `json:"receipts"` + Rejected []int `json:"rejected,omitempty"` +} + +type ommer struct { + Delta uint64 `json:"delta"` + Address common.Address `json:"address"` +} + +//go:generate gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go +type stEnv struct { + Coinbase common.Address `json:"currentCoinbase" gencodec:"required"` + Difficulty *big.Int `json:"currentDifficulty" gencodec:"required"` + GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` + Number uint64 `json:"currentNumber" gencodec:"required"` + Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` +} + +type stEnvMarshaling struct { + Coinbase common.UnprefixedAddress + Difficulty *math.HexOrDecimal256 + GasLimit math.HexOrDecimal64 + Number math.HexOrDecimal64 + Timestamp math.HexOrDecimal64 +} + +// Apply applies a set of transactions to a pre-state +func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, + txs types.Transactions, miningReward int64, + getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error)) (*state.StateDB, *ExecutionResult, error) { + + // Capture errors for BLOCKHASH operation, if we haven't been supplied the + // required blockhashes + var hashError error + getHash := func(num uint64) common.Hash { + if pre.Env.BlockHashes == nil { + hashError = fmt.Errorf("getHash(%d) invoked, no blockhashes provided", num) + return common.Hash{} + } + h, ok := pre.Env.BlockHashes[math.HexOrDecimal64(num)] + if !ok { + hashError = fmt.Errorf("getHash(%d) invoked, blockhash for that block not provided", num) + } + return h + } + var ( + statedb = MakePreState(rawdb.NewMemoryDatabase(), pre.Pre) + signer = types.MakeSigner(chainConfig, new(big.Int).SetUint64(pre.Env.Number)) + gaspool = new(core.GasPool) + blockHash = common.Hash{0x13, 0x37} + rejectedTxs []int + includedTxs types.Transactions + gasUsed = uint64(0) + receipts = make(types.Receipts, 0) + txIndex = 0 + ) + gaspool.AddGas(pre.Env.GasLimit) + vmContext := vm.BlockContext{ + CanTransfer: core.CanTransfer, + Transfer: core.Transfer, + Coinbase: pre.Env.Coinbase, + BlockNumber: new(big.Int).SetUint64(pre.Env.Number), + Time: new(big.Int).SetUint64(pre.Env.Timestamp), + Difficulty: pre.Env.Difficulty, + GasLimit: pre.Env.GasLimit, + GetHash: getHash, + } + // If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's + // done in StateProcessor.Process(block, ...), right before transactions are applied. 
+ if chainConfig.DAOForkSupport && + chainConfig.DAOForkBlock != nil && + chainConfig.DAOForkBlock.Cmp(new(big.Int).SetUint64(pre.Env.Number)) == 0 { + misc.ApplyDAOHardFork(statedb) + } + + for i, tx := range txs { + msg, err := tx.AsMessage(signer) + if err != nil { + log.Info("rejected tx", "index", i, "hash", tx.Hash(), "error", err) + rejectedTxs = append(rejectedTxs, i) + continue + } + tracer, err := getTracerFn(txIndex, tx.Hash()) + if err != nil { + return nil, nil, err + } + vmConfig.Tracer = tracer + vmConfig.Debug = (tracer != nil) + statedb.Prepare(tx.Hash(), blockHash, txIndex) + txContext := core.NewEVMTxContext(msg) + + evm := vm.NewEVM(vmContext, txContext, statedb, chainConfig, vmConfig) + if chainConfig.IsYoloV2(vmContext.BlockNumber) { + statedb.AddAddressToAccessList(msg.From()) + if dst := msg.To(); dst != nil { + statedb.AddAddressToAccessList(*dst) + // If it's a create-tx, the destination will be added inside evm.create + } + for _, addr := range evm.ActivePrecompiles() { + statedb.AddAddressToAccessList(addr) + } + } + snapshot := statedb.Snapshot() + // (ret []byte, usedGas uint64, failed bool, err error) + msgResult, err := core.ApplyMessage(evm, msg, gaspool) + if err != nil { + statedb.RevertToSnapshot(snapshot) + log.Info("rejected tx", "index", i, "hash", tx.Hash(), "from", msg.From(), "error", err) + rejectedTxs = append(rejectedTxs, i) + continue + } + includedTxs = append(includedTxs, tx) + if hashError != nil { + return nil, nil, NewError(ErrorMissingBlockhash, hashError) + } + gasUsed += msgResult.UsedGas + // Create a new receipt for the transaction, storing the intermediate root and gas used by the tx + { + var root []byte + if chainConfig.IsByzantium(vmContext.BlockNumber) { + statedb.Finalise(true) + } else { + root = statedb.IntermediateRoot(chainConfig.IsEIP158(vmContext.BlockNumber)).Bytes() + } + + receipt := types.NewReceipt(root, msgResult.Failed(), gasUsed) + receipt.TxHash = tx.Hash() + receipt.GasUsed = msgResult.UsedGas + // if the transaction created a contract, store the creation address in the receipt. + if msg.To() == nil { + receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce()) + } + // Set the receipt logs and create a bloom for filtering + receipt.Logs = statedb.GetLogs(tx.Hash()) + receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) + // These three are non-consensus fields + //receipt.BlockHash + //receipt.BlockNumber = + receipt.TransactionIndex = uint(txIndex) + receipts = append(receipts, receipt) + } + txIndex++ + } + statedb.IntermediateRoot(chainConfig.IsEIP158(vmContext.BlockNumber)) + // Add mining reward? + if miningReward > 0 { + // Add mining reward. The mining reward may be `0`, which only makes a difference in the cases + // where + // - the coinbase suicided, or + // - there are only 'bad' transactions, which aren't executed. 
In those cases, + // the coinbase gets no txfee, so isn't created, and thus needs to be touched + var ( + blockReward = big.NewInt(miningReward) + minerReward = new(big.Int).Set(blockReward) + perOmmer = new(big.Int).Div(blockReward, big.NewInt(32)) + ) + for _, ommer := range pre.Env.Ommers { + // Add 1/32th for each ommer included + minerReward.Add(minerReward, perOmmer) + // Add (8-delta)/8 + reward := big.NewInt(8) + reward.Sub(reward, big.NewInt(0).SetUint64(ommer.Delta)) + reward.Mul(reward, blockReward) + reward.Div(reward, big.NewInt(8)) + statedb.AddBalance(ommer.Address, reward) + } + statedb.AddBalance(pre.Env.Coinbase, minerReward) + } + // Commit block + root, err := statedb.Commit(chainConfig.IsEIP158(vmContext.BlockNumber)) + if err != nil { + fmt.Fprintf(os.Stderr, "Could not commit state: %v", err) + return nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err)) + } + execRs := &ExecutionResult{ + StateRoot: root, + TxRoot: types.DeriveSha(includedTxs, new(trie.Trie)), + ReceiptRoot: types.DeriveSha(receipts, new(trie.Trie)), + Bloom: types.CreateBloom(receipts), + LogsHash: rlpHash(statedb.Logs()), + Receipts: receipts, + Rejected: rejectedTxs, + } + return statedb, execRs, nil +} + +func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB { + sdb := state.NewDatabase(db) + statedb, _ := state.New(common.Hash{}, sdb, nil) + for addr, a := range accounts { + statedb.SetCode(addr, a.Code) + statedb.SetNonce(addr, a.Nonce) + statedb.SetBalance(addr, a.Balance) + for k, v := range a.Storage { + statedb.SetState(addr, k, v) + } + } + // Commit and re-open to start with a clean state. + root, _ := statedb.Commit(false) + statedb, _ = state.New(root, sdb, nil) + return statedb +} + +func rlpHash(x interface{}) (h common.Hash) { + hw := sha3.NewLegacyKeccak256() + rlp.Encode(hw, x) + hw.Sum(h[:0]) + return h +} diff --git a/cmd/evm/internal/t8ntool/flags.go b/cmd/evm/internal/t8ntool/flags.go new file mode 100644 index 000000000000..424156ba8222 --- /dev/null +++ b/cmd/evm/internal/t8ntool/flags.go @@ -0,0 +1,108 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
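A small aside on `rlpHash` above: when no logs were emitted, `statedb.Logs()` is empty, its RLP encoding is the single byte `0xc0`, and the resulting `logsHash` is the constant that appears in the README's example output. A standalone check, assuming `golang.org/x/crypto/sha3` is available (execution.go above already imports it):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	hw := sha3.NewLegacyKeccak256()
	hw.Write([]byte{0xc0}) // RLP encoding of an empty list
	fmt.Printf("0x%x\n", hw.Sum(nil))
	// 0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347
}
```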
+ +package t8ntool + +import ( + "fmt" + "strings" + + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/tests" + "gopkg.in/urfave/cli.v1" +) + +var ( + TraceFlag = cli.BoolFlag{ + Name: "trace", + Usage: "Output full trace logs to files .jsonl", + } + TraceDisableMemoryFlag = cli.BoolFlag{ + Name: "trace.nomemory", + Usage: "Disable full memory dump in traces", + } + TraceDisableStackFlag = cli.BoolFlag{ + Name: "trace.nostack", + Usage: "Disable stack output in traces", + } + TraceDisableReturnDataFlag = cli.BoolFlag{ + Name: "trace.noreturndata", + Usage: "Disable return data output in traces", + } + OutputBasedir = cli.StringFlag{ + Name: "output.basedir", + Usage: "Specifies where output files are placed. Will be created if it does not exist.", + Value: "", + } + OutputAllocFlag = cli.StringFlag{ + Name: "output.alloc", + Usage: "Determines where to put the `alloc` of the post-state.\n" + + "\t`stdout` - into the stdout output\n" + + "\t`stderr` - into the stderr output\n" + + "\t - into the file ", + Value: "alloc.json", + } + OutputResultFlag = cli.StringFlag{ + Name: "output.result", + Usage: "Determines where to put the `result` (stateroot, txroot etc) of the post-state.\n" + + "\t`stdout` - into the stdout output\n" + + "\t`stderr` - into the stderr output\n" + + "\t - into the file ", + Value: "result.json", + } + InputAllocFlag = cli.StringFlag{ + Name: "input.alloc", + Usage: "`stdin` or file name of where to find the prestate alloc to use.", + Value: "alloc.json", + } + InputEnvFlag = cli.StringFlag{ + Name: "input.env", + Usage: "`stdin` or file name of where to find the prestate env to use.", + Value: "env.json", + } + InputTxsFlag = cli.StringFlag{ + Name: "input.txs", + Usage: "`stdin` or file name of where to find the transactions to apply.", + Value: "txs.json", + } + RewardFlag = cli.Int64Flag{ + Name: "state.reward", + Usage: "Mining reward. Set to -1 to disable", + Value: 0, + } + ChainIDFlag = cli.Int64Flag{ + Name: "state.chainid", + Usage: "ChainID to use", + Value: 1, + } + ForknameFlag = cli.StringFlag{ + Name: "state.fork", + Usage: fmt.Sprintf("Name of ruleset to use."+ + "\n\tAvailable forknames:"+ + "\n\t %v"+ + "\n\tAvailable extra eips:"+ + "\n\t %v"+ + "\n\tSyntax (+ExtraEip)", + strings.Join(tests.AvailableForks(), "\n\t "), + strings.Join(vm.ActivateableEips(), ", ")), + Value: "Istanbul", + } + VerbosityFlag = cli.IntFlag{ + Name: "verbosity", + Usage: "sets the verbosity level", + Value: 3, + } +) diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go new file mode 100644 index 000000000000..ab5951534e48 --- /dev/null +++ b/cmd/evm/internal/t8ntool/gen_stenv.go @@ -0,0 +1,80 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. + +package t8ntool + +import ( + "encoding/json" + "errors" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" +) + +var _ = (*stEnvMarshaling)(nil) + +// MarshalJSON marshals as JSON. 
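The generated marshaling code that follows maps the env fields to types like `math.HexOrDecimal64`, which accept both decimal and `0x`-prefixed strings (compare `"currentNumber": "1"` with `"currentTimestamp": "0x03e8"` in the testdata). A rough standard-library-only imitation of that behaviour, with hypothetical names:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

// hexOrDecimal64 is a stand-in for the real math.HexOrDecimal64 type.
type hexOrDecimal64 uint64

func (h *hexOrDecimal64) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	base := 10
	if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") {
		s, base = s[2:], 16
	}
	v, err := strconv.ParseUint(s, base, 64)
	if err != nil {
		return err
	}
	*h = hexOrDecimal64(v)
	return nil
}

func main() {
	var env struct {
		Number    hexOrDecimal64 `json:"currentNumber"`
		Timestamp hexOrDecimal64 `json:"currentTimestamp"`
	}
	blob := []byte(`{"currentNumber": "1", "currentTimestamp": "0x03e8"}`)
	if err := json.Unmarshal(blob, &env); err != nil {
		panic(err)
	}
	fmt.Println(env.Number, env.Timestamp) // 1 1000
}
```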
+func (s stEnv) MarshalJSON() ([]byte, error) { + type stEnv struct { + Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` + GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + } + var enc stEnv + enc.Coinbase = common.UnprefixedAddress(s.Coinbase) + enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty) + enc.GasLimit = math.HexOrDecimal64(s.GasLimit) + enc.Number = math.HexOrDecimal64(s.Number) + enc.Timestamp = math.HexOrDecimal64(s.Timestamp) + enc.BlockHashes = s.BlockHashes + enc.Ommers = s.Ommers + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. +func (s *stEnv) UnmarshalJSON(input []byte) error { + type stEnv struct { + Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` + GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + } + var dec stEnv + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.Coinbase == nil { + return errors.New("missing required field 'currentCoinbase' for stEnv") + } + s.Coinbase = common.Address(*dec.Coinbase) + if dec.Difficulty == nil { + return errors.New("missing required field 'currentDifficulty' for stEnv") + } + s.Difficulty = (*big.Int)(dec.Difficulty) + if dec.GasLimit == nil { + return errors.New("missing required field 'currentGasLimit' for stEnv") + } + s.GasLimit = uint64(*dec.GasLimit) + if dec.Number == nil { + return errors.New("missing required field 'currentNumber' for stEnv") + } + s.Number = uint64(*dec.Number) + if dec.Timestamp == nil { + return errors.New("missing required field 'currentTimestamp' for stEnv") + } + s.Timestamp = uint64(*dec.Timestamp) + if dec.BlockHashes != nil { + s.BlockHashes = dec.BlockHashes + } + if dec.Ommers != nil { + s.Ommers = dec.Ommers + } + return nil +} diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go new file mode 100644 index 000000000000..5119ed5fb76a --- /dev/null +++ b/cmd/evm/internal/t8ntool/transition.go @@ -0,0 +1,289 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
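The file that follows defines `NumberedError`, which is how the exit codes listed in the README's "Error codes and output" section reach `os.Exit` in `cmd/evm/main.go`. A minimal sketch of the same convention, with hypothetical names and only the standard library:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// codedError pairs an error with the process exit code it should produce.
type codedError struct {
	code int
	err  error
}

func (e *codedError) Error() string { return fmt.Sprintf("ERROR(%d): %v", e.code, e.err) }

func run() error {
	// Stand-in for a failed state transition, e.g. a missing blockhash (code 4).
	return &codedError{code: 4, err: errors.New("blockhash for that block not provided")}
}

func main() {
	if err := run(); err != nil {
		code := 1 // generic failure for errors that carry no code
		var ce *codedError
		if errors.As(err, &ce) {
			code = ce.code
		}
		fmt.Fprintln(os.Stderr, err)
		os.Exit(code)
	}
}
```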
+ +package t8ntool + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "math/big" + "os" + "path" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/tests" + "gopkg.in/urfave/cli.v1" +) + +const ( + ErrorEVM = 2 + ErrorVMConfig = 3 + ErrorMissingBlockhash = 4 + + ErrorJson = 10 + ErrorIO = 11 + + stdinSelector = "stdin" +) + +type NumberedError struct { + errorCode int + err error +} + +func NewError(errorCode int, err error) *NumberedError { + return &NumberedError{errorCode, err} +} + +func (n *NumberedError) Error() string { + return fmt.Sprintf("ERROR(%d): %v", n.errorCode, n.err.Error()) +} + +func (n *NumberedError) Code() int { + return n.errorCode +} + +type input struct { + Alloc core.GenesisAlloc `json:"alloc,omitempty"` + Env *stEnv `json:"env,omitempty"` + Txs types.Transactions `json:"txs,omitempty"` +} + +func Main(ctx *cli.Context) error { + // Configure the go-ethereum logger + glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) + glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name))) + log.Root().SetHandler(glogger) + + var ( + err error + tracer vm.Tracer + baseDir = "" + ) + var getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error) + + // If user specified a basedir, make sure it exists + if ctx.IsSet(OutputBasedir.Name) { + if base := ctx.String(OutputBasedir.Name); len(base) > 0 { + err := os.MkdirAll(base, 0755) // //rw-r--r-- + if err != nil { + return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err)) + } + baseDir = base + } + } + if ctx.Bool(TraceFlag.Name) { + // Configure the EVM logger + logConfig := &vm.LogConfig{ + DisableStack: ctx.Bool(TraceDisableStackFlag.Name), + DisableMemory: ctx.Bool(TraceDisableMemoryFlag.Name), + DisableReturnData: ctx.Bool(TraceDisableReturnDataFlag.Name), + Debug: true, + } + var prevFile *os.File + // This one closes the last file + defer func() { + if prevFile != nil { + prevFile.Close() + } + }() + getTracer = func(txIndex int, txHash common.Hash) (vm.Tracer, error) { + if prevFile != nil { + prevFile.Close() + } + traceFile, err := os.Create(path.Join(baseDir, fmt.Sprintf("trace-%d-%v.jsonl", txIndex, txHash.String()))) + if err != nil { + return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err)) + } + prevFile = traceFile + return vm.NewJSONLogger(logConfig, traceFile), nil + } + } else { + getTracer = func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error) { + return nil, nil + } + } + // We need to load three things: alloc, env and transactions. May be either in + // stdin input or in files. 
+ // Check if anything needs to be read from stdin + var ( + prestate Prestate + txs types.Transactions // txs to apply + allocStr = ctx.String(InputAllocFlag.Name) + + envStr = ctx.String(InputEnvFlag.Name) + txStr = ctx.String(InputTxsFlag.Name) + inputData = &input{} + ) + + if allocStr == stdinSelector || envStr == stdinSelector || txStr == stdinSelector { + decoder := json.NewDecoder(os.Stdin) + decoder.Decode(inputData) + } + if allocStr != stdinSelector { + inFile, err := os.Open(allocStr) + if err != nil { + return NewError(ErrorIO, fmt.Errorf("failed reading alloc file: %v", err)) + } + defer inFile.Close() + decoder := json.NewDecoder(inFile) + if err := decoder.Decode(&inputData.Alloc); err != nil { + return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling alloc-file: %v", err)) + } + } + + if envStr != stdinSelector { + inFile, err := os.Open(envStr) + if err != nil { + return NewError(ErrorIO, fmt.Errorf("failed reading env file: %v", err)) + } + defer inFile.Close() + decoder := json.NewDecoder(inFile) + var env stEnv + if err := decoder.Decode(&env); err != nil { + return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling env-file: %v", err)) + } + inputData.Env = &env + } + + if txStr != stdinSelector { + inFile, err := os.Open(txStr) + if err != nil { + return NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err)) + } + defer inFile.Close() + decoder := json.NewDecoder(inFile) + var txs types.Transactions + if err := decoder.Decode(&txs); err != nil { + return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling txs-file: %v", err)) + } + inputData.Txs = txs + } + + prestate.Pre = inputData.Alloc + prestate.Env = *inputData.Env + txs = inputData.Txs + + // Iterate over all the tests, run them and aggregate the results + vmConfig := vm.Config{ + Tracer: tracer, + Debug: (tracer != nil), + } + // Construct the chainconfig + var chainConfig *params.ChainConfig + if cConf, extraEips, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil { + return NewError(ErrorVMConfig, fmt.Errorf("Failed constructing chain configuration: %v", err)) + } else { + chainConfig = cConf + vmConfig.ExtraEips = extraEips + } + // Set the chain id + chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name)) + + // Run the test and aggregate the result + state, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer) + if err != nil { + return err + } + // Dump the excution result + //postAlloc := state.DumpGenesisFormat(false, false, false) + collector := make(Alloc) + state.DumpToCollector(collector, false, false, false, nil, -1) + return dispatchOutput(ctx, baseDir, result, collector) + +} + +type Alloc map[common.Address]core.GenesisAccount + +func (g Alloc) OnRoot(common.Hash) {} + +func (g Alloc) OnAccount(addr common.Address, dumpAccount state.DumpAccount) { + balance, _ := new(big.Int).SetString(dumpAccount.Balance, 10) + var storage map[common.Hash]common.Hash + if dumpAccount.Storage != nil { + storage = make(map[common.Hash]common.Hash) + for k, v := range dumpAccount.Storage { + storage[k] = common.HexToHash(v) + } + } + genesisAccount := core.GenesisAccount{ + Code: common.FromHex(dumpAccount.Code), + Storage: storage, + Balance: balance, + Nonce: dumpAccount.Nonce, + } + g[addr] = genesisAccount +} + +// saveFile marshalls the object to the given file +func saveFile(baseDir, filename string, data interface{}) error { + b, err := json.MarshalIndent(data, "", " ") + if err != nil { + return NewError(ErrorJson, 
fmt.Errorf("failed marshalling output: %v", err)) + } + if err = ioutil.WriteFile(path.Join(baseDir, filename), b, 0644); err != nil { + return NewError(ErrorIO, fmt.Errorf("failed writing output: %v", err)) + } + return nil +} + +// dispatchOutput writes the output data to either stderr or stdout, or to the specified +// files +func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc) error { + stdOutObject := make(map[string]interface{}) + stdErrObject := make(map[string]interface{}) + dispatch := func(baseDir, fName, name string, obj interface{}) error { + switch fName { + case "stdout": + stdOutObject[name] = obj + case "stderr": + stdErrObject[name] = obj + default: // save to file + if err := saveFile(baseDir, fName, obj); err != nil { + return err + } + } + return nil + } + if err := dispatch(baseDir, ctx.String(OutputAllocFlag.Name), "alloc", alloc); err != nil { + return err + } + if err := dispatch(baseDir, ctx.String(OutputResultFlag.Name), "result", result); err != nil { + return err + } + if len(stdOutObject) > 0 { + b, err := json.MarshalIndent(stdOutObject, "", " ") + if err != nil { + return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) + } + os.Stdout.Write(b) + } + if len(stdErrObject) > 0 { + b, err := json.MarshalIndent(stdErrObject, "", " ") + if err != nil { + return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) + } + os.Stderr.Write(b) + } + return nil +} diff --git a/cmd/evm/main.go b/cmd/evm/main.go index 67447d58ae63..35c672142dbe 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -22,7 +22,9 @@ import ( "math/big" "os" + "github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool" "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/internal/flags" "gopkg.in/urfave/cli.v1" ) @@ -30,7 +32,7 @@ var gitCommit = "" // Git SHA1 commit hash of the release (set via linker flags) var gitDate = "" var ( - app = utils.NewApp(gitCommit, gitDate, "the evm command line interface") + app = flags.NewApp(gitCommit, gitDate, "the evm command line interface") DebugFlag = cli.BoolFlag{ Name: "debug", @@ -87,6 +89,10 @@ var ( Name: "verbosity", Usage: "sets the verbosity level", } + BenchFlag = cli.BoolFlag{ + Name: "bench", + Usage: "benchmark the execution", + } CreateFlag = cli.BoolFlag{ Name: "create", Usage: "indicates the action should be create rather than call", @@ -115,6 +121,14 @@ var ( Name: "nostack", Usage: "disable stack output", } + DisableStorageFlag = cli.BoolFlag{ + Name: "nostorage", + Usage: "disable storage output", + } + DisableReturnDataFlag = cli.BoolFlag{ + Name: "noreturndata", + Usage: "disable return data output", + } EVMInterpreterFlag = cli.StringFlag{ Name: "vm.evm", Usage: "External EVM configuration (default = built-in interpreter)", @@ -122,8 +136,32 @@ var ( } ) +var stateTransitionCommand = cli.Command{ + Name: "transition", + Aliases: []string{"t8n"}, + Usage: "executes a full state transition", + Action: t8ntool.Main, + Flags: []cli.Flag{ + t8ntool.TraceFlag, + t8ntool.TraceDisableMemoryFlag, + t8ntool.TraceDisableStackFlag, + t8ntool.TraceDisableReturnDataFlag, + t8ntool.OutputBasedir, + t8ntool.OutputAllocFlag, + t8ntool.OutputResultFlag, + t8ntool.InputAllocFlag, + t8ntool.InputEnvFlag, + t8ntool.InputTxsFlag, + t8ntool.ForknameFlag, + t8ntool.ChainIDFlag, + t8ntool.RewardFlag, + t8ntool.VerbosityFlag, + }, +} + func init() { app.Flags = []cli.Flag{ + BenchFlag, CreateFlag, DebugFlag, VerbosityFlag, @@ -144,6 +182,8 @@ func init() { 
ReceiverFlag, DisableMemoryFlag, DisableStackFlag, + DisableStorageFlag, + DisableReturnDataFlag, EVMInterpreterFlag, } app.Commands = []cli.Command{ @@ -151,13 +191,18 @@ func init() { disasmCommand, runCommand, stateTestCommand, + stateTransitionCommand, } - cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate + cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate } func main() { if err := app.Run(os.Args); err != nil { + code := 1 + if ec, ok := err.(*t8ntool.NumberedError); ok { + code = ec.Code() + } fmt.Fprintln(os.Stderr, err) - os.Exit(1) + os.Exit(code) } } diff --git a/cmd/evm/poststate.json b/cmd/evm/poststate.json new file mode 100644 index 000000000000..9ee17f18d139 --- /dev/null +++ b/cmd/evm/poststate.json @@ -0,0 +1,23 @@ +{ + "root": "f4157bb27bcb1d1a63001434a249a80948f2e9fe1f53d551244c1dae826b5b23", + "accounts": { + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { + "balance": "4276951709", + "nonce": 1, + "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "6916764286133345652", + "nonce": 172, + "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + }, + "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "42500", + "nonce": 0, + "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + } + } +} \ No newline at end of file diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index cecbf360639a..4063767cb8f4 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -25,6 +25,7 @@ import ( "os" goruntime "runtime" "runtime/pprof" + "testing" "time" "github.com/ethereum/go-ethereum/cmd/evm/internal/compiler" @@ -37,7 +38,7 @@ import ( "github.com/ethereum/go-ethereum/core/vm/runtime" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var runCommand = cli.Command{ @@ -69,14 +70,49 @@ func readGenesis(genesisPath string) *core.Genesis { return genesis } +type execStats struct { + time time.Duration // The execution time. + allocs int64 // The number of heap allocations during execution. + bytesAllocated int64 // The cumulative number of bytes allocated during execution. +} + +func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) (output []byte, gasLeft uint64, stats execStats, err error) { + if bench { + result := testing.Benchmark(func(b *testing.B) { + for i := 0; i < b.N; i++ { + output, gasLeft, err = execFunc() + } + }) + + // Get the average execution time from the benchmarking result. + // There are other useful stats here that could be reported. 
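The `timedExec` helper above uses `testing.Benchmark` to obtain per-operation timing and allocation statistics outside of a normal `go test` run when `--bench` is set. A standalone sketch of that technique (the workload below is just a stand-in):

```go
package main

import (
	"fmt"
	"testing"
	"time"
)

func main() {
	// Stand-in workload; in the evm command this is the EVM execFunc.
	work := func() int {
		sum := 0
		for i := 0; i < 1000000; i++ {
			sum += i
		}
		return sum
	}

	// testing.Benchmark re-runs the closure b.N times and returns aggregate stats.
	result := testing.Benchmark(func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			_ = work()
		}
	})

	fmt.Println("time/op:  ", time.Duration(result.NsPerOp()))
	fmt.Println("allocs/op:", result.AllocsPerOp())
	fmt.Println("bytes/op: ", result.AllocedBytesPerOp())
}
```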
+ stats.time = time.Duration(result.NsPerOp()) + stats.allocs = result.AllocsPerOp() + stats.bytesAllocated = result.AllocedBytesPerOp() + } else { + var memStatsBefore, memStatsAfter goruntime.MemStats + goruntime.ReadMemStats(&memStatsBefore) + startTime := time.Now() + output, gasLeft, err = execFunc() + stats.time = time.Since(startTime) + goruntime.ReadMemStats(&memStatsAfter) + stats.allocs = int64(memStatsAfter.Mallocs - memStatsBefore.Mallocs) + stats.bytesAllocated = int64(memStatsAfter.TotalAlloc - memStatsBefore.TotalAlloc) + } + + return output, gasLeft, stats, err +} + func runCmd(ctx *cli.Context) error { glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name))) log.Root().SetHandler(glogger) logconfig := &vm.LogConfig{ - DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name), - DisableStack: ctx.GlobalBool(DisableStackFlag.Name), - Debug: ctx.GlobalBool(DebugFlag.Name), + DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name), + DisableStack: ctx.GlobalBool(DisableStackFlag.Name), + DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name), + DisableReturnData: ctx.GlobalBool(DisableReturnDataFlag.Name), + Debug: ctx.GlobalBool(DebugFlag.Name), } var ( @@ -101,10 +137,10 @@ func runCmd(ctx *cli.Context) error { genesisConfig = gen db := rawdb.NewMemoryDatabase() genesis := gen.ToBlock(db) - statedb, _ = state.New(genesis.Root(), state.NewDatabase(db)) + statedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil) chainConfig = gen.Config } else { - statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) + statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) genesisConfig = new(core.Genesis) } if ctx.GlobalString(SenderFlag.Name) != "" { @@ -116,11 +152,7 @@ func runCmd(ctx *cli.Context) error { receiver = common.HexToAddress(ctx.GlobalString(ReceiverFlag.Name)) } - var ( - code []byte - ret []byte - err error - ) + var code []byte codeFileFlag := ctx.GlobalString(CodeFileFlag.Name) codeFlag := ctx.GlobalString(CodeFlag.Name) @@ -203,10 +235,10 @@ func runCmd(ctx *cli.Context) error { } else { runtimeConfig.ChainConfig = params.AllEthashProtocolChanges } - tstart := time.Now() - var leftOverGas uint64 + var hexInput []byte if inputFileFlag := ctx.GlobalString(InputFileFlag.Name); inputFileFlag != "" { + var err error if hexInput, err = ioutil.ReadFile(inputFileFlag); err != nil { fmt.Printf("could not load input from file: %v\n", err) os.Exit(1) @@ -215,16 +247,25 @@ func runCmd(ctx *cli.Context) error { hexInput = []byte(ctx.GlobalString(InputFlag.Name)) } input := common.FromHex(string(bytes.TrimSpace(hexInput))) + + var execFunc func() ([]byte, uint64, error) if ctx.GlobalBool(CreateFlag.Name) { input = append(code, input...) 
- ret, _, leftOverGas, err = runtime.Create(input, &runtimeConfig) + execFunc = func() ([]byte, uint64, error) { + output, _, gasLeft, err := runtime.Create(input, &runtimeConfig) + return output, gasLeft, err + } } else { if len(code) > 0 { statedb.SetCode(receiver, code) } - ret, leftOverGas, err = runtime.Call(receiver, input, &runtimeConfig) + execFunc = func() ([]byte, uint64, error) { + return runtime.Call(receiver, input, &runtimeConfig) + } } - execTime := time.Since(tstart) + + bench := ctx.GlobalBool(BenchFlag.Name) + output, leftOverGas, stats, err := timedExec(bench, execFunc) if ctx.GlobalBool(DumpFlag.Name) { statedb.Commit(true) @@ -254,20 +295,15 @@ func runCmd(ctx *cli.Context) error { vm.WriteLogs(os.Stderr, statedb.Logs()) } - if ctx.GlobalBool(StatDumpFlag.Name) { - var mem goruntime.MemStats - goruntime.ReadMemStats(&mem) - fmt.Fprintf(os.Stderr, `evm execution time: %v -heap objects: %d -allocations: %d -total allocations: %d -GC calls: %d -Gas used: %d - -`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC, initialGas-leftOverGas) + if bench || ctx.GlobalBool(StatDumpFlag.Name) { + fmt.Fprintf(os.Stderr, `EVM gas used: %d +execution time: %v +allocations: %d +allocated bytes: %d +`, initialGas-leftOverGas, stats.time, stats.allocs, stats.bytesAllocated) } if tracer == nil { - fmt.Printf("0x%x\n", ret) + fmt.Printf("0x%x\n", output) if err != nil { fmt.Printf(" error: %v\n", err) } diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index cef2aedb5e68..c4df936c75ff 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -28,7 +28,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/tests" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var stateTestCommand = cli.Command{ @@ -59,8 +59,10 @@ func stateTestCmd(ctx *cli.Context) error { // Configure the EVM logger config := &vm.LogConfig{ - DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name), - DisableStack: ctx.GlobalBool(DisableStackFlag.Name), + DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name), + DisableStack: ctx.GlobalBool(DisableStackFlag.Name), + DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name), + DisableReturnData: ctx.GlobalBool(DisableReturnDataFlag.Name), } var ( tracer vm.Tracer @@ -96,7 +98,7 @@ func stateTestCmd(ctx *cli.Context) error { for _, st := range test.Subtests() { // Run the test and aggregate the result result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true} - state, err := test.Run(st, cfg) + _, state, err := test.Run(st, cfg, false) // print state root for evmlab tracing if ctx.GlobalBool(MachineFlag.Name) && state != nil { fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false)) diff --git a/cmd/evm/testdata/1/alloc.json b/cmd/evm/testdata/1/alloc.json new file mode 100644 index 000000000000..cef1a25ff013 --- /dev/null +++ b/cmd/evm/testdata/1/alloc.json @@ -0,0 +1,12 @@ +{ + "a94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "code": "0x", + "nonce": "0xac", + "storage": {} + }, + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{ + "balance": "0xfeedbead", + "nonce" : "0x00" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/1/env.json b/cmd/evm/testdata/1/env.json new file mode 100644 index 000000000000..dd60abd205ac --- /dev/null +++ b/cmd/evm/testdata/1/env.json @@ -0,0 +1,7 @@ +{ + "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "currentDifficulty": "0x20000", + "currentGasLimit": "0x750a163df65e8a", + 
"currentNumber": "1", + "currentTimestamp": "1000" +} \ No newline at end of file diff --git a/cmd/evm/testdata/1/txs.json b/cmd/evm/testdata/1/txs.json new file mode 100644 index 000000000000..50b31ff31bcb --- /dev/null +++ b/cmd/evm/testdata/1/txs.json @@ -0,0 +1,26 @@ +[ + { + "gas": "0x5208", + "gasPrice": "0x2", + "hash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673", + "input": "0x", + "nonce": "0x0", + "r": "0x9500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdb", + "s": "0x7235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600", + "to": "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192", + "v": "0x1b", + "value": "0x1" + }, + { + "gas": "0x5208", + "gasPrice": "0x2", + "hash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673", + "input": "0x", + "nonce": "0x0", + "r": "0x9500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdb", + "s": "0x7235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600", + "to": "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192", + "v": "0x1b", + "value": "0x1" + } +] diff --git a/cmd/evm/testdata/2/alloc.json b/cmd/evm/testdata/2/alloc.json new file mode 100644 index 000000000000..a9720afc9367 --- /dev/null +++ b/cmd/evm/testdata/2/alloc.json @@ -0,0 +1,16 @@ +{ + "0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x6001600053600160006001f0ff00", + "nonce" : "0x00", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/2/env.json b/cmd/evm/testdata/2/env.json new file mode 100644 index 000000000000..ebadd3f06ac7 --- /dev/null +++ b/cmd/evm/testdata/2/env.json @@ -0,0 +1,7 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentGasLimit" : "0x3b9aca00", + "currentNumber" : "0x01", + "currentTimestamp" : "0x03e8" +} \ No newline at end of file diff --git a/cmd/evm/testdata/2/readme.md b/cmd/evm/testdata/2/readme.md new file mode 100644 index 000000000000..c116f0e79274 --- /dev/null +++ b/cmd/evm/testdata/2/readme.md @@ -0,0 +1 @@ +These files examplify a selfdestruct to the `0`-address. 
\ No newline at end of file diff --git a/cmd/evm/testdata/2/txs.json b/cmd/evm/testdata/2/txs.json new file mode 100644 index 000000000000..304445858884 --- /dev/null +++ b/cmd/evm/testdata/2/txs.json @@ -0,0 +1,14 @@ +[ + { + "input" : "0x", + "gas" : "0x5f5e100", + "gasPrice" : "0x1", + "nonce" : "0x0", + "to" : "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "value" : "0x186a0", + "v" : "0x1b", + "r" : "0x88544c93a564b4c28d2ffac2074a0c55fdd4658fe0d215596ed2e32e3ef7f56b", + "s" : "0x7fb4075d54190f825d7c47bb820284757b34fd6293904a93cddb1d3aa961ac28", + "hash" : "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81" + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/3/alloc.json b/cmd/evm/testdata/3/alloc.json new file mode 100644 index 000000000000..dca318ee5462 --- /dev/null +++ b/cmd/evm/testdata/3/alloc.json @@ -0,0 +1,16 @@ +{ + "0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x600140", + "nonce" : "0x00", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/3/env.json b/cmd/evm/testdata/3/env.json new file mode 100644 index 000000000000..e283eff46151 --- /dev/null +++ b/cmd/evm/testdata/3/env.json @@ -0,0 +1,8 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentGasLimit" : "0x3b9aca00", + "currentNumber" : "0x05", + "currentTimestamp" : "0x03e8", + "blockHashes" : { "1" : "0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"} +} \ No newline at end of file diff --git a/cmd/evm/testdata/3/readme.md b/cmd/evm/testdata/3/readme.md new file mode 100644 index 000000000000..499f03d7aa7f --- /dev/null +++ b/cmd/evm/testdata/3/readme.md @@ -0,0 +1,2 @@ +These files exemplify a transition where a transaction (executed on block 5) requests +the blockhash for block `1`.
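The hash for block `1` comes from the `blockHashes` map in the `env.json` above; per the specification generated further down by `transition-test.sh`, a `BLOCKHASH` request for a block whose history is not supplied makes the tool exit with code `4`. A rough, hypothetical Go sketch of that lookup logic, for illustration only (not the tool's actual implementation):

```go
package main

import (
	"errors"
	"fmt"
)

// errMissingBlockHash models the condition that maps to exit code 4:
// BLOCKHASH was invoked for a block whose history was not supplied.
var errMissingBlockHash = errors.New("required blockhash not supplied in env")

// lookupBlockHash resolves a BLOCKHASH request against the env-provided map,
// keyed by decimal block number as in env.json ("blockHashes": {"1": "0xdac5..."}).
func lookupBlockHash(blockHashes map[string]string, number uint64) (string, error) {
	if h, ok := blockHashes[fmt.Sprintf("%d", number)]; ok {
		return h, nil
	}
	return "", errMissingBlockHash
}

func main() {
	hashes := map[string]string{
		"1": "0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4",
	}
	if h, err := lookupBlockHash(hashes, 1); err == nil {
		fmt.Println("block 1:", h) // present, as in testdata/3
	}
	if _, err := lookupBlockHash(hashes, 4); err != nil {
		fmt.Println("block 4:", err) // missing, as in testdata/4
	}
}
```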
diff --git a/cmd/evm/testdata/3/txs.json b/cmd/evm/testdata/3/txs.json new file mode 100644 index 000000000000..304445858884 --- /dev/null +++ b/cmd/evm/testdata/3/txs.json @@ -0,0 +1,14 @@ +[ + { + "input" : "0x", + "gas" : "0x5f5e100", + "gasPrice" : "0x1", + "nonce" : "0x0", + "to" : "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "value" : "0x186a0", + "v" : "0x1b", + "r" : "0x88544c93a564b4c28d2ffac2074a0c55fdd4658fe0d215596ed2e32e3ef7f56b", + "s" : "0x7fb4075d54190f825d7c47bb820284757b34fd6293904a93cddb1d3aa961ac28", + "hash" : "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81" + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/4/alloc.json b/cmd/evm/testdata/4/alloc.json new file mode 100644 index 000000000000..fadf2bdc4ece --- /dev/null +++ b/cmd/evm/testdata/4/alloc.json @@ -0,0 +1,16 @@ +{ + "0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x600340", + "nonce" : "0x00", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/4/env.json b/cmd/evm/testdata/4/env.json new file mode 100644 index 000000000000..e283eff46151 --- /dev/null +++ b/cmd/evm/testdata/4/env.json @@ -0,0 +1,8 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentGasLimit" : "0x3b9aca00", + "currentNumber" : "0x05", + "currentTimestamp" : "0x03e8", + "blockHashes" : { "1" : "0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"} +} \ No newline at end of file diff --git a/cmd/evm/testdata/4/readme.md b/cmd/evm/testdata/4/readme.md new file mode 100644 index 000000000000..08840d37bd9c --- /dev/null +++ b/cmd/evm/testdata/4/readme.md @@ -0,0 +1,3 @@ +These files exemplify a transition where a transaction (executed on block 5) requests +the blockhash for block `4`, but where the hash for that block is missing. +It's expected that executing these should cause `exit` with error code `4`.
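Because this failure is surfaced purely through the process exit code, a harness can shell out to the binary and branch on it. A hedged Go sketch using `os/exec`: the binary path and flags mirror the examples in this patch (run from `cmd/evm` with the `evm` binary built), and the exit codes `3`/`4` come from the specification in `transition-test.sh`; the wrapper itself is illustrative, not part of the patch.

```go
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	// Same invocation as the testdata/4 example: block history for the
	// requested BLOCKHASH is not provided, so exit code 4 is expected.
	cmd := exec.Command("./evm", "t8n",
		"--input.alloc=./testdata/4/alloc.json",
		"--input.txs=./testdata/4/txs.json",
		"--input.env=./testdata/4/env.json",
	)
	err := cmd.Run()

	var exitErr *exec.ExitError
	switch {
	case err == nil:
		fmt.Println("unexpected success")
	case errors.As(err, &exitErr):
		switch exitErr.ExitCode() {
		case 3:
			fmt.Println("configuration error (e.g. invalid fork)")
		case 4:
			fmt.Println("missing block history for BLOCKHASH, as expected")
		default:
			fmt.Println("tool failed with exit code", exitErr.ExitCode())
		}
	default:
		fmt.Println("could not run the tool:", err)
	}
}
```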
diff --git a/cmd/evm/testdata/4/txs.json b/cmd/evm/testdata/4/txs.json new file mode 100644 index 000000000000..304445858884 --- /dev/null +++ b/cmd/evm/testdata/4/txs.json @@ -0,0 +1,14 @@ +[ + { + "input" : "0x", + "gas" : "0x5f5e100", + "gasPrice" : "0x1", + "nonce" : "0x0", + "to" : "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "value" : "0x186a0", + "v" : "0x1b", + "r" : "0x88544c93a564b4c28d2ffac2074a0c55fdd4658fe0d215596ed2e32e3ef7f56b", + "s" : "0x7fb4075d54190f825d7c47bb820284757b34fd6293904a93cddb1d3aa961ac28", + "hash" : "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81" + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/5/alloc.json b/cmd/evm/testdata/5/alloc.json new file mode 100644 index 000000000000..9e26dfeeb6e6 --- /dev/null +++ b/cmd/evm/testdata/5/alloc.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/cmd/evm/testdata/5/env.json b/cmd/evm/testdata/5/env.json new file mode 100644 index 000000000000..1085f63e629a --- /dev/null +++ b/cmd/evm/testdata/5/env.json @@ -0,0 +1,11 @@ +{ + "currentCoinbase": "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "currentDifficulty": "0x20000", + "currentGasLimit": "0x750a163df65e8a", + "currentNumber": "1", + "currentTimestamp": "1000", + "ommers": [ + {"delta": 1, "address": "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" }, + {"delta": 2, "address": "0xcccccccccccccccccccccccccccccccccccccccc" } + ] +} \ No newline at end of file diff --git a/cmd/evm/testdata/5/readme.md b/cmd/evm/testdata/5/readme.md new file mode 100644 index 000000000000..e2b608face9c --- /dev/null +++ b/cmd/evm/testdata/5/readme.md @@ -0,0 +1 @@ +These files exemplify a transition where there are no transactions and two ommers, at block `N-1` (delta 1) and `N-2` (delta 2). \ No newline at end of file diff --git a/cmd/evm/testdata/5/txs.json b/cmd/evm/testdata/5/txs.json new file mode 100644 index 000000000000..fe51488c7066 --- /dev/null +++ b/cmd/evm/testdata/5/txs.json @@ -0,0 +1 @@ +[] diff --git a/cmd/evm/testdata/7/alloc.json b/cmd/evm/testdata/7/alloc.json new file mode 100644 index 000000000000..cef1a25ff013 --- /dev/null +++ b/cmd/evm/testdata/7/alloc.json @@ -0,0 +1,12 @@ +{ + "a94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "code": "0x", + "nonce": "0xac", + "storage": {} + }, + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{ + "balance": "0xfeedbead", + "nonce" : "0x00" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/7/env.json b/cmd/evm/testdata/7/env.json new file mode 100644 index 000000000000..8fd9bc041b8c --- /dev/null +++ b/cmd/evm/testdata/7/env.json @@ -0,0 +1,7 @@ +{ + "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "currentDifficulty": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffff020000", + "currentGasLimit": "0x750a163df65e8a", + "currentNumber": "5", + "currentTimestamp": "1000" +} \ No newline at end of file diff --git a/cmd/evm/testdata/7/readme.md b/cmd/evm/testdata/7/readme.md new file mode 100644 index 000000000000..c9826e0ba67e --- /dev/null +++ b/cmd/evm/testdata/7/readme.md @@ -0,0 +1,7 @@ +This is a test for HomesteadToDao, checking if the +DAO-transition works. + +Example: +``` +./statet8n --input.alloc=./testdata/7/alloc.json --input.txs=./testdata/7/txs.json --input.env=./testdata/7/env.json --output.alloc=stdout --state.fork=HomesteadToDaoAt5 +``` \ No newline at end of file diff --git a/cmd/evm/testdata/7/txs.json b/cmd/evm/testdata/7/txs.json new file mode 100644 index 000000000000..fe51488c7066 ---
/dev/null +++ b/cmd/evm/testdata/7/txs.json @@ -0,0 +1 @@ +[] diff --git a/cmd/evm/transition-test.sh b/cmd/evm/transition-test.sh new file mode 100644 index 000000000000..34c92498559a --- /dev/null +++ b/cmd/evm/transition-test.sh @@ -0,0 +1,191 @@ +#!/bin/bash +ticks="\`\`\`" + +function showjson(){ + echo "\`$1\`:" + echo "${ticks}json" + cat $1 + echo "" + echo "$ticks" +} +function demo(){ + echo "$ticks" + echo "$1" + echo "$ticks" + echo "" +} +function tick(){ + echo "$ticks" +} + +cat << EOF +## EVM state transition tool + +The \`evm t8n\` tool is a stateless state transition utility. It is a utility +which can + +1. Take a prestate, including + - Accounts, + - Block context information, + - Previous blockhashes (*optional) +2. Apply a set of transactions, +3. Apply a mining-reward (*optional), +4. And generate a post-state, including + - State root, transaction root, receipt root, + - Information about rejected transactions, + - Optionally: a full or partial post-state dump + +## Specification + +The idea is to specify the behaviour of this binary very _strictly_, so that other +node implementors can build replicas based on their own state-machines, and the +state generators can swap between a \`geth\`-based implementation and a \`parityvm\`-based +implementation. + +### Command line params + +Command line params that have to be supported are +$(tick) + +` ./evm t8n -h | grep "trace\|output\|state\."` + +$(tick) + +### Error codes and output + +All logging should happen on \`stderr\`. +There are a few (not many) errors that can occur; those are defined below. + +#### EVM-based errors (\`2\` to \`9\`) + +- Other EVM error. Exit code \`2\` +- Failed configuration: when a non-supported or invalid fork was specified. Exit code \`3\`. +- Block history is not supplied, but needed for a \`BLOCKHASH\` operation. If \`BLOCKHASH\` + is invoked targeting a block whose history has not been provided, the program will + exit with code \`4\`. + +#### IO errors (\`10\`-\`20\`) + +- Invalid input json: the supplied data could not be unmarshalled. + The program will exit with code \`10\` +- IO problems: failure to load or save files; the program will exit with code \`11\` + +EOF + +# This should exit with 3 +./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --state.fork=Frontier+1346 2>/dev/null +if [ $? != 3 ]; then + echo "Failed, exitcode should be 3" +fi +cat << EOF +## Examples +### Basic usage + +Invoking it with the provided example files: +EOF +cmd="./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json" +tick;echo "$cmd"; tick +$cmd 2>/dev/null +echo "Two resulting files:" +echo "" +showjson alloc.json +showjson result.json +echo "" + +echo "We can make it spit out the data to e.g. \`stdout\` like this:" +cmd="./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.result=stdout --output.alloc=stdout" +tick;echo "$cmd"; tick +output=`$cmd 2>/dev/null` +echo "Output:" +echo "${ticks}json" +echo "$output" +echo "$ticks" + +cat << EOF + +## About Ommers + +Mining rewards and ommer rewards might need to be added. This is how those are applied: + +- \`block_reward\` is the block mining reward for the miner (\`0xaa\`), of a block at height \`N\`.
+- For each ommer (mined by \`0xbb\`), with blocknumber \`N-delta\` + - (where \`delta\` is the difference between the current block and the ommer) + - The account \`0xbb\` (ommer miner) is awarded \`(8-delta)/8 * block_reward\` + - The account \`0xaa\` (block miner) is awarded \`block_reward / 32\` + +To make \`state_t8n\` apply these, the following inputs are required: + +- \`state.reward\` + - For ethash, it is \`5000000000000000000\` \`wei\`, + - If this is not defined, mining rewards are not applied, + - A value of \`0\` is valid, and causes accounts to be 'touched'. +- For each ommer, the tool needs to be given an \`address\` and a \`delta\`. This + is done via the \`env\`. + +Note: the tool does not verify that e.g. the normal uncle rules apply; +it allows e.g. two uncles at the same height, or an uncle-distance greater than 8. This means that +the tool allows for a negative uncle reward (distance > 8) + +Example: +EOF + +showjson ./testdata/5/env.json + +echo "When applying this, using a reward of \`0x80\`" +cmd="./evm t8n --input.alloc=./testdata/5/alloc.json --input.txs=./testdata/5/txs.json --input.env=./testdata/5/env.json --output.alloc=stdout --state.reward=0x80" +output=`$cmd 2>/dev/null` +echo "Output:" +echo "${ticks}json" +echo "$output" +echo "$ticks" + +echo "### Future EIPs" +echo "" +echo "It is also possible to experiment with future EIPs that are not yet defined in a hard fork." +echo "For example, putting EIP-1344 into Frontier: " +cmd="./evm t8n --state.fork=Frontier+1344 --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json" +tick;echo "$cmd"; tick +echo "" + +echo "### Block history" +echo "" +echo "The \`BLOCKHASH\` opcode requires blockhashes to be provided by the caller, inside the \`env\`." +echo "If a required blockhash is not provided, the exit code should be \`4\`:" +echo "Example where blockhashes are provided: " +cmd="./evm t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace" +tick && echo $cmd && tick +$cmd 2>&1 >/dev/null +cmd="cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2" +tick && echo $cmd && tick +echo "$ticks" +cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2 +echo "$ticks" +echo "" + +echo "In this example, the caller has not provided the required blockhash:" +cmd="./evm t8n --input.alloc=./testdata/4/alloc.json --input.txs=./testdata/4/txs.json --input.env=./testdata/4/env.json --trace" +tick && echo $cmd && tick +tick +$cmd +errc=$? +tick +echo "Error code: $errc" + + +echo "### Chaining" +echo "" +echo "Another thing that can be done is to chain invocations:" +cmd1="./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.alloc=stdout" +cmd2="./evm t8n --input.alloc=stdin --input.env=./testdata/1/env.json --input.txs=./testdata/1/txs.json" +echo "$ticks" +echo "$cmd1 | $cmd2" +output=$($cmd1 | $cmd2 ) +echo $output +echo "$ticks" +echo "What happened here is that we first applied two identical transactions, so the second one was rejected. " +echo "Then, taking the poststate alloc as the input for the next state, we tried again to include" +echo "the same two transactions: this time, both failed because the nonce was too low."
+echo "" +echo "In order to meaningfully chain invocations, one would need to provide meaningful new \`env\`, otherwise the" +echo "actual blocknumber (exposed to the EVM) would not increase." +echo "" diff --git a/cmd/faucet/faucet.go b/cmd/faucet/faucet.go index 77938efabd90..eaf0dc30c1d5 100644 --- a/cmd/faucet/faucet.go +++ b/cmd/faucet/faucet.go @@ -43,6 +43,7 @@ import ( "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" @@ -162,7 +163,6 @@ func main() { if blob, err = ioutil.ReadFile(*accPassFlag); err != nil { log.Crit("Failed to read account password contents", "file", *accPassFlag, "err", err) } - // Delete trailing newline in password pass := strings.TrimSuffix(string(blob), "\n") ks := keystore.NewKeyStore(filepath.Join(os.Getenv("HOME"), ".faucet", "keys"), keystore.StandardScryptN, keystore.StandardScryptP) @@ -170,11 +170,12 @@ func main() { log.Crit("Failed to read account key contents", "file", *accJSONFlag, "err", err) } acc, err := ks.Import(blob, pass, pass) - if err != nil { + if err != nil && err != keystore.ErrAccountAlreadyExists { log.Crit("Failed to import faucet signer account", "err", err) } - ks.Unlock(acc, pass) - + if err := ks.Unlock(acc, pass); err != nil { + log.Crit("Failed to unlock faucet signer account", "err", err) + } // Assemble and start the faucet light service faucet, err := newFaucet(genesis, *ethPortFlag, enodes, *netFlag, *statsFlag, ks, website.Bytes()) if err != nil { @@ -235,23 +236,21 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u if err != nil { return nil, err } + // Assemble the Ethereum light client protocol - if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { - cfg := eth.DefaultConfig - cfg.SyncMode = downloader.LightSync - cfg.NetworkId = network - cfg.Genesis = genesis - return les.New(ctx, &cfg) - }); err != nil { - return nil, err + cfg := eth.DefaultConfig + cfg.SyncMode = downloader.LightSync + cfg.NetworkId = network + cfg.Genesis = genesis + utils.SetDNSDiscoveryDefaults(&cfg, genesis.ToBlock(nil).Hash()) + lesBackend, err := les.New(stack, &cfg) + if err != nil { + return nil, fmt.Errorf("Failed to register the Ethereum service: %w", err) } + // Assemble the ethstats monitoring and reporting service' if stats != "" { - if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { - var serv *les.LightEthereum - ctx.Service(&serv) - return ethstats.New(stats, nil, serv) - }); err != nil { + if err := ethstats.New(stack, lesBackend.ApiBackend, lesBackend.Engine(), stats); err != nil { return nil, err } } @@ -268,7 +267,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u // Attach to the client and retrieve and interesting metadatas api, err := stack.Attach() if err != nil { - stack.Stop() + stack.Close() return nil, err } client := ethclient.NewClient(api) @@ -360,11 +359,14 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { } } // Send over the initial stats and the latest header + f.lock.RLock() + reqs := f.reqs + f.lock.RUnlock() if err = send(conn, map[string]interface{}{ "funds": new(big.Int).Div(balance, ether), "funded": nonce, "peers": f.stack.Server().PeerCount(), - "requests": f.reqs, + "requests": reqs, }, 3*time.Second); err != nil { log.Warn("Failed to send initial 
stats to client", "err", err) return @@ -691,8 +693,11 @@ func authTwitter(url string) (string, string, common.Address, error) { return "", "", common.Address{}, errors.New("Invalid Twitter status URL") } // Twitter's API isn't really friendly with direct links. Still, we don't - // want to do ask read permissions from users, so just load the public posts and - // scrape it for the Ethereum address and profile URL. + // want to do ask read permissions from users, so just load the public posts + // and scrape it for the Ethereum address and profile URL. We need to load + // the mobile page though since the main page loads tweet contents via JS. + url = strings.Replace(url, "https://twitter.com/", "https://mobile.twitter.com/", 1) + res, err := http.Get(url) if err != nil { return "", "", common.Address{}, err @@ -727,7 +732,10 @@ func authTwitter(url string) (string, string, common.Address, error) { // returning the username, avatar URL and Ethereum address to fund on success. func authFacebook(url string) (string, string, common.Address, error) { // Ensure the user specified a meaningful URL, no fancy nonsense - parts := strings.Split(url, "/") + parts := strings.Split(strings.Split(url, "?")[0], "/") + if parts[len(parts)-1] == "" { + parts = parts[0 : len(parts)-1] + } if len(parts) < 4 || parts[len(parts)-2] != "posts" { //lint:ignore ST1005 This error is to be displayed in the browser return "", "", common.Address{}, errors.New("Invalid Facebook post URL") diff --git a/cmd/faucet/faucet.html b/cmd/faucet/faucet.html index 314b19e1232d..ba1433318696 100644 --- a/cmd/faucet/faucet.html +++ b/cmd/faucet/faucet.html @@ -49,7 +49,7 @@

{{
- +
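The faucet change above (`authFacebook` in cmd/faucet/faucet.go) tightens how Facebook post URLs are validated: the query string is stripped and a trailing slash is tolerated before the path is checked for a `.../posts/<id>` tail. Below is a small standalone Go sketch of that normalization; it mirrors the added lines but is not the faucet's own code, and the example URLs are made up for illustration.

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// normalizeFacebookPost drops the query string, tolerates a trailing slash,
// and requires the path to end in .../posts/<id>, like the patched check.
func normalizeFacebookPost(url string) ([]string, error) {
	parts := strings.Split(strings.Split(url, "?")[0], "/")
	if parts[len(parts)-1] == "" {
		parts = parts[:len(parts)-1]
	}
	if len(parts) < 4 || parts[len(parts)-2] != "posts" {
		return nil, errors.New("invalid Facebook post URL")
	}
	return parts, nil
}

func main() {
	for _, url := range []string{
		"https://www.facebook.com/someuser/posts/1234567890/?extra=param", // accepted
		"https://www.facebook.com/someuser/photos/1234567890",             // rejected
	} {
		if parts, err := normalizeFacebookPost(url); err == nil {
			fmt.Println("accepted, user segment:", parts[len(parts)-3])
		} else {
			fmt.Println("rejected:", url, "-", err)
		}
	}
}
```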