diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml deleted file mode 100644 index 85f2dbf2bcf..00000000000 --- a/.github/workflows/changelog.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: Changelog - -on: - pull_request: - types: - - opened - - edited - - synchronize - - reopened - - labeled - - unlabeled - paths: - - '**.go' - - '**/go.mod' - - '**/go.sum' - -jobs: - changelog: - if: contains(github.event.pull_request.title, '[skip changelog]') == false && - contains(github.event.pull_request.labels.*.name, 'skip/changelog') == false - runs-on: ubuntu-latest - name: Changelog - steps: - - id: changelog - env: - GITHUB_TOKEN: ${{ github.token }} - ENDPOINT: repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/files - SELECTOR: 'map(select(.filename | startswith("docs/changelogs/"))) | length' - run: gh api "$ENDPOINT" --jq "$SELECTOR" | xargs -I{} echo "modified={}" | tee -a $GITHUB_OUTPUT - - if: steps.changelog.outputs.modified == '0' - env: - MESSAGE: | - docs/changelogs/ was not modified in this PR. Please do one of the following: - - add a changelog entry - - add `[skip changelog]` to the PR title - - label the PR with `skip/changelog` - run: | - echo "::error::${MESSAGE//$'\n'/%0A}" - exit 1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml deleted file mode 100644 index 8ed324854ff..00000000000 --- a/.github/workflows/codeql-analysis.yml +++ /dev/null @@ -1,49 +0,0 @@ -# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed -name: CodeQL - -on: - workflow_dispatch: - push: - branches: [ master ] - pull_request: - # The branches below must be a subset of the branches above - branches: [ master ] - paths-ignore: - - '**/*.md' - schedule: - - cron: '30 12 * * 2' - -permissions: - contents: read # to fetch code (actions/checkout) - security-events: write # (github/codeql-action/autobuild) - -concurrency: - group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} - cancel-in-progress: true - -jobs: - codeql: - if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' - runs-on: ubuntu-latest - timeout-minutes: 20 - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: 1.23.x - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v3 - with: - languages: go - - - name: Autobuild - uses: github/codeql-action/autobuild@v3 - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml deleted file mode 100644 index e13a3f88231..00000000000 --- a/.github/workflows/docker-build.yml +++ /dev/null @@ -1,34 +0,0 @@ -# If we decide to run build-image.yml on every PR, we could deprecate this workflow. 
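The Changelog workflow deleted at the top of this diff gates merges on a single GitHub CLI call: it fetches the PR's changed-file list and counts entries under docs/changelogs/. A minimal standalone sketch of that check, assuming an authenticated `gh` CLI, with the repo slug and PR number as placeholder values:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Placeholder repo/PR values for illustration only.
ENDPOINT="repos/ipfs/kubo/pulls/1234/files"
SELECTOR='map(select(.filename | startswith("docs/changelogs/"))) | length'

# gh api returns the PR file list; the jq selector counts changelog entries.
modified=$(gh api "$ENDPOINT" --jq "$SELECTOR")

if [ "$modified" = "0" ]; then
  echo "docs/changelogs/ was not modified in this PR" >&2
  exit 1
fi
```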
-name: Docker Build - -on: - workflow_dispatch: - pull_request: - paths-ignore: - '**/*.md' - push: - branches: - 'master' - -concurrency: - group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} - cancel-in-progress: true - -jobs: - docker-build: - if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' - runs-on: ubuntu-latest - timeout-minutes: 10 - env: - IMAGE_NAME: ipfs/kubo - WIP_IMAGE_TAG: wip - defaults: - run: - shell: bash - steps: - - uses: actions/setup-go@v5 - with: - go-version: 1.23.x - - uses: actions/checkout@v4 - - run: docker build -t $IMAGE_NAME:$WIP_IMAGE_TAG . - - run: docker run --rm $IMAGE_NAME:$WIP_IMAGE_TAG --version diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml deleted file mode 100644 index f8380924046..00000000000 --- a/.github/workflows/docker-image.yml +++ /dev/null @@ -1,143 +0,0 @@ -name: Docker Push - -on: - workflow_dispatch: - inputs: - push: - description: 'Push to Docker Hub' - required: true - default: 'false' - tags: - description: 'Custom tags to use for the push' - required: false - default: '' - # # If we decide to build all images on every PR, we should make sure that - # # they are NOT pushed to Docker Hub. - # pull_request: - # paths-ignore: - # - '**/*.md' - push: - branches: - - 'master' - - 'staging' - - 'bifrost-*' - tags: - - 'v*' - -permissions: - contents: read # to fetch code (actions/checkout) - -jobs: - docker-hub: - if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' - name: Push Docker image to Docker Hub - runs-on: ubuntu-latest - timeout-minutes: 15 - env: - IMAGE_NAME: ipfs/kubo - LEGACY_IMAGE_NAME: ipfs/go-ipfs - steps: - - name: Check out the repo - uses: actions/checkout@v4 - - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Cache Docker layers - uses: actions/cache@v4 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx- - - - name: Get tags - id: tags - if: github.event.inputs.tags == '' - run: | - echo "value<<EOF" >> $GITHUB_OUTPUT - ./bin/get-docker-tags.sh "$(date -u +%F)" >> $GITHUB_OUTPUT - echo "EOF" >> $GITHUB_OUTPUT - shell: bash - - - name: Log in to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ vars.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - # We have to build each platform separately because when using multi-arch - # builds, only one platform is being loaded into the cache. This would - # prevent us from testing the other platforms. - - name: Build Docker image (linux/amd64) - uses: docker/build-push-action@v6 - with: - platforms: linux/amd64 - context: . - push: false - load: true - file: ./Dockerfile - tags: ${{ env.IMAGE_NAME }}:linux-amd64 - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache-new - - - name: Build Docker image (linux/arm/v7) - uses: docker/build-push-action@v6 - with: - platforms: linux/arm/v7 - context: . - push: false - load: true - file: ./Dockerfile - tags: ${{ env.IMAGE_NAME }}:linux-arm-v7 - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache-new - - - name: Build Docker image (linux/arm64/v8) - uses: docker/build-push-action@v6 - with: - platforms: linux/arm64/v8 - context: . 
- push: false - load: true - file: ./Dockerfile - tags: ${{ env.IMAGE_NAME }}:linux-arm64-v8 - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache-new - - # We test all the images on amd64 host here. This uses QEMU to emulate - # the other platforms. - # NOTE: --version should finish instantly, but sometimes - # it hangs on github CI (could be qemu issue), so we retry to remove false negatives - - name: Smoke-test linux-amd64 - run: for i in {1..3}; do timeout 15s docker run --rm $IMAGE_NAME:linux-amd64 version --all && break || [ $i = 3 ] && exit 1; done - timeout-minutes: 1 - - name: Smoke-test linux-arm-v7 - run: for i in {1..3}; do timeout 15s docker run --rm $IMAGE_NAME:linux-arm-v7 version --all && break || [ $i = 3 ] && exit 1; done - timeout-minutes: 1 - - name: Smoke-test linux-arm64-v8 - run: for i in {1..3}; do timeout 15s docker run --rm $IMAGE_NAME:linux-arm64-v8 version --all && break || [ $i = 3 ] && exit 1; done - timeout-minutes: 1 - - # This will only push the previously built images. - - if: github.event_name != 'workflow_dispatch' || github.event.inputs.push == 'true' - name: Publish to Docker Hub - uses: docker/build-push-action@v6 - with: - platforms: linux/amd64,linux/arm/v7,linux/arm64/v8 - context: . - push: true - file: ./Dockerfile - tags: "${{ github.event.inputs.tags || steps.tags.outputs.value }}" - cache-from: type=local,src=/tmp/.buildx-cache-new - cache-to: type=local,dest=/tmp/.buildx-cache-new - - # https://github.com/docker/build-push-action/issues/252 - # https://github.com/moby/buildkit/issues/1896 - - name: Move cache to limit growth - run: | - rm -rf /tmp/.buildx-cache - mv /tmp/.buildx-cache-new /tmp/.buildx-cache diff --git a/.github/workflows/gateway-conformance.yml b/.github/workflows/gateway-conformance.yml deleted file mode 100644 index 8e254532244..00000000000 --- a/.github/workflows/gateway-conformance.yml +++ /dev/null @@ -1,228 +0,0 @@ -name: Gateway Conformance - -on: - workflow_dispatch: - push: - branches: - - master - pull_request: - paths-ignore: - - '**/*.md' - -concurrency: - group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} - cancel-in-progress: true - -defaults: - run: - shell: bash - -env: - # hostnames expected by https://github.com/ipfs/gateway-conformance - GATEWAY_PUBLIC_GATEWAYS: | - { - "example.com": { - "UseSubdomains": true, - "InlineDNSLink": true, - "Paths": ["/ipfs", "/ipns"] - }, - "localhost": { - "UseSubdomains": true, - "InlineDNSLink": true, - "Paths": ["/ipfs", "/ipns"] - } - } - -jobs: - # Testing all gateway features via TCP port specified in Addresses.Gateway - gateway-conformance: - runs-on: ubuntu-latest - timeout-minutes: 10 - steps: - # 1. Download the gateway-conformance fixtures - - name: Download gateway-conformance fixtures - uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.7 - with: - output: fixtures - - # 2. Build the kubo-gateway - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: 1.23.x - - uses: protocol/cache-go-action@v1 - with: - name: ${{ github.job }} - - name: Checkout kubo-gateway - uses: actions/checkout@v4 - with: - path: kubo-gateway - - name: Build kubo-gateway - run: make build - working-directory: kubo-gateway - - # 3. Init the kubo-gateway - - name: Init kubo-gateway - run: | - ./ipfs init -e - ./ipfs config --json Gateway.PublicGateways "$GATEWAY_PUBLIC_GATEWAYS" - working-directory: kubo-gateway/cmd/ipfs - - # 4. 
Populate the Kubo gateway with the gateway-conformance fixtures - - name: Import fixtures - run: | - # Import car files - find ./fixtures -name '*.car' -exec kubo-gateway/cmd/ipfs/ipfs dag import --pin-roots=false {} \; - - # Import ipns records - records=$(find ./fixtures -name '*.ipns-record') - for record in $records - do - key=$(basename -s .ipns-record "$record" | cut -d'_' -f1) - kubo-gateway/cmd/ipfs/ipfs routing put --allow-offline "/ipns/$key" "$record" - done - - # Import dnslink records - # the IPFS_NS_MAP env will be used by the daemon - echo "IPFS_NS_MAP=$(cat ./fixtures/dnslinks.IPFS_NS_MAP)" >> $GITHUB_ENV - - # 5. Start the kubo-gateway - - name: Start kubo-gateway - run: | - ./ipfs daemon --offline & - working-directory: kubo-gateway/cmd/ipfs - - # 6. Run the gateway-conformance tests - - name: Run gateway-conformance tests - uses: ipfs/gateway-conformance/.github/actions/test@v0.7 - with: - gateway-url: http://127.0.0.1:8080 - subdomain-url: http://localhost:8080 - args: -skip 'TestGatewayCar/GET_response_for_application/vnd.ipld.car/Header_Content-Length' - json: output.json - xml: output.xml - html: output.html - markdown: output.md - - # 7. Upload the results - - name: Upload MD summary - if: failure() || success() - run: cat output.md >> $GITHUB_STEP_SUMMARY - - name: Upload HTML report - if: failure() || success() - uses: actions/upload-artifact@v4 - with: - name: gateway-conformance.html - path: output.html - - name: Upload JSON report - if: failure() || success() - uses: actions/upload-artifact@v4 - with: - name: gateway-conformance.json - path: output.json - - # Testing trustless gateway feature subset exposed as libp2p protocol - gateway-conformance-libp2p-experiment: - runs-on: ubuntu-latest - timeout-minutes: 10 - steps: - # 1. Download the gateway-conformance fixtures - - name: Download gateway-conformance fixtures - uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.7 - with: - output: fixtures - - # 2. Build the kubo-gateway - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: 1.23.x - - uses: protocol/cache-go-action@v1 - with: - name: ${{ github.job }} - - name: Checkout kubo-gateway - uses: actions/checkout@v4 - with: - path: kubo-gateway - - name: Build kubo-gateway - run: make build - working-directory: kubo-gateway - - # 3. Init the kubo-gateway - - name: Init kubo-gateway - run: | - ./ipfs init --profile=test - ./ipfs config --json Gateway.PublicGateways "$GATEWAY_PUBLIC_GATEWAYS" - ./ipfs config --json Experimental.GatewayOverLibp2p true - ./ipfs config Addresses.Gateway "/ip4/127.0.0.1/tcp/8080" - ./ipfs config Addresses.API "/ip4/127.0.0.1/tcp/5001" - working-directory: kubo-gateway/cmd/ipfs - - # 4. Populate the Kubo gateway with the gateway-conformance fixtures - - name: Import fixtures - run: | - # Import car files - find ./fixtures -name '*.car' -exec kubo-gateway/cmd/ipfs/ipfs dag import --pin-roots=false {} \; - - # 5. Start the kubo-gateway - - name: Start kubo-gateway - run: | - ( ./ipfs daemon & ) | sed '/Daemon is ready/q' - while [[ "$(./ipfs id | jq '.Addresses | length')" == '0' ]]; do sleep 1; done - working-directory: kubo-gateway/cmd/ipfs - - # 6. 
Setup a kubo http-p2p-proxy to expose libp2p protocol as a regular HTTP port for gateway conformance tests - - name: Init p2p-proxy kubo node - env: - IPFS_PATH: "~/.kubo-p2p-proxy" - run: | - ./ipfs init --profile=test -e - ./ipfs config --json Experimental.Libp2pStreamMounting true - ./ipfs config Addresses.Gateway "/ip4/127.0.0.1/tcp/8081" - ./ipfs config Addresses.API "/ip4/127.0.0.1/tcp/5002" - working-directory: kubo-gateway/cmd/ipfs - - # 7. Start the kubo http-p2p-proxy - - name: Start kubo http-p2p-proxy - env: - IPFS_PATH: "~/.kubo-p2p-proxy" - run: | - ( ./ipfs daemon & ) | sed '/Daemon is ready/q' - while [[ "$(./ipfs id | jq '.Addresses | length')" == '0' ]]; do sleep 1; done - working-directory: kubo-gateway/cmd/ipfs - - # 8. Start forwarding data from the http-p2p-proxy to the node serving the Gateway API over libp2p - - name: Start http-over-libp2p forwarding proxy - run: | - gatewayNodeId=$(./ipfs --api=/ip4/127.0.0.1/tcp/5001 id -f="<id>") - ./ipfs --api=/ip4/127.0.0.1/tcp/5002 swarm connect $(./ipfs --api=/ip4/127.0.0.1/tcp/5001 swarm addrs local --id | head -n 1) - ./ipfs --api=/ip4/127.0.0.1/tcp/5002 p2p forward --allow-custom-protocol /http/1.1 /ip4/127.0.0.1/tcp/8092 /p2p/$gatewayNodeId - working-directory: kubo-gateway/cmd/ipfs - - # 9. Run the gateway-conformance tests over libp2p - - name: Run gateway-conformance tests over libp2p - uses: ipfs/gateway-conformance/.github/actions/test@v0.7 - with: - gateway-url: http://127.0.0.1:8092 - args: --specs "trustless-gateway,-trustless-ipns-gateway" -skip 'TestGatewayCar/GET_response_for_application/vnd.ipld.car/Header_Content-Length' - json: output.json - xml: output.xml - html: output.html - markdown: output.md - - # 10. Upload the results - - name: Upload MD summary - if: failure() || success() - run: cat output.md >> $GITHUB_STEP_SUMMARY - - name: Upload HTML report - if: failure() || success() - uses: actions/upload-artifact@v4 - with: - name: gateway-conformance-libp2p.html - path: output.html - - name: Upload JSON report - if: failure() || success() - uses: actions/upload-artifact@v4 - with: - name: gateway-conformance-libp2p.json - path: output.json diff --git a/.github/workflows/gobuild.yml b/.github/workflows/gobuild.yml deleted file mode 100644 index 5aebfd9385f..00000000000 --- a/.github/workflows/gobuild.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: Go Build - -on: - workflow_dispatch: - pull_request: - paths-ignore: - - '**/*.md' - push: - branches: - - 'master' - -concurrency: - group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} - cancel-in-progress: true - -jobs: - go-build: - if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' - runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "4xlarge"]' || '"ubuntu-latest"') }} - timeout-minutes: 20 - env: - TEST_DOCKER: 0 - TEST_VERBOSE: 1 - TRAVIS: 1 - GIT_PAGER: cat - IPFS_CHECK_RCMGR_DEFAULTS: 1 - defaults: - run: - shell: bash - steps: - - uses: actions/setup-go@v5 - with: - go-version: 1.23.x - - uses: actions/checkout@v4 - - run: make cmd/ipfs-try-build - env: - TEST_FUSE: 1 - - run: make cmd/ipfs-try-build - env: - TEST_FUSE: 0 diff --git a/.github/workflows/golang-analysis.yml b/.github/workflows/golang-analysis.yml deleted file mode 100644 index 36a5bba01ba..00000000000 --- a/.github/workflows/golang-analysis.yml +++ /dev/null @@ -1,54 +0,0 @@ -name: Go Check - -on: - workflow_dispatch: - pull_request: - paths-ignore: - - '**/*.md' 
- push: - branches: - - 'master' - -permissions: - contents: read # to fetch code (actions/checkout) - -concurrency: - group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} - cancel-in-progress: true - -jobs: - go-check: - if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' - runs-on: ubuntu-latest - timeout-minutes: 10 - steps: - - uses: actions/checkout@v4 - with: - submodules: recursive - - uses: actions/setup-go@v5 - with: - go-version: "1.23.x" - - name: Check that go.mod is tidy - uses: protocol/multiple-go-modules@v1.4 - with: - run: | - go mod tidy - if [[ -n $(git ls-files --other --exclude-standard --directory -- go.sum) ]]; then - echo "go.sum was added by go mod tidy" - exit 1 - fi - git diff --exit-code -- go.sum go.mod - - name: go fmt - if: always() # run this step even if the previous one failed - run: | - out=$(go fmt ./...) - if [[ -n "$out" ]]; then - echo "Files are not go-fmt-ed:" - echo "$out" - exit 1 - fi - - name: go vet - if: always() # run this step even if the previous one failed - uses: protocol/multiple-go-modules@v1.4 - with: - run: go vet ./... diff --git a/.github/workflows/golint.yml b/.github/workflows/golint.yml deleted file mode 100644 index 57b5d46ddd5..00000000000 --- a/.github/workflows/golint.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: Go Lint - -on: - workflow_dispatch: - pull_request: - paths-ignore: - - '**/*.md' - push: - branches: - - 'master' - -concurrency: - group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} - cancel-in-progress: true - -jobs: - go-lint: - if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' - runs-on: ubuntu-latest - timeout-minutes: 10 - env: - TEST_DOCKER: 0 - TEST_FUSE: 0 - TEST_VERBOSE: 1 - TRAVIS: 1 - GIT_PAGER: cat - IPFS_CHECK_RCMGR_DEFAULTS: 1 - defaults: - run: - shell: bash - steps: - - uses: actions/setup-go@v5 - with: - go-version: 1.23.x - - uses: actions/checkout@v4 - - run: make -O test_go_lint diff --git a/.github/workflows/gotest.yml b/.github/workflows/gotest.yml deleted file mode 100644 index 4e1a227c2cf..00000000000 --- a/.github/workflows/gotest.yml +++ /dev/null @@ -1,109 +0,0 @@ -name: Go Test - -on: - workflow_dispatch: - pull_request: - paths-ignore: - - '**/*.md' - push: - branches: - - 'master' - -concurrency: - group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} - cancel-in-progress: true - -jobs: - go-test: - if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' - runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }} - timeout-minutes: 20 - env: - TEST_DOCKER: 0 - TEST_FUSE: 0 - TEST_VERBOSE: 1 - TRAVIS: 1 - GIT_PAGER: cat - IPFS_CHECK_RCMGR_DEFAULTS: 1 - defaults: - run: - shell: bash - steps: - - name: Set up Go - uses: actions/setup-go@v5 - with: - go-version: 1.23.x - - name: Check out Kubo - uses: actions/checkout@v4 - - name: Install missing tools - run: sudo apt update && sudo apt install -y zsh - - name: πŸ‘‰οΈ If this step failed, go to Β«SummaryΒ» (top left) β†’ inspect the Β«Failures/ErrorsΒ» table - env: - # increasing parallelism beyond 2 doesn't speed up the tests much - PARALLEL: 2 - run: | - make -j "$PARALLEL" test/unit/gotest.junit.xml && - [[ ! 
$(jq -s -c 'map(select(.Action == "fail")) | .[]' test/unit/gotest.json) ]] - - name: Upload coverage to Codecov - uses: codecov/codecov-action@1e68e06f1dbfde0e4cefc87efeba9e4643565303 # v5.1.2 - if: failure() || success() - with: - name: unittests - files: coverage/unit_tests.coverprofile - - name: Test kubo-as-a-library example - run: | - # we want to first test with the kubo version in the go.mod file - go test -v ./... - - # we also want to test the examples against the current version of kubo - # however, that version might be in a fork so we need to replace the dependency - - # backup the go.mod and go.sum files to restore them after we run the tests - cp go.mod go.mod.bak - cp go.sum go.sum.bak - - # make sure the examples run against the current version of kubo - go mod edit -replace github.com/ipfs/kubo=./../../.. - go mod tidy - - go test -v ./... - - # restore the go.mod and go.sum files to their original state - mv go.mod.bak go.mod - mv go.sum.bak go.sum - working-directory: docs/examples/kubo-as-a-library - - name: Create a proper JUnit XML report - uses: ipdxco/gotest-json-to-junit-xml@v1 - with: - input: test/unit/gotest.json - output: test/unit/gotest.junit.xml - if: failure() || success() - - name: Archive the JUnit XML report - uses: actions/upload-artifact@v4 - with: - name: unit - path: test/unit/gotest.junit.xml - if: failure() || success() - - name: Create a HTML report - uses: ipdxco/junit-xml-to-html@v1 - with: - mode: no-frames - input: test/unit/gotest.junit.xml - output: test/unit/gotest.html - if: failure() || success() - - name: Archive the HTML report - uses: actions/upload-artifact@v4 - with: - name: html - path: test/unit/gotest.html - if: failure() || success() - - name: Create a Markdown report - uses: ipdxco/junit-xml-to-html@v1 - with: - mode: summary - input: test/unit/gotest.junit.xml - output: test/unit/gotest.md - if: failure() || success() - - name: Set the summary - run: cat test/unit/gotest.md >> $GITHUB_STEP_SUMMARY - if: failure() || success() diff --git a/.github/workflows/interop.yml b/.github/workflows/interop.yml deleted file mode 100644 index bfa6523de4e..00000000000 --- a/.github/workflows/interop.yml +++ /dev/null @@ -1,136 +0,0 @@ -name: Interop - -on: - workflow_dispatch: - pull_request: - paths-ignore: - - '**/*.md' - push: - branches: - - 'master' - -env: - GO_VERSION: 1.23.x - -concurrency: - group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} - cancel-in-progress: true - -defaults: - run: - shell: bash - -jobs: - interop-prep: - if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' - runs-on: ubuntu-latest - timeout-minutes: 5 - env: - TEST_DOCKER: 0 - TEST_FUSE: 0 - TEST_VERBOSE: 1 - TRAVIS: 1 - GIT_PAGER: cat - IPFS_CHECK_RCMGR_DEFAULTS: 1 - defaults: - run: - shell: bash - steps: - - uses: actions/setup-go@v5 - with: - go-version: ${{ env.GO_VERSION }} - - uses: actions/checkout@v4 - - run: make build - - uses: actions/upload-artifact@v4 - with: - name: kubo - path: cmd/ipfs/ipfs - helia-interop: - needs: [interop-prep] - runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }} - timeout-minutes: 20 - defaults: - run: - shell: bash - steps: - - uses: actions/setup-node@v4 - with: - node-version: lts/* - - uses: actions/download-artifact@v4 - with: - name: kubo - path: cmd/ipfs - - run: chmod +x cmd/ipfs/ipfs - - run: echo "dir=$(npm config get cache)" >> 
$GITHUB_OUTPUT - id: npm-cache-dir - - uses: actions/cache@v4 - with: - path: ${{ steps.npm-cache-dir.outputs.dir }} - key: ${{ runner.os }}-${{ github.job }}-helia-${{ hashFiles('**/package-lock.json') }} - restore-keys: ${{ runner.os }}-${{ github.job }}-helia- - - run: sudo apt update - - run: sudo apt install -y libxkbcommon0 libxdamage1 libgbm1 libpango-1.0-0 libcairo2 # dependencies for playwright - - run: npx --package @helia/interop helia-interop - env: - KUBO_BINARY: ${{ github.workspace }}/cmd/ipfs/ipfs - ipfs-webui: - needs: [interop-prep] - runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }} - timeout-minutes: 20 - env: - NO_SANDBOX: true - LIBP2P_TCP_REUSEPORT: false - LIBP2P_ALLOW_WEAK_RSA_KEYS: 1 - E2E_IPFSD_TYPE: go - TRAVIS: 1 - GIT_PAGER: cat - IPFS_CHECK_RCMGR_DEFAULTS: 1 - defaults: - run: - shell: bash - steps: - - uses: actions/setup-node@v4 - with: - node-version: 18.14.0 - - uses: actions/download-artifact@v4 - with: - name: kubo - path: cmd/ipfs - - run: chmod +x cmd/ipfs/ipfs - - uses: actions/checkout@v4 - with: - repository: ipfs/ipfs-webui - path: ipfs-webui - - run: | - echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT - id: npm-cache-dir - - uses: actions/cache@v4 - with: - path: ${{ steps.npm-cache-dir.outputs.dir }} - key: ${{ runner.os }}-${{ github.job }}-${{ hashFiles('**/package-lock.json') }} - restore-keys: | - ${{ runner.os }}-${{ github.job }}- - - env: - NPM_CACHE_DIR: ${{ steps.npm-cache-dir.outputs.dir }} - run: | - npm ci --prefer-offline --no-audit --progress=false --cache "$NPM_CACHE_DIR" - npx playwright install --with-deps - working-directory: ipfs-webui - - id: ref - run: echo "ref=$(git rev-parse --short HEAD)" | tee -a $GITHUB_OUTPUT - working-directory: ipfs-webui - - id: state - env: - GITHUB_TOKEN: ${{ github.token }} - ENDPOINT: repos/ipfs/ipfs-webui/commits/${{ steps.ref.outputs.ref }}/status - SELECTOR: .state - KEY: state - run: gh api "$ENDPOINT" --jq "$SELECTOR" | xargs -I{} echo "$KEY={}" | tee -a $GITHUB_OUTPUT - - name: Build ipfs-webui@main (state=${{ steps.state.outputs.state }}) - run: npm run test:build - working-directory: ipfs-webui - - name: Test ipfs-webui@main (state=${{ steps.state.outputs.state }}) E2E against the locally built Kubo binary - run: npm run test:e2e - env: - IPFS_GO_EXEC: ${{ github.workspace }}/cmd/ipfs/ipfs - working-directory: ipfs-webui diff --git a/.github/workflows/sharness.yml b/.github/workflows/sharness.yml index dbb4a5d192d..d7a8d0dab43 100644 --- a/.github/workflows/sharness.yml +++ b/.github/workflows/sharness.yml @@ -4,10 +4,10 @@ on: workflow_dispatch: pull_request: paths-ignore: - - '**/*.md' + - "**/*.md" push: branches: - - 'master' + - "master" concurrency: group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} @@ -31,7 +31,7 @@ jobs: with: path: kubo - name: Install missing tools - run: sudo apt update && sudo apt install -y socat net-tools fish libxml2-utils + run: sudo apt update && sudo apt install -y socat=1.7.4 net-tools fish libxml2-utils && socat -V - uses: actions/cache@v4 with: path: test/sharness/lib/dependencies diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml deleted file mode 100644 index 16d65d72175..00000000000 --- a/.github/workflows/stale.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: Close and mark stale issue - -on: - schedule: - - cron: '0 0 * * *' - -permissions: - issues: write - pull-requests: write - 
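One idiom recurs across the workflows in this diff (the npm cache-dir steps above, the Docker "Get tags" step earlier): steps publish values through `$GITHUB_OUTPUT` and later steps read them back as `${{ steps.<id>.outputs.<key> }}`. A minimal sketch of both the single-line and multi-line forms, with step ids assumed for illustration:

```bash
# Single-line value, read later as ${{ steps.npm-cache-dir.outputs.dir }}:
echo "dir=$(npm config get cache)" >> "$GITHUB_OUTPUT"

# Multi-line value, as in the Docker "Get tags" step: wrap the payload
# in a heredoc-style sentinel so the runner knows where the value ends.
{
  echo "value<<EOF"
  ./bin/get-docker-tags.sh "$(date -u +%F)"
  echo "EOF"
} >> "$GITHUB_OUTPUT"
```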
-jobs: - stale: - uses: pl-strflt/.github/.github/workflows/reusable-stale-issue.yml@v0.3 diff --git a/.github/workflows/sync-release-assets.yml b/.github/workflows/sync-release-assets.yml deleted file mode 100644 index 0d5c8199b65..00000000000 --- a/.github/workflows/sync-release-assets.yml +++ /dev/null @@ -1,139 +0,0 @@ -name: Sync GitHub Release Assets - -on: - workflow_dispatch: - schedule: - - cron: '0 0 * * *' - -concurrency: - group: release-assets-dist-sync - cancel-in-progress: true - -permissions: - contents: write # to upload release asset - -jobs: - dist-ipfs-tech: - if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' - runs-on: "ubuntu-latest" - timeout-minutes: 15 - steps: - - uses: ipfs/download-ipfs-distribution-action@v1 - - uses: ipfs/start-ipfs-daemon-action@v1 - with: - args: --init --init-profile=flatfs,server --enable-gc=false - - uses: actions/setup-node@v4 - with: - node-version: 14 - - name: Sync the latest 5 github releases - uses: actions/github-script@v7 - with: - script: | - const fs = require('fs').promises - const max_synced = 5 - - // fetch github releases - resp = await github.rest.repos.listReleases({ - owner: context.repo.owner, - repo: context.repo.repo, - page: 1, - per_page: max_synced - }) - const release_assets = []; - num_synced = 0; - for (const release of resp.data) { - console.log("checking release tagged", release.tag_name) - if (num_synced > max_synced) { - console.log("done: synced", max_synced, "latest releases") - break; - } - num_synced += 1 - - const github_assets = new Set() - for (const asset of release.assets) { - github_assets.add(asset.name) - } - - // fetch asset info from dist.ipfs.tech - p = '/ipns/dist.ipfs.tech/kubo/' + release.tag_name - let stdout = '' - const options = {} - options.listeners = { - stdout: (data) => { - stdout += data.toString(); - } - } - await exec.exec('ipfs', ['ls', p], options) - - const dist_assets = new Set() - const missing_files = [] - for (const raw_line of stdout.split("\n")) { - line = raw_line.trim(); - if (line.length != 0) { - file = line.split(/(\s+)/).filter( function(e) { return e.trim().length > 0; } )[2] - dist_assets.add(file) - if (!github_assets.has(file)) { - missing_files.push(file) - } - } - } - - // if dist.ipfs.tech has files not found in github, copy them over - for (const file of missing_files) { - file_sha = file + ".sha512" - file_cid = file + ".cid" - - // skip files that don't have .cid and .sha512 checksum files - if (!dist_assets.has(file_sha) || !dist_assets.has(file_cid)) { - if (!file.endsWith('.cid') && !file.endsWith('.sha512')) { // silent skip of .sha512.sha512 :) - console.log(`skipping "${file}" as dist.ipfs.tech does not provide .cid and .sha512 checksum files for it`) - } - continue - } - - console.log("fetching", file, "from dist.ipfs.tech") - await exec.exec('ipfs', ['get', p + '/' + file]) - await exec.exec('ipfs', ['get', p + '/' + file_sha]) - await exec.exec('ipfs', ['get', p + '/' + file_cid]) - console.log("verifying contents of", file) - - // compute sha512 output for file - let sha_stdout = '' - const sha_options = {} - sha_options.listeners = { - stdout: (data) => { - sha_stdout += data.toString(); - } - } - await exec.exec('sha512sum', [file], sha_options) - // read expected sha512 output - const sha_data = await fs.readFile(file_sha, "utf8") - const digest = (s) => s.split(' ').shift() - if (digest(sha_data) != digest(sha_stdout)) { - console.log(`${file}.sha512: ${sha_data}`) - console.log(`sha512sum ${file}: 
${sha_stdout}`) - throw "checksum verification failed for " + file - } - - console.log("uploading", file, "to github release", release.tag_name) - const uploadReleaseAsset = async (file) => github.rest.repos.uploadReleaseAsset({ - owner: context.repo.owner, - repo: context.repo.repo, - release_id: release.id, - headers: { - "content-type": "application/octet-stream", - "content-length": `${(await fs.stat(file)).size}` - }, - name: file, - data: await fs.readFile(file) - }) - await uploadReleaseAsset(file) - await uploadReleaseAsset(file_sha) - await uploadReleaseAsset(file_cid) - - } - // summary of assets on both sides - release_assets.push({ tag: release.tag_name, github_assets, dist_assets }) - } - console.log(release_assets) - return release_assets diff --git a/test/sharness/t0001-tests-work.sh b/test/sharness/t0001-tests-work.sh deleted file mode 100755 index 6394fed77d6..00000000000 --- a/test/sharness/t0001-tests-work.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test sharness tests are correctly written" - -. lib/test-lib.sh - -for file in $(find .. -maxdepth 1 -name 't*.sh' -type f); do - test_expect_success "test in $file finishes" ' - grep -q "^test_done\b" "$file" - ' - - test_expect_success "test in $file has a description" ' - grep -q "^test_description=" "$file" - ' - - # We have some tests that manually kill. - case "$(basename "$file")" in - t0060-daemon.sh|t0023-shutdown.sh) continue ;; - esac - - test_expect_success "test in $file has matching ipfs start/stop" ' - awk "/^ *[^#]*test_launch_ipfs_daemon/ { if (count != 0) { exit(1) }; count++ } /^ *[^#]*test_kill_ipfs_daemon/ { if (count != 1) { exit(1) }; count-- } END { exit(count) }" "$file" - ' -done - -test_done diff --git a/test/sharness/t0002-docker-image.sh b/test/sharness/t0002-docker-image.sh deleted file mode 100755 index 2ff827806ba..00000000000 --- a/test/sharness/t0002-docker-image.sh +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2015 Christian Couder -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test docker image" - -. lib/test-lib.sh - -# if in travis CI on OSX, docker is not available -if ! 
test_have_prereq DOCKER; then - skip_all='skipping docker tests, docker not available' - - test_done -fi - -test_expect_success "'docker --version' works" ' - docker --version >actual -' - -test_expect_success "'docker --version' output looks good" ' - egrep "^Docker version" actual -' - -TEST_TRASH_DIR=$(pwd) -TEST_SCRIPTS_DIR=$(dirname "$TEST_TRASH_DIR") -TEST_TESTS_DIR=$(dirname "$TEST_SCRIPTS_DIR") -APP_ROOT_DIR=$(dirname "$TEST_TESTS_DIR") -IMAGE_TAG=kubo_test - -test_expect_success "docker image build succeeds" ' - docker_build "$IMAGE_TAG" "$TEST_TESTS_DIR/../Dockerfile" "$APP_ROOT_DIR" || - test_fsh echo "TEST_TESTS_DIR: $TEST_TESTS_DIR" || - test_fsh echo "APP_ROOT_DIR : $APP_ROOT_DIR" -' - -test_expect_success "write init scripts" ' - echo "ipfs config Foo Bar" > 001.sh && - echo "ipfs config Baz Qux" > 002.sh && - chmod +x 002.sh -' - -test_expect_success "docker image runs" ' - DOC_ID=$(docker run -d \ - -p 127.0.0.1:5001:5001 -p 127.0.0.1:8080:8080 \ - -v "$PWD/001.sh":/container-init.d/001.sh \ - -v "$PWD/002.sh":/container-init.d/002.sh \ - "$IMAGE_TAG") -' - -test_expect_success "docker container gateway is up" ' - pollEndpoint -host=/ip4/127.0.0.1/tcp/8080 -http-url http://localhost:8080/ipfs/bafkqaddimvwgy3zao5xxe3debi -v -tries 30 -tout 1s -' - -test_expect_success "docker container API is up" ' - pollEndpoint -host=/ip4/127.0.0.1/tcp/5001 -http-url http://localhost:5001/version -v -tries 30 -tout 1s -' - -test_expect_success "check that init scripts were run correctly and in the correct order" " - echo -e \"Sourcing '/container-init.d/001.sh'...\nExecuting '/container-init.d/002.sh'...\" > expected && - docker logs $DOC_ID 2>/dev/null | grep -e 001.sh -e 002.sh > actual && - test_cmp actual expected -" - -test_expect_success "check that init script configs were applied" ' - echo Bar > expected && - docker exec "$DOC_ID" ipfs config Foo > actual && - test_cmp actual expected && - echo Qux > expected && - docker exec "$DOC_ID" ipfs config Baz > actual && - test_cmp actual expected -' - -test_expect_success "simple ipfs add/cat can be run in docker container" ' - echo "Hello Worlds" | tr -d "[:cntrl:]" > expected && - HASH=$(docker_exec "$DOC_ID" "echo $(cat expected) | ipfs add -q" | tr -d "[:cntrl:]") && - docker_exec "$DOC_ID" "ipfs cat $HASH" | tr -d "[:cntrl:]" > actual && - test_cmp expected actual -' - -read testcode <actual ; \ - test -s actual ; \ - docker exec -i "$DOC_ID" ipfs version --enc json \ - | sed 's/^.*"Commit":"\\\([^"]*\\\)".*$/\\\1/g' >expected ; \ - test -s expected ; \ - test_cmp expected actual -EOF -test_expect_success "version CurrentCommit is set" "$testcode" - -test_expect_success "stop docker container" ' - docker_stop "$DOC_ID" -' - -docker_rm "$DOC_ID" -docker_rmi "$IMAGE_TAG" -test_done diff --git a/test/sharness/t0003-docker-migrate.sh b/test/sharness/t0003-docker-migrate.sh deleted file mode 100755 index c2c7ce9697c..00000000000 --- a/test/sharness/t0003-docker-migrate.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2017 Whyrusleeping -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test docker image migration" - -. lib/test-lib.sh - -# if in travis CI on OSX, docker is not available -if ! test_have_prereq DOCKER; then - skip_all='skipping '$test_description', docker not available' - - test_done -fi - -if ! 
test_have_prereq SOCAT; then - skip_all="skipping '$test_description': socat is not available" - test_done -fi - -TEST_TRASH_DIR=$(pwd) -TEST_SCRIPTS_DIR=$(dirname "$TEST_TRASH_DIR") -TEST_TESTS_DIR=$(dirname "$TEST_SCRIPTS_DIR") -APP_ROOT_DIR=$(dirname "$TEST_TESTS_DIR") -IMAGE_TAG=kubo_migrate - -test_expect_success "docker image build succeeds" ' - docker_build "$IMAGE_TAG" "$TEST_TESTS_DIR/../Dockerfile" "$APP_ROOT_DIR" -' - -test_init_ipfs - -test_expect_success "configure migration sources" ' - ipfs config --json Migration.DownloadSources "[\"http://127.0.0.1:17233\"]" -' - -test_expect_success "setup http response" ' - mkdir migration && - echo "v1.1.1" > migration/versions && - mkdir -p migration/fs-repo-6-to-7 && - echo "v1.1.1" > migration/fs-repo-6-to-7/versions && - CID=$(ipfs add -r -Q migration) && - echo "HTTP/1.1 200 OK" > vers_resp && - echo "Content-Type: application/vnd.ipld.car" >> vers_resp && - echo "" >> vers_resp && - ipfs dag export $CID >> vers_resp -' - -test_expect_success "make repo be version 4" ' - echo 4 > "$IPFS_PATH/version" -' - -test_expect_success "startup fake dists server" ' - ( socat tcp-listen:17233,fork,bind=127.0.0.1,reuseaddr "SYSTEM:cat vers_resp"!!STDERR 2> dist_serv_out ) & - echo $! > netcat_pid -' - -test_expect_success "docker image runs" ' - DOC_ID=$(docker run -d -v "$IPFS_PATH":/data/ipfs -e IPFS_DIST_PATH=/ipfs/$CID --net=host "$IMAGE_TAG") -' - -test_expect_success "docker container tries to pull migrations from netcat" ' - sleep 4 && - cat dist_serv_out -' - -test_expect_success "see logs" ' - docker logs $DOC_ID -' - -test_expect_success "stop docker container" ' - docker_stop "$DOC_ID" -' - -test_expect_success "kill the net cat" ' - kill $(cat netcat_pid) || true -' - -test_expect_success "correct version was requested" ' - grep "/fs-repo-6-to-7/v1.1.1/fs-repo-6-to-7_v1.1.1_linux-amd64.tar.gz" dist_serv_out > /dev/null -' - -docker_rm "$DOC_ID" -docker_rmi "$IMAGE_TAG" -test_done diff --git a/test/sharness/t0012-completion-fish.sh b/test/sharness/t0012-completion-fish.sh deleted file mode 100755 index 9c794d993db..00000000000 --- a/test/sharness/t0012-completion-fish.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test generated fish completions" - -. lib/test-lib.sh - -test_expect_success "'ipfs commands completion fish' succeeds" ' - ipfs commands completion fish > completions.fish -' - -test_expect_success "generated completions completes 'ipfs version'" ' - fish -c "source completions.fish && complete -C \"ipfs ver\" | grep -q \"version.Show IPFS version information.\" " -' - -test_done - diff --git a/test/sharness/t0015-basic-sh-functions.sh b/test/sharness/t0015-basic-sh-functions.sh deleted file mode 100755 index 81c67d9d279..00000000000 --- a/test/sharness/t0015-basic-sh-functions.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2015 Christian Couder -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test some basic shell functions" - -. lib/test-lib.sh - -test_expect_success "shellquote works with simple stuff" ' - var=$(shellquote one two) -' - -test_expect_success "shellquote output looks good" ' - test "$var" = "'\''one'\'' '\''two'\''" || - test_fsh echo "var is \"$var\" instead of \"'\''one'\'' '\''two'\''\"" -' - -# The following two printf statements are equivalent: -# printf "%s\n" \''"foo\ -# bar' -# printf "\047\042\146\157\157\134\012\142\141\162\012" -# We use the second one to simplify quoting. 
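To make the equivalence above concrete, the octal escapes decode byte for byte to the string the first printf spells out literally. This quick check is safe to run anywhere and shows the decoded bytes:

```bash
# \047 = '    \042 = "    \146\157\157 = foo    \134 = \
# \012 = newline    \142\141\162 = bar    \012 = newline
printf "\047\042\146\157\157\134\012\142\141\162\012" | od -c
# od should report the bytes:  '   "   f   o   o   \  \n   b   a   r  \n
```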
- -test_expect_success "shellquote works with complex printf" ' - eval "$(shellquote printf "\047\042\146\157\157\134\012\142\141\162\012")" >actual -' - -test_expect_success "shellquote output looks good" ' - printf "\047\042\146\157\157\134\012\142\141\162\012" >expected && - test_cmp expected actual -' - -test_expect_success "shellquote works with many different bytes" ' - bytes_sans_NUL=$( - printf "\001\002\003\004\005\006\007\010\011\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037\040\041\042\043\044%%\046\047\050\051\052\053\054\055\056\057\060\061\062\063\064\065\066\067\070\071\072\073\074\075\076\077\100\101\102\103\104\105\106\107\110\111\112\113\114\115\116\117\120\121\122\123\124\125\126\127\130\131\132\133\134\135\136\137\140\141\142\143\144\145\146\147\150\151\152\153\154\155\156\157\160\161\162\163\164\165\166\167\170\171\172\173\174\175\176\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377" - ) && - eval "$(shellquote printf "%s" "$bytes_sans_NUL")" >actual -' - -test_expect_success "shellquote output looks good" ' - printf "%s" "$bytes_sans_NUL" >expected && - test_cmp expected actual -' - -test_done diff --git a/test/sharness/t0018-indent.sh b/test/sharness/t0018-indent.sh deleted file mode 100755 index 5fa398fd2f5..00000000000 --- a/test/sharness/t0018-indent.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test sharness test indent" - -. lib/test-lib.sh - -for file in $(find .. -name 't*.sh' -type f); do - test_expect_success "indent in $file is not using tabs" ' - test_must_fail grep -P "^ *\t" $file - ' -done - -test_done diff --git a/test/sharness/t0021-config.sh b/test/sharness/t0021-config.sh deleted file mode 100755 index 95a8a7d8746..00000000000 --- a/test/sharness/t0021-config.sh +++ /dev/null @@ -1,302 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test config command" - -. lib/test-lib.sh - -# we use a function so that we can run it both offline + online -test_config_cmd_set() { - - # flags (like --bool in "ipfs config --bool") - cfg_flags="" # unset in case. - test "$#" = 3 && { cfg_flags=$1; shift; } - - cfg_key=$1 - cfg_val=$2 - test_expect_success "ipfs config succeeds" ' - ipfs config $cfg_flags "$cfg_key" "$cfg_val" - ' - - test_expect_success "ipfs config output looks good" ' - echo "$cfg_val" >expected && - ipfs config "$cfg_key" >actual && - test_cmp expected actual - ' - - # also test our lib function. it should work too. - cfg_key="Lib.$cfg_key" - test_expect_success "test_config_set succeeds" ' - test_config_set $cfg_flags "$cfg_key" "$cfg_val" - ' - - test_expect_success "test_config_set value looks good" ' - echo "$cfg_val" >expected && - ipfs config "$cfg_key" >actual && - test_cmp expected actual - ' -} - -# this is a bit brittle. the problem is we need to test -# with something that will be forced to unmarshal as a struct. -# (i.e. just setting 'ipfs config --json foo "[1, 2, 3]"') may -# set it as astring instead of proper json. We leverage the -# unmarshalling that has to happen. 
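In other words: without `--json` the value is stored as a literal string, while `--json` parses the value first, so booleans and nested structs survive and Go's unmarshalling is actually exercised. A small illustration of the difference, reusing the same keys the test_config_cmd calls and the CONFIG_SET_JSON_TEST struct below rely on:

```bash
# Without a type flag the value lands as a string:
ipfs config beep3 true         # stores "beep3": "true"

# --json parses the value first, so types and structs survive:
ipfs config --json beep3 true  # stores "beep3": true
ipfs config --json Discovery '{"MDNS": {"Enabled": true, "Interval": 10}}'
```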
-CONFIG_SET_JSON_TEST='{ - "MDNS": { - "Enabled": true, - "Interval": 10 - } -}' - -test_profile_apply_revert() { - profile=$1 - inverse_profile=$2 - - test_expect_success "save expected config" ' - ipfs config show >expected - ' - - test_expect_success "'ipfs config profile apply ${profile}' works" ' - ipfs config profile apply '${profile}' - ' - - test_expect_success "profile ${profile} changed something" ' - ipfs config show >actual && - test_must_fail test_cmp expected actual - ' - - test_expect_success "'ipfs config profile apply ${inverse_profile}' works" ' - ipfs config profile apply '${inverse_profile}' - ' - - test_expect_success "config is back to previous state after ${inverse_profile} was applied" ' - ipfs config show >actual && - test_cmp expected actual - ' -} - -test_profile_apply_dry_run_not_alter() { - profile=$1 - - test_expect_success "'ipfs config profile apply ${profile} --dry-run' doesn't alter config" ' - cat "$IPFS_PATH/config" >expected && - ipfs config profile apply '${profile}' --dry-run && - cat "$IPFS_PATH/config" >actual && - test_cmp expected actual - ' -} - -test_config_cmd() { - test_config_cmd_set "beep" "boop" - test_config_cmd_set "beep1" "boop2" - test_config_cmd_set "beep1" "boop2" - test_config_cmd_set "--bool" "beep2" "true" - test_config_cmd_set "--bool" "beep2" "false" - test_config_cmd_set "--json" "beep3" "true" - test_config_cmd_set "--json" "beep3" "false" - test_config_cmd_set "--json" "Discovery" "$CONFIG_SET_JSON_TEST" - test_config_cmd_set "--json" "deep-not-defined.prop" "true" - test_config_cmd_set "--json" "deep-null" "null" - test_config_cmd_set "--json" "deep-null.prop" "true" - - test_expect_success "'ipfs config show' works" ' - ipfs config show >actual - ' - - test_expect_success "'ipfs config show' output looks good" ' - grep "\"beep\": \"boop\"," actual && - grep "\"beep1\": \"boop2\"," actual && - grep "\"beep2\": false," actual && - grep "\"beep3\": false," actual - ' - - test_expect_success "'ipfs config show --config-file' works" ' - mv "$IPFS_PATH/config" "$IPFS_PATH/config-moved" && - ipfs config --config-file "$IPFS_PATH/config-moved" show >moved && - test_cmp moved actual && - mv "$IPFS_PATH/config-moved" "$IPFS_PATH/config" - ' - - test_expect_success "setup for config replace test" ' - cp "$IPFS_PATH/config" newconfig.json && - sed -i"~" -e /PrivKey/d -e s/10GB/11GB/ newconfig.json && - sed -i"~" -e '"'"'/PeerID/ {'"'"' -e '"'"' s/,$// '"'"' -e '"'"' } '"'"' newconfig.json - ' - - test_expect_success "run 'ipfs config replace'" ' - ipfs config replace - < newconfig.json - ' - - test_expect_success "check resulting config after 'ipfs config replace'" ' - sed -e /PrivKey/d "$IPFS_PATH/config" > replconfig.json && - sed -i"~" -e '"'"'/PeerID/ {'"'"' -e '"'"' s/,$// '"'"' -e '"'"' } '"'"' replconfig.json && - test_cmp replconfig.json newconfig.json - ' - - # SECURITY - # Those tests are here to prevent exposing the PrivKey on the network - - test_expect_success "'ipfs config Identity' fails" ' - test_expect_code 1 ipfs config Identity 2> ident_out - ' - - test_expect_success "output looks good" ' - echo "Error: cannot show or change private key through API" > ident_exp && - test_cmp ident_exp ident_out - ' - - test_expect_success "'ipfs config Identity.PrivKey' fails" ' - test_expect_code 1 ipfs config Identity.PrivKey 2> ident_out - ' - - test_expect_success "output looks good" ' - test_cmp ident_exp ident_out - ' - - test_expect_success "lower cased PrivKey" ' - sed -i"~" -e '\''s/PrivKey/privkey/'\'' "$IPFS_PATH/config" 
&& - test_expect_code 1 ipfs config Identity.privkey 2> ident_out - ' - - test_expect_success "output looks good" ' - test_cmp ident_exp ident_out - ' - - test_expect_success "fix it back" ' - sed -i"~" -e '\''s/privkey/PrivKey/'\'' "$IPFS_PATH/config" - ' - - test_expect_success "'ipfs config show' doesn't include privkey" ' - ipfs config show > show_config && - test_expect_code 1 grep PrivKey show_config - ' - - test_expect_success "'ipfs config replace' injects privkey back" ' - ipfs config replace show_config && - grep "\"PrivKey\":" "$IPFS_PATH/config" | grep -e ": \".\+\"" >/dev/null - ' - - test_expect_success "'ipfs config replace' with privkey errors out" ' - cp "$IPFS_PATH/config" real_config && - test_expect_code 1 ipfs config replace - < real_config 2> replace_out - ' - - test_expect_success "output looks good" ' - echo "Error: setting private key with API is not supported" > replace_expected - test_cmp replace_out replace_expected - ' - - test_expect_success "'ipfs config replace' with lower case privkey errors out" ' - cp "$IPFS_PATH/config" real_config && - sed -i -e '\''s/PrivKey/privkey/'\'' real_config && - test_expect_code 1 ipfs config replace - < real_config 2> replace_out - ' - - test_expect_success "output looks good" ' - echo "Error: setting private key with API is not supported" > replace_expected - test_cmp replace_out replace_expected - ' - - test_expect_success "'ipfs config Swarm.AddrFilters' looks good" ' - ipfs config Swarm.AddrFilters > actual_config && - test $(cat actual_config | wc -l) = 1 - ' - - test_expect_success "copy ipfs config" ' - cp "$IPFS_PATH/config" before_patch - ' - - test_expect_success "'ipfs config profile apply server' works" ' - ipfs config profile apply server - ' - - test_expect_success "backup was created and looks good" ' - test_cmp "$(find "$IPFS_PATH" -name "config-*")" before_patch - ' - - test_expect_success "'ipfs config Swarm.AddrFilters' looks good with server profile" ' - ipfs config Swarm.AddrFilters > actual_config && - test $(cat actual_config | wc -l) = 18 - ' - - test_expect_success "'ipfs config profile apply local-discovery' works" ' - ipfs config profile apply local-discovery - ' - - test_expect_success "'ipfs config Swarm.AddrFilters' looks good with applied local-discovery profile" ' - ipfs config Swarm.AddrFilters > actual_config && - test $(cat actual_config | wc -l) = 1 - ' - - test_profile_apply_revert server local-discovery - - # tests above mess with values this profile changes, need to do that before testing test profile - test_expect_success "ensure test profile is applied fully" ' - ipfs config profile apply test - ' - - # need to do this in reverse as the test profile is already applied in sharness - test_profile_apply_revert default-networking test - - test_profile_apply_dry_run_not_alter server - - test_profile_apply_dry_run_not_alter local-discovery - - test_profile_apply_dry_run_not_alter test - - test_expect_success "'ipfs config profile apply local-discovery --dry-run' looks good with different profile info" ' - ipfs config profile apply local-discovery --dry-run > diff_info && - test `grep "DisableNatPortMap" diff_info | wc -l` = 2 - ' - - test_expect_success "'ipfs config profile apply server --dry-run' looks good with same profile info" ' - ipfs config profile apply server --dry-run > diff_info && - test `grep "DisableNatPortMap" diff_info | wc -l` = 1 - ' - - test_expect_success "'ipfs config profile apply server' looks good with same profile info" ' - ipfs config profile apply server > 
diff_info && - test `grep "DisableNatPortMap" diff_info | wc -l` = 1 - ' - - test_expect_success "'ipfs config profile apply local-discovery' looks good with different profile info" ' - ipfs config profile apply local-discovery > diff_info && - test `grep "DisableNatPortMap" diff_info | wc -l` = 2 - ' - - test_expect_success "'ipfs config profile apply test' looks good with different profile info" ' - ipfs config profile apply test > diff_info && - test `grep "DisableNatPortMap" diff_info | wc -l` = 2 - ' - - test_expect_success "'ipfs config profile apply test --dry-run' doesn't include privkey" ' - ipfs config profile apply test --dry-run > show_config && - test_expect_code 1 grep PrivKey show_config - ' - - test_expect_success "'ipfs config profile apply test' doesn't include privkey" ' - ipfs config profile apply test > show_config && - test_expect_code 1 grep PrivKey show_config - ' - - # won't work as it changes datastore definition, which makes ipfs not launch - # without converting first - # test_profile_apply_revert pebbleds - - test_expect_success "cleanup config backups" ' - find "$IPFS_PATH" -name "config-*" -exec rm {} \; - ' -} - -test_init_ipfs - -# should work offline -test_config_cmd - -# should work online -test_launch_ipfs_daemon -test_config_cmd -test_kill_ipfs_daemon - - -test_done diff --git a/test/sharness/t0022-init-default.sh b/test/sharness/t0022-init-default.sh deleted file mode 100755 index ff1e2d07ec0..00000000000 --- a/test/sharness/t0022-init-default.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2014 Christian Couder -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test init command with default config" - -. lib/test-lib.sh - -cfg_key="Addresses.API" -cfg_val="/ip4/0.0.0.0/tcp/5001" - -# test that init succeeds -test_expect_success "ipfs init succeeds" ' - export IPFS_PATH="$(pwd)/.ipfs" && - echo "IPFS_PATH: \"$IPFS_PATH\"" && - BITS="2048" && - ipfs init >actual_init || - test_fsh cat actual_init -' - -test_expect_success ".ipfs/config has been created" ' - test -f "$IPFS_PATH"/config || - test_fsh ls -al .ipfs -' - -test_expect_success "ipfs config succeeds" ' - ipfs config $cfg_flags "$cfg_key" "$cfg_val" -' - -test_expect_success "ipfs read config succeeds" ' - IPFS_DEFAULT_CONFIG=$(cat "$IPFS_PATH"/config) -' - -test_expect_success "clean up ipfs dir" ' - rm -rf "$IPFS_PATH" -' - -test_expect_success "ipfs init default config succeeds" ' - echo $IPFS_DEFAULT_CONFIG | ipfs init - >actual_init || - test_fsh cat actual_init -' - -test_expect_success "ipfs config output looks good" ' - echo "$cfg_val" >expected && - ipfs config "$cfg_key" >actual && - test_cmp expected actual -' - -test_done diff --git a/test/sharness/t0023-shutdown.sh b/test/sharness/t0023-shutdown.sh deleted file mode 100755 index 33a0ca3f16f..00000000000 --- a/test/sharness/t0023-shutdown.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2017 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test shutdown command" - -. lib/test-lib.sh - -test_init_ipfs - -test_launch_ipfs_daemon - -test_expect_success "shutdown succeeds" ' - ipfs shutdown -' - -test_expect_success "daemon no longer running" ' - for i in $(test_seq 1 100) - do - go-sleep 100ms - ! 
kill -0 $IPFS_PID 2>/dev/null && return - done -' - -test_launch_ipfs_daemon_without_network - -test_expect_success "shutdown succeeds" ' - ipfs shutdown -' - -test_expect_success "daemon no longer running" ' - for i in $(test_seq 1 100) - do - go-sleep 100ms - ! kill -0 $IPFS_PID 2>/dev/null && return - done -' -test_done diff --git a/test/sharness/t0024-datastore-config.sh b/test/sharness/t0024-datastore-config.sh deleted file mode 100755 index 3be75b30226..00000000000 --- a/test/sharness/t0024-datastore-config.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test datastore config" - -. lib/test-lib.sh - -test_init_ipfs - -test_launch_ipfs_daemon -test_kill_ipfs_daemon - -SPEC_NOSYNC=$(cat ../t0024-files/spec-nosync) - -SPEC_NEWSHARDFUN=$(cat ../t0024-files/spec-newshardfun) - -test_expect_success "change runtime value in spec config" ' - ipfs config --json Datastore.Spec "$SPEC_NOSYNC" -' - -test_launch_ipfs_daemon -test_kill_ipfs_daemon - -test_expect_success "change on-disk value in spec config" ' - ipfs config --json Datastore.Spec "$SPEC_NEWSHARDFUN" -' - -test_expect_success "can not launch daemon after on-disk value change" ' - test_must_fail ipfs daemon -' - -test_done diff --git a/test/sharness/t0025-datastores.sh b/test/sharness/t0025-datastores.sh deleted file mode 100755 index 6be9eb3ed48..00000000000 --- a/test/sharness/t0025-datastores.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test non-standard datastores" - -. lib/test-lib.sh - -profiles=("flatfs" "pebbleds" "badgerds") -proot="$(mktemp -d "${TMPDIR:-/tmp}/t0025.XXXXXX")" - -for profile in "${profiles[@]}"; do - test_expect_success "'ipfs init --empty-repo=false --profile=$profile' succeeds" ' - BITS="2048" && - IPFS_PATH="$proot/$profile" && - ipfs init --empty-repo=false --profile=$profile - ' - test_expect_success "'ipfs pin add' and 'pin ls' works with $profile" ' - export IPFS_PATH="$proot/$profile" && - echo -n "hello_$profile" | ipfs block put --pin=true > hello_cid && - ipfs pin ls -t recursive "$(cat hello_cid)" - ' -done - -test_done diff --git a/test/sharness/t0026-id.sh b/test/sharness/t0026-id.sh deleted file mode 100755 index 992892a39a6..00000000000 --- a/test/sharness/t0026-id.sh +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test to make sure our identity information looks sane" - -. lib/test-lib.sh - -test_init_ipfs - -test_id_compute_agent() { - local AGENT_SUFFIX - AGENT_SUFFIX=$1 - AGENT_VERSION="$(ipfs version --number)" || return 1 - AGENT_COMMIT="$(ipfs version --number --commit)" || return 1 - if test "$AGENT_COMMIT" = "$AGENT_VERSION"; then - AGENT_COMMIT="" - else - AGENT_COMMIT="${AGENT_COMMIT##$AGENT_VERSION-}" - fi - AGENT_VERSION="kubo/$AGENT_VERSION/$AGENT_COMMIT" - if test -n "$AGENT_SUFFIX"; then - if test -n "$AGENT_COMMIT"; then - AGENT_VERSION="$AGENT_VERSION/" - fi - AGENT_VERSION="$AGENT_VERSION$AGENT_SUFFIX" - fi - echo "$AGENT_VERSION" -} - -test_expect_success "checking AgentVersion" ' - test_id_compute_agent > expected-agent-version && - ipfs id -f "<aver>\n" > actual-agent-version && - test_cmp expected-agent-version actual-agent-version -' - -test_expect_success "checking ID of self" ' - ipfs config Identity.PeerID > expected-id && - ipfs id -f "<id>\n" > actual-id && - test_cmp expected-id actual-id -' - -test_expect_success "checking and converting ID of a random peer while offline" ' - # Peer ID taken from `t0140-swarm.sh` test. 
- echo k2k4r8ncs1yoluq95unsd7x2vfhgve0ncjoggwqx9vyh3vl8warrcp15 > expected-id && - ipfs id -f "\n" --peerid-base base36 --offline QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N > actual-id && - test_cmp expected-id actual-id -' - -# agent-version-suffix (local, offline) -test_launch_ipfs_daemon --agent-version-suffix=test-suffix -test_expect_success "checking AgentVersion with suffix (local)" ' - test_id_compute_agent test-suffix > expected-agent-version && - ipfs id -f "\n" > actual-agent-version && - test_cmp expected-agent-version actual-agent-version -' - -# agent-version-suffix (over libp2p identify protocol) -iptb testbed create -type localipfs -count 2 -init -startup_cluster 2 --agent-version-suffix=test-suffix-identify -test_expect_success "checking AgentVersion with suffix (fetched via libp2p identify protocol)" ' - ipfsi 0 id -f "\n" > expected-identify-agent-version && - ipfsi 1 id "$(ipfsi 0 config Identity.PeerID)" -f "\n" > actual-libp2p-identify-agent-version && - test_cmp expected-identify-agent-version actual-libp2p-identify-agent-version -' -iptb stop - -test_kill_ipfs_daemon - -# Version.AgentSuffix overrides --agent-version-suffix (local, offline) -test_expect_success "setting Version.AgentSuffix in config" ' - ipfs config Version.AgentSuffix json-config-suffix -' -test_launch_ipfs_daemon --agent-version-suffix=ignored-cli-suffix -test_expect_success "checking AgentVersion with suffix set via JSON config" ' - test_id_compute_agent json-config-suffix > expected-agent-version && - ipfs id -f "\n" > actual-agent-version && - test_cmp expected-agent-version actual-agent-version -' -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0027-rotate.sh b/test/sharness/t0027-rotate.sh deleted file mode 100755 index b3e748e90d1..00000000000 --- a/test/sharness/t0027-rotate.sh +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test rotate command" - -. lib/test-lib.sh - -test_rotate() { - FROM_ALG=$1 - TO_ALG=$2 - - test_expect_success "ipfs init (from $FROM_ALG, to $TO_ALG)" ' - export IPFS_PATH="$(pwd)/.ipfs" && - case $FROM_ALG in - rsa) - ipfs init --profile=test -a=rsa > /dev/null - ;; - ed25519) - ipfs init --profile=test -a=ed25519 > /dev/null - ;; - *) - ipfs init --profile=test > /dev/null - ;; - esac - ' - - test_expect_success "Save first ID and key" ' - ipfs id -f="" > first_id && - ipfs id -f="" > first_key - ' - - test_launch_ipfs_daemon - - test_kill_ipfs_daemon - - test_expect_success "rotating keys" ' - case $TO_ALG in - rsa) - ipfs key rotate -t=rsa -s=2048 --oldkey=oldkey - ;; - ed25519) - ipfs key rotate -t=ed25519 --oldkey=oldkey - ;; - *) - ipfs key rotate --oldkey=oldkey - ;; - esac - ' - - test_expect_success "'ipfs key rotate -o self' should fail" ' - echo "Error: keystore name for back up cannot be named '\''self'\''" >expected-self - test_must_fail ipfs key rotate -o self 2>actual-self && - test_cmp expected-self actual-self - ' - - test_expect_success "Compare second ID and key to first" ' - ipfs id -f="" > second_id && - ipfs id -f="" > second_key && - ! test_cmp first_id second_id && - ! 
test_cmp first_key second_key - ' - - test_expect_success "checking ID" ' - ipfs config Identity.PeerID > expected-id && - ipfs id -f "\n" > actual-id && - ipfs key list -l --ipns-base=b58mh | grep self | cut -d " " -f1 > keystore-id && - ipfs key list -l --ipns-base=b58mh | grep oldkey | cut -d " " -f1 | tr -d "\n" > old-keystore-id && - test_cmp expected-id actual-id && - test_cmp expected-id keystore-id && - test_cmp old-keystore-id first_id - ' - - test_launch_ipfs_daemon - - test_expect_success "publish name with new and old keys" ' - echo "hello world" > msg && - ipfs add msg | cut -d " " -f2 | tr -d "\n" > msg_hash && - ipfs name publish --offline --allow-offline --key=self $(cat msg_hash) && - ipfs name publish --offline --allow-offline --key=oldkey $(cat msg_hash) - ' - - test_kill_ipfs_daemon - - test_expect_success "clean up ipfs dir" ' - rm -rf "$IPFS_PATH" - ' - -} -test_rotate 'rsa' '' -test_rotate 'ed25519' '' -test_rotate '' '' -test_rotate 'rsa' 'rsa' -test_rotate 'ed25519' 'rsa' -test_rotate '' 'rsa' -test_rotate 'rsa' 'ed25519' -test_rotate 'ed25519' 'ed25519' -test_rotate '' 'ed25519' - -test_done diff --git a/test/sharness/t0030-mount.sh b/test/sharness/t0030-mount.sh deleted file mode 100755 index 0c0983d0c41..00000000000 --- a/test/sharness/t0030-mount.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2014 Christian Couder -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test mount command" - -. lib/test-lib.sh - -# if in travis CI, don't test mount (no fuse) -if ! test_have_prereq FUSE; then - skip_all='skipping mount tests, fuse not available' - - test_done -fi - - -export IPFS_NS_MAP="welcome.example.com:/ipfs/$HASH_WELCOME_DOCS" - -# start iptb + wait for peering -NUM_NODES=5 -test_expect_success 'init iptb' ' - iptb testbed create -type localipfs -count $NUM_NODES -init -' -startup_cluster $NUM_NODES - -# test mount failure before mounting properly. 
-test_expect_success "'ipfs mount' fails when there is no mount dir" ' - tmp_ipfs_mount() { ipfsi 0 mount -f=not_ipfs -n=not_ipns >output 2>output.err; } && - test_must_fail tmp_ipfs_mount -' - -test_expect_success "'ipfs mount' output looks good" ' - test_must_be_empty output && - test_should_contain "not_ipns\|not_ipfs" output.err -' - -test_expect_success "setup and publish default IPNS value" ' - mkdir "$(pwd)/ipfs" "$(pwd)/ipns" && - ipfsi 0 name publish QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn -' - -# make sure stuff is unmounted first -# then mount properly -test_expect_success FUSE "'ipfs mount' succeeds" ' - do_umount "$(pwd)/ipfs" || true && - do_umount "$(pwd)/ipns" || true && - ipfsi 0 mount -f "$(pwd)/ipfs" -n "$(pwd)/ipns" >actual -' - -test_expect_success FUSE "'ipfs mount' output looks good" ' - echo "IPFS mounted at: $(pwd)/ipfs" >expected && - echo "IPNS mounted at: $(pwd)/ipns" >>expected && - test_cmp expected actual -' - -test_expect_success FUSE "local symlink works" ' - ipfsi 0 id -f"\n" > expected && - basename $(readlink ipns/local) > actual && - test_cmp expected actual -' - -test_expect_success FUSE "can resolve ipns names" ' - echo -n "ipfs" > expected && - cat ipns/welcome.example.com/ping > actual && - test_cmp expected actual -' - -test_expect_success "mount directories cannot be removed while active" ' - test_must_fail rmdir ipfs ipns 2>/dev/null -' - -test_expect_success "unmount directories" ' - do_umount "$(pwd)/ipfs" && - do_umount "$(pwd)/ipns" -' - -test_expect_success "mount directories can be removed after shutdown" ' - rmdir ipfs ipns -' - -test_expect_success 'stop iptb' ' - iptb stop -' - -test_done diff --git a/test/sharness/t0031-mount-publish.sh b/test/sharness/t0031-mount-publish.sh deleted file mode 100755 index 95b52bfe5e7..00000000000 --- a/test/sharness/t0031-mount-publish.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test mount command in conjunction with publishing" - -# imports -. lib/test-lib.sh - -# if in travis CI, don't test mount (no fuse) -if ! 
test_have_prereq FUSE; then - skip_all='skipping mount tests, fuse not available' - - test_done -fi - -test_init_ipfs - -# start iptb + wait for peering -NUM_NODES=3 -test_expect_success 'init iptb' ' - iptb testbed create -type localipfs -count $NUM_NODES -force -init && - startup_cluster $NUM_NODES -' - -# pre-mount publish -HASH=$(echo 'hello warld' | ipfsi 0 add -Q -w --stdin-name "file") -test_expect_success "can publish before mounting /ipns" ' - ipfsi 0 name publish "$HASH" -' - -# mount -IPFS_MOUNT_DIR="$PWD/ipfs" -IPNS_MOUNT_DIR="$PWD/ipns" -test_expect_success FUSE "'ipfs mount' succeeds" ' - ipfsi 0 mount -f "'"$IPFS_MOUNT_DIR"'" -n "'"$IPNS_MOUNT_DIR"'" >actual -' -test_expect_success FUSE "'ipfs mount' output looks good" ' - echo "IPFS mounted at: $PWD/ipfs" >expected && - echo "IPNS mounted at: $PWD/ipns" >>expected && - test_cmp expected actual -' - -test_expect_success "cannot publish after mounting /ipns" ' - echo "Error: cannot manually publish while IPNS is mounted" >expected && - test_must_fail ipfsi 0 name publish '$HASH' 2>actual && - test_cmp expected actual -' - -test_expect_success "unmount /ipns out-of-band" ' - fusermount -u "'"$IPNS_MOUNT_DIR"'" -' - -test_expect_success "can publish after unmounting /ipns" ' - ipfsi 0 name publish '$HASH' -' - -# clean-up ipfs -test_expect_success "unmount /ipfs" ' - fusermount -u "'"$IPFS_MOUNT_DIR"'" -' -iptb stop - -test_done diff --git a/test/sharness/t0032-mount-sharded.sh b/test/sharness/t0032-mount-sharded.sh deleted file mode 100755 index 10ba421a225..00000000000 --- a/test/sharness/t0032-mount-sharded.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2021 Protocol Labs -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test mount command with sharding enabled" - -. lib/test-lib.sh - -if ! 
test_have_prereq FUSE; then - skip_all='skipping mount sharded tests, fuse not available' - test_done -fi - -test_init_ipfs - -test_expect_success 'force sharding' ' - ipfs config --json Internal.UnixFSShardingSizeThreshold "\"1B\"" -' - -test_launch_ipfs_daemon -test_mount_ipfs - -# we're testing nested subdirs which ensures that IPLD ADLs work -test_expect_success 'setup test data' ' - mkdir testdata && - echo a > testdata/a && - mkdir testdata/subdir && - echo b > testdata/subdir/b -' - -HASH=QmY59Ufw8zA2BxGPMTcfXg86JVed81Qbxeq5rDkHWSLN1m - -test_expect_success 'can add the data' ' - echo $HASH > expected_hash && - ipfs add -r -Q testdata > actual_hash && - test_cmp expected_hash actual_hash -' - -test_expect_success 'can read the data' ' - echo a > expected_a && - cat "ipfs/$HASH/a" > actual_a && - test_cmp expected_a actual_a && - echo b > expected_b && - cat "ipfs/$HASH/subdir/b" > actual_b && - test_cmp expected_b actual_b -' - -test_expect_success 'can list directories' ' - printf "a\nsubdir\n" > expected_ls && - ls -1 "ipfs/$HASH" > actual_ls && - test_cmp expected_ls actual_ls && - printf "b\n" > expected_ls_subdir && - ls -1 "ipfs/$HASH/subdir" > actual_ls_subdir && - test_cmp expected_ls_subdir actual_ls_subdir -' - -test_expect_success "unmount" ' - do_umount "$(pwd)/ipfs" && - do_umount "$(pwd)/ipns" -' - -test_expect_success 'cleanup' 'rmdir ipfs ipns' - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0040-add-and-cat.sh b/test/sharness/t0040-add-and-cat.sh deleted file mode 100755 index c7232bedd8c..00000000000 --- a/test/sharness/t0040-add-and-cat.sh +++ /dev/null @@ -1,984 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2014 Christian Couder -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test add and cat commands" - -. lib/test-lib.sh - -test_add_cat_file() { - test_expect_success "ipfs add --help works" ' - ipfs add --help 2> add_help_err1 > /dev/null - ' - - test_expect_success "stdin reading message doesn't show up" ' - test_expect_code 1 grep "ipfs: Reading from" add_help_err1 && - test_expect_code 1 grep "send Ctrl-d to stop." add_help_err1 - ' - - test_expect_success "ipfs help add works" ' - ipfs help add 2> add_help_err2 > /dev/null - ' - - test_expect_success "stdin reading message doesn't show up" ' - test_expect_code 1 grep "ipfs: Reading from" add_help_err2 && - test_expect_code 1 grep "send Ctrl-d to stop." add_help_err2 - ' - - test_expect_success "ipfs add succeeds" ' - echo "Hello Worlds!" >mountdir/hello.txt && - ipfs add mountdir/hello.txt >actual - ' - - test_expect_success "ipfs add output looks good" ' - HASH="QmVr26fY1tKyspEJBniVhqxQeEjhF78XerGiqWAwraVLQH" && - echo "added $HASH hello.txt" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs add --only-hash succeeds" ' - ipfs add --only-hash mountdir/hello.txt > oh_actual - ' - - test_expect_success "ipfs add --only-hash output looks good" ' - test_cmp expected oh_actual - ' - - test_expect_success "ipfs cat succeeds" ' - ipfs cat "$HASH" >actual - ' - - test_expect_success "ipfs cat output looks good" ' - echo "Hello Worlds!" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs cat with offset succeeds" ' - ipfs cat --offset 10 "$HASH" >actual - ' - - test_expect_success "ipfs cat from offset output looks good" ' - echo "ds!" 
>expected && - test_cmp expected actual - ' - - test_expect_success "ipfs cat multiple hashes with offset succeeds" ' - ipfs cat --offset 10 "$HASH" "$HASH" >actual - ' - - test_expect_success "ipfs cat from offset output looks good" ' - echo "ds!" >expected && - echo "Hello Worlds!" >>expected && - test_cmp expected actual - ' - - test_expect_success "ipfs cat multiple hashes with offset succeeds" ' - ipfs cat --offset 16 "$HASH" "$HASH" >actual - ' - - test_expect_success "ipfs cat from offset output looks good" ' - echo "llo Worlds!" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs cat from negative offset should fail" ' - test_expect_code 1 ipfs cat --offset -102 "$HASH" > actual - ' - - test_expect_success "ipfs cat with length succeeds" ' - ipfs cat --length 8 "$HASH" >actual - ' - - test_expect_success "ipfs cat with length output looks good" ' - printf "Hello Wo" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs cat multiple hashes with offset and length succeeds" ' - ipfs cat --offset 5 --length 15 "$HASH" "$HASH" "$HASH" >actual - ' - - test_expect_success "ipfs cat multiple hashes with offset and length looks good" ' - printf " Worlds!\nHello " >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs cat with exact length succeeds" ' - ipfs cat --length $(ipfs cat "$HASH" | wc -c) "$HASH" >actual - ' - - test_expect_success "ipfs cat with exact length looks good" ' - echo "Hello Worlds!" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs cat with 0 length succeeds" ' - ipfs cat --length 0 "$HASH" >actual - ' - - test_expect_success "ipfs cat with 0 length looks good" ' - : >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs cat with oversized length succeeds" ' - ipfs cat --length 100 "$HASH" >actual - ' - - test_expect_success "ipfs cat with oversized length looks good" ' - echo "Hello Worlds!" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs cat with negative length should fail" ' - test_expect_code 1 ipfs cat --length -102 "$HASH" > actual - ' - - test_expect_success "ipfs cat /ipfs/file succeeds" ' - ipfs cat /ipfs/$HASH >actual - ' - - test_expect_success "output looks good" ' - echo "Hello Worlds!" 
>expected && - test_cmp expected actual - ' - - test_expect_success "ipfs add -t succeeds" ' - ipfs add -t mountdir/hello.txt >actual - ' - - test_expect_success "ipfs add -t output looks good" ' - HASH="QmUkUQgxXeggyaD5Ckv8ZqfW8wHBX6cYyeiyqvVZYzq5Bi" && - echo "added $HASH hello.txt" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs add --chunker size-32 succeeds" ' - ipfs add --chunker rabin mountdir/hello.txt >actual - ' - - test_expect_success "ipfs add --chunker size-32 output looks good" ' - HASH="QmVr26fY1tKyspEJBniVhqxQeEjhF78XerGiqWAwraVLQH" && - echo "added $HASH hello.txt" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs add --chunker size-64 succeeds" ' - ipfs add --chunker=size-64 mountdir/hello.txt >actual - ' - - test_expect_success "ipfs add --chunker size-64 output looks good" ' - HASH="QmVr26fY1tKyspEJBniVhqxQeEjhF78XerGiqWAwraVLQH" && - echo "added $HASH hello.txt" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs add --chunker=size-0 failed" ' - test_expect_code 1 ipfs add -Q --chunker=size-0 mountdir/hello.txt - ' - - test_expect_success "ipfs add --chunker rabin-36-512-1024 succeeds" ' - ipfs add --chunker rabin-36-512-1024 mountdir/hello.txt >actual - ' - - test_expect_success "ipfs add --chunker rabin-36-512-1024 output looks good" ' - HASH="QmVr26fY1tKyspEJBniVhqxQeEjhF78XerGiqWAwraVLQH" && - echo "added $HASH hello.txt" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs add --chunker rabin-12-512-1024 failed" ' - test_expect_code 1 ipfs add -Q --chunker rabin-12-512-1024 mountdir/hello.txt - ' - - test_expect_success "ipfs add --chunker buzhash succeeds" ' - ipfs add --chunker buzhash mountdir/hello.txt >actual - ' - - test_expect_success "ipfs add --chunker buzhash output looks good" ' - HASH="QmVr26fY1tKyspEJBniVhqxQeEjhF78XerGiqWAwraVLQH" && - echo "added $HASH hello.txt" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs add on hidden file succeeds" ' - echo "Hello Worlds!" 
>mountdir/.hello.txt && - ipfs add mountdir/.hello.txt >actual - ' - - test_expect_success "ipfs add on hidden file output looks good" ' - HASH="QmVr26fY1tKyspEJBniVhqxQeEjhF78XerGiqWAwraVLQH" && - echo "added $HASH .hello.txt" >expected && - test_cmp expected actual - ' - - test_expect_success "add zero length file" ' - touch zero-length-file && - ZEROHASH=$(ipfs add -q zero-length-file) && - echo $ZEROHASH - ' - - test_expect_success "zero length file has correct hash" ' - test "$ZEROHASH" = QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH - ' - - test_expect_success "cat zero length file" ' - ipfs cat $ZEROHASH > zero-length-file_out - ' - - test_expect_success "make sure it looks good" ' - test_cmp zero-length-file zero-length-file_out - ' - - test_expect_success "ipfs add --stdin-name" ' - NAMEHASH="QmdFyxZXsFiP4csgfM5uPu99AvFiKH62CSPDw5TP92nr7w" && - echo "IPFS" | ipfs add --stdin-name file.txt > actual && - echo "added $NAMEHASH file.txt" > expected && - test_cmp expected actual - ' - - test_expect_success "ipfs add --stdin-name -w" ' - NAMEHASH="QmdFyxZXsFiP4csgfM5uPu99AvFiKH62CSPDw5TP92nr7w" && - echo "IPFS" | ipfs add -w --stdin-name file.txt | head -n1> actual && - echo "added $NAMEHASH file.txt" > expected && - test_cmp expected actual - ' - - test_expect_success "ipfs cat with stdin-name" ' - NAMEHASH=$(echo "IPFS" | ipfs add -w --stdin-name file.txt -Q) && - ipfs cat /ipfs/$NAMEHASH/file.txt > expected && - echo "IPFS" > actual && - test_cmp expected actual - ' - - test_expect_success "ipfs add -r ." ' - mkdir test_current_dir && - echo "Hey" > test_current_dir/hey && - mkdir test_current_dir/hello && - echo "World" > test_current_dir/hello/world && - ( cd test_current_dir && - ipfs add -r -Q . > ../actual && cd ../ ) && - rm -r test_current_dir - ' - - test_expect_success "ipfs add -r . 
output looks good" ' - echo "QmZQWnfcqJ6hNkkPvrY9Q5X39GP3jUnUbAV4AbmbbR3Cb1" > expected - test_cmp expected actual - ' - - test_expect_success "ipfs add -r ./" ' - mkdir test_current_dir && - echo "Hey" > test_current_dir/hey && - mkdir test_current_dir/hello && - echo "World" > test_current_dir/hello/world && - ( cd test_current_dir && - ipfs add -r -Q ./ > ../actual && cd ../ ) && - rm -r test_current_dir - ' - - test_expect_success "ipfs add -r ./ output looks good" ' - echo "QmZQWnfcqJ6hNkkPvrY9Q5X39GP3jUnUbAV4AbmbbR3Cb1" > expected - test_cmp expected actual - ' - - # --cid-base=base32 - - test_expect_success "ipfs add --cid-base=base32 succeeds" ' - echo "base32 test" >mountdir/base32-test.txt && - ipfs add --cid-base=base32 mountdir/base32-test.txt >actual - ' - test_expect_success "ipfs add --cid-base=base32 output looks good" ' - HASHb32="bafybeibyosqxljd2eptb4ebbtvk7pb4aoxzqa6ttdsflty6rsslz5y6i34" && - echo "added $HASHb32 base32-test.txt" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs add --cid-base=base32 --only-hash succeeds" ' - ipfs add --cid-base=base32 --only-hash mountdir/base32-test.txt > oh_actual - ' - test_expect_success "ipfs add --cid-base=base32 --only-hash output looks good" ' - test_cmp expected oh_actual - ' - - test_expect_success "ipfs add --cid-base=base32 --upgrade-cidv0-in-output=false succeeds" ' - echo "base32 test" >mountdir/base32-test.txt && - ipfs add --cid-base=base32 --upgrade-cidv0-in-output=false mountdir/base32-test.txt >actual - ' - test_expect_success "ipfs add --cid-base=base32 --upgrade-cidv0-in-output=false output looks good" ' - HASHv0=$(cid-fmt -v 0 -b z %s "$HASHb32") && - echo "added $HASHv0 base32-test.txt" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs add --cid-base=base32 --upgrade-cidv0-in-output=false --only-hash succeeds" ' - ipfs add --cid-base=base32 --upgrade-cidv0-in-output=false --only-hash mountdir/base32-test.txt > oh_actual - ' - test_expect_success "ipfs add --cid-base=base32 --upgrade-cidv0-in-output=false --only-hash output looks good" ' - test_cmp expected oh_actual - ' - - test_expect_success "ipfs cat with base32 hash succeeds" ' - ipfs cat "$HASHb32" >actual - ' - test_expect_success "ipfs cat with base32 hash output looks good" ' - echo "base32 test" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs cat using CIDv0 hash succeeds" ' - ipfs cat "$HASHv0" >actual - ' - test_expect_success "ipfs cat using CIDv0 hash looks good" ' - echo "base32 test" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs add with multiple files succeeds" ' - echo "Helloo Worlds!" 
>mountdir/hello2.txt && - ipfs add mountdir/hello.txt mountdir/hello2.txt >actual - ' - - test_expect_success "ipfs add with multiple files output looks good" ' - echo "added QmVr26fY1tKyspEJBniVhqxQeEjhF78XerGiqWAwraVLQH hello.txt" >expected && - echo "added Qmf35k66MZNW2GijohUmXQEWKZU4cCGTCwK6idfnt152wJ hello2.txt" >> expected && - test_cmp expected actual - ' - - test_expect_success "ipfs add with multiple files of same name and import dir succeeds" ' - ipfs add mountdir/hello.txt mountdir/hello.txt >actual - ' - - test_expect_success "ipfs add with multiple files of same name output looks good" ' - echo "added QmVr26fY1tKyspEJBniVhqxQeEjhF78XerGiqWAwraVLQH hello.txt" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs add with multiple files of same name but different dirs fails" ' - mkdir -p mountdir/same-file/ && - cp mountdir/hello.txt mountdir/same-file/hello.txt && - test_expect_code 1 ipfs add mountdir/hello.txt mountdir/same-file/hello.txt >actual && - rm mountdir/same-file/hello.txt && - rmdir mountdir/same-file - ' - - ## --to-files with single source - - test_expect_success "ipfs add --to-files /mfspath succeeds" ' - mkdir -p mountdir && echo "Hello MFS!" > mountdir/mfs.txt && - ipfs add mountdir/mfs.txt --to-files /ipfs-add-to-files >actual - ' - - test_expect_success "ipfs add --to-files output looks good" ' - HASH_MFS="QmVT8bL3sGBA2TwvX8JPhrv5CYZL8LLLfW7mxkUjPZsgBr" && - echo "added $HASH_MFS mfs.txt" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs files read succeeds" ' - ipfs files read /ipfs-add-to-files >actual && - ipfs files rm /ipfs-add-to-files - ' - - test_expect_success "ipfs cat output looks good" ' - echo "Hello MFS!" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs add --to-files requires argument" ' - test_expect_code 1 ipfs add mountdir/mfs.txt --to-files >actual 2>&1 && - test_should_contain "Error: missing argument for option \"to-files\"" actual - ' - - test_expect_success "ipfs add --to-files / (MFS root) works" ' - echo "Hello MFS!" 
>expected && - ipfs add mountdir/mfs.txt --to-files / && - ipfs files read /mfs.txt >actual && - test_cmp expected actual && - ipfs files rm /mfs.txt && - rm mountdir/mfs.txt - ' - - ## --to-files with multiple sources - - test_expect_success "ipfs add file1 file2 --to-files /mfspath0 (without trailing slash) fails" ' - mkdir -p test && - echo "file1" > test/mfs1.txt && - echo "file2" > test/mfs2.txt && - test_expect_code 1 ipfs add test/mfs1.txt test/mfs2.txt --to-files /mfspath0 >actual 2>&1 && - test_should_contain "MFS destination is a file: only one entry can be copied to \"/mfspath0\"" actual && - ipfs files rm -r --force /mfspath0 - ' - - test_expect_success "ipfs add file1 file2 --to-files /mfsfile1 (without trailing slash + with preexisting file) fails" ' - echo test | ipfs files write --create /mfsfile1 && - test_expect_code 1 ipfs add test/mfs1.txt test/mfs2.txt --to-files /mfsfile1 >actual 2>&1 && - test_should_contain "Error: to-files: cannot put node in path \"/mfsfile1\"" actual && - ipfs files rm -r --force /mfsfile1 - ' - - test_expect_success "ipfs add file1 file2 --to-files /mfsdir1 (without trailing slash + with preexisting dir) fails" ' - ipfs files mkdir -p /mfsdir1 && - test_expect_code 1 ipfs add test/mfs1.txt test/mfs2.txt --to-files /mfsdir1 >actual 2>&1 && - test_should_contain "Error: to-files: cannot put node in path \"/mfsdir1\"" actual && - ipfs files rm -r --force /mfsdir1 - ' - - test_expect_success "ipfs add file1 file2 --to-files /mfsdir2/ (with trailing slash) succeeds" ' - ipfs files mkdir -p /mfsdir2 && - test_expect_code 0 ipfs add --cid-version 1 test/mfs1.txt test/mfs2.txt --to-files /mfsdir2/ > actual 2>&1 && - test_should_contain "added bafkreihm3rktn5z33luic3youqdsn326toaq3ekesmdvsa53sbrd3f5r3a mfs1.txt" actual && - test_should_contain "added bafkreidh5zkhr2vnwa2luwmuj24xo6l3jhfgvkgtk5cyp43oxs7owzpxby mfs2.txt" actual && - test_should_not_contain "Error" actual && - ipfs files ls /mfsdir2/ > lsout && - test_should_contain "mfs1.txt" lsout && - test_should_contain "mfs2.txt" lsout && - ipfs files rm -r --force /mfsdir2 - ' - - test_expect_success "ipfs add file1 file2 --to-files /mfsfile2/ (with trailing slash + with preexisting file) fails" ' - echo test | ipfs files write --create /mfsfile2 && - test_expect_code 1 ipfs add test/mfs1.txt test/mfs2.txt --to-files /mfsfile2/ >actual 2>&1 && - test_should_contain "Error: to-files: MFS destination \"/mfsfile2/\" is not a directory" actual && - ipfs files rm -r --force /mfsfile2 - ' - - ## --to-files with recursive dir - - # test MFS destination without trailing slash - test_expect_success "ipfs add with --to-files /mfs/subdir3 fails because /mfs/subdir3 exists" ' - ipfs files mkdir -p /mfs/subdir3 && - test_expect_code 1 ipfs add -r test --to-files /mfs/subdir3 >actual 2>&1 && - test_should_contain "cannot put node in path \"/mfs/subdir3\": directory already has entry by that name" actual && - ipfs files rm -r --force /mfs - ' - - # test recursive import of a dir into MFS subdirectory - test_expect_success "ipfs add -r dir --to-files /mfs/subdir4/ succeeds (because of trailing slash)" ' - ipfs files mkdir -p /mfs/subdir4 && - ipfs add --cid-version 1 -r test --to-files /mfs/subdir4/ >actual 2>&1 && - test_should_contain "added bafkreihm3rktn5z33luic3youqdsn326toaq3ekesmdvsa53sbrd3f5r3a test/mfs1.txt" actual && - test_should_contain "added bafkreidh5zkhr2vnwa2luwmuj24xo6l3jhfgvkgtk5cyp43oxs7owzpxby test/mfs2.txt" actual && - test_should_contain "added 
bafybeic7xwqwovt4g4bax6d3udp6222i63vj2rblpbim7uy2uw4a5gahha test" actual && - test_should_not_contain "Error" actual - ipfs files ls /mfs/subdir4/ > lsout && - test_should_contain "test" lsout && - test_should_not_contain "mfs1.txt" lsout && - test_should_not_contain "mfs2.txt" lsout && - ipfs files rm -r --force /mfs - ' - - # confirm -w and --to-files are exclusive - # context: https://github.com/ipfs/kubo/issues/10611 - test_expect_success "ipfs add -r -w dir --to-files /mfs/subdir5/ errors (-w and --to-files are exclusive)" ' - ipfs files mkdir -p /mfs/subdir5 && - test_expect_code 1 ipfs add -r -w test --to-files /mfs/subdir5/ >actual 2>&1 && - test_should_contain "Error" actual && - ipfs files rm -r --force /mfs - ' - -} - -test_add_cat_5MB() { - ADD_FLAGS="$1" - EXP_HASH="$2" - - test_expect_success "generate 5MB file using go-random" ' - random 5242880 41 >mountdir/bigfile - ' - - test_expect_success "sha1 of the file looks ok" ' - echo "11145620fb92eb5a49c9986b5c6844efda37e471660e" >sha1_expected && - multihash -a=sha1 -e=hex mountdir/bigfile >sha1_actual && - test_cmp sha1_expected sha1_actual - ' - - test_expect_success "'ipfs add $ADD_FLAGS bigfile' succeeds" ' - ipfs add $ADD_FLAGS mountdir/bigfile >actual || - test_fsh cat daemon_err - ' - - test_expect_success "'ipfs add bigfile' output looks good" ' - echo "added $EXP_HASH bigfile" >expected && - test_cmp expected actual - ' - test_expect_success "'ipfs cat' succeeds" ' - ipfs cat "$EXP_HASH" >actual - ' - - test_expect_success "'ipfs cat' output looks good" ' - test_cmp mountdir/bigfile actual - ' - - test_expect_success FUSE "cat ipfs/bigfile succeeds" ' - cat "ipfs/$EXP_HASH" >actual - ' - - test_expect_success FUSE "cat ipfs/bigfile looks good" ' - test_cmp mountdir/bigfile actual - ' - - test_expect_success "remove hash" ' - ipfs pin rm "$EXP_HASH" && - ipfs block rm "$EXP_HASH" - ' - - test_expect_success "get base32 version of CID" ' - ipfs cid base32 $EXP_HASH > base32_cid && - BASE32_HASH=`cat base32_cid` - ' - - test_expect_success "ipfs add --cid-base=base32 bigfile' succeeds" ' - ipfs add $ADD_FLAGS --cid-base=base32 mountdir/bigfile >actual || - test_fsh cat daemon_err - ' - - test_expect_success "'ipfs add bigfile --cid-base=base32' output looks good" ' - echo "added $BASE32_HASH bigfile" >expected && - test_cmp expected actual - ' - - test_expect_success "'ipfs cat $BASE32_HASH' succeeds" ' - ipfs cat "$BASE32_HASH" >actual - ' -} - -test_add_cat_raw() { - test_expect_success "add a small file with raw-leaves" ' - echo "foobar" > afile && - HASH=$(ipfs add -q --raw-leaves afile) - ' - - test_expect_success "cat that small file" ' - ipfs cat $HASH > afile_out - ' - - test_expect_success "make sure it looks good" ' - test_cmp afile afile_out - ' - - test_expect_success "add zero length file with raw-leaves" ' - touch zero-length-file && - ZEROHASH=$(ipfs add -q --raw-leaves zero-length-file) && - echo $ZEROHASH - ' - - test_expect_success "zero length file has correct hash" ' - test "$ZEROHASH" = bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku - ' - - test_expect_success "cat zero length file" ' - ipfs cat $ZEROHASH > zero-length-file_out - ' - - test_expect_success "make sure it looks good" ' - test_cmp zero-length-file zero-length-file_out - ' -} - -test_add_cat_derefargs() { - test_expect_success "create and hash zero length file" ' - touch zero-length-file && - ZEROHASH=$(ipfs add -q -n zero-length-file) - ' - - test_expect_success "create symlink and add with dereferenced arguments" ' - ln -s 
zero-length-file symlink-to-zero && - HASH=$(ipfs add -q -n --dereference-args symlink-to-zero) && - test $HASH = $ZEROHASH - ' -} - -test_add_cat_expensive() { - ADD_FLAGS="$1" - HASH="$2" - - test_expect_success EXPENSIVE "generate 100MB file using go-random" ' - random 104857600 42 >mountdir/bigfile - ' - - test_expect_success EXPENSIVE "sha1 of the file looks ok" ' - echo "1114885b197b01e0f7ff584458dc236cb9477d2e736d" >sha1_expected && - multihash -a=sha1 -e=hex mountdir/bigfile >sha1_actual && - test_cmp sha1_expected sha1_actual - ' - - test_expect_success EXPENSIVE "ipfs add $ADD_FLAGS bigfile succeeds" ' - ipfs add $ADD_FLAGS mountdir/bigfile >actual - ' - - test_expect_success EXPENSIVE "ipfs add bigfile output looks good" ' - echo "added $HASH bigfile" >expected && - test_cmp expected actual - ' - - test_expect_success EXPENSIVE "ipfs cat succeeds" ' - ipfs cat "$HASH" | multihash -a=sha1 -e=hex >sha1_actual - ' - - test_expect_success EXPENSIVE "ipfs cat output looks good" ' - ipfs cat "$HASH" >actual && - test_cmp mountdir/bigfile actual - ' - - test_expect_success EXPENSIVE "ipfs cat output hashed looks good" ' - echo "1114885b197b01e0f7ff584458dc236cb9477d2e736d" >sha1_expected && - test_cmp sha1_expected sha1_actual - ' - - test_expect_success FUSE,EXPENSIVE "cat ipfs/bigfile succeeds" ' - cat "ipfs/$HASH" | multihash -a=sha1 -e=hex >sha1_actual - ' - - test_expect_success FUSE,EXPENSIVE "cat ipfs/bigfile looks good" ' - test_cmp sha1_expected sha1_actual - ' -} - -test_add_named_pipe() { - test_expect_success "Adding named pipes explicitly works" ' - mkfifo named-pipe1 && - ( echo foo > named-pipe1 & echo "added $( echo foo | ipfs add -nq ) named-pipe1" > expected_named_pipes_add ) && - mkfifo named-pipe2 && - ( echo bar > named-pipe2 & echo "added $( echo bar | ipfs add -nq ) named-pipe2" >> expected_named_pipes_add ) && - ipfs add -n named-pipe1 named-pipe2 >actual_pipe_add && - rm named-pipe1 && - rm named-pipe2 && - test_cmp expected_named_pipes_add actual_pipe_add - ' - - test_expect_success "useful error message when recursively adding a named pipe" ' - mkdir -p named-pipe-dir && - mkfifo named-pipe-dir/named-pipe && - STAT=$(generic_stat named-pipe-dir/named-pipe) && - test_expect_code 1 ipfs add -r named-pipe-dir 2>actual && - printf "Error: unrecognized file type for named-pipe-dir/named-pipe: $STAT\n" >expected && - rm named-pipe-dir/named-pipe && - rmdir named-pipe-dir && - test_cmp expected actual - ' -} - -test_add_pwd_is_symlink() { - test_expect_success "ipfs add -r adds directory content when ./ is symlink" ' - mkdir hellodir && - echo "World" > hellodir/world && - ln -s hellodir hellolink && - ( cd hellolink && - ipfs add -r . 
> ../actual ) && - grep "added Qma9CyFdG5ffrZCcYSin2uAETygB25cswVwEYYzwfQuhTe" actual && - rm -r hellodir - ' -} - -test_launch_ipfs_daemon_and_mount - -test_expect_success "'ipfs add --help' succeeds" ' - ipfs add --help >actual -' - -test_expect_success "'ipfs add --help' output looks good" ' - egrep "ipfs add.*" actual >/dev/null || - test_fsh cat actual -' - -test_expect_success "'ipfs help add' succeeds" ' - ipfs help add >actual -' - -test_expect_success "'ipfs help add' output looks good" ' - egrep "ipfs add.*" actual >/dev/null || - test_fsh cat actual -' - -test_expect_success "'ipfs cat --help' succeeds" ' - ipfs cat --help >actual -' - -test_expect_success "'ipfs cat --help' output looks good" ' - egrep "ipfs cat.*" actual >/dev/null || - test_fsh cat actual -' - -test_expect_success "'ipfs help cat' succeeds" ' - ipfs help cat >actual -' - -test_expect_success "'ipfs help cat' output looks good" ' - egrep "ipfs cat.*" actual >/dev/null || - test_fsh cat actual -' - -test_add_cat_file - -test_expect_success "ipfs cat succeeds with stdin opened (issue #1141)" ' - cat mountdir/hello.txt | while read line; do ipfs cat "$HASH" >actual || exit; done -' - -test_expect_success "ipfs cat output looks good" ' - cat mountdir/hello.txt >expected && - test_cmp expected actual -' - -test_expect_success "ipfs cat accept hash from built input" ' - echo "$HASH" | ipfs cat >actual -' - -test_expect_success "ipfs cat output looks good" ' - test_cmp expected actual -' - -test_expect_success FUSE "cat ipfs/stuff succeeds" ' - cat "ipfs/$HASH" >actual -' - -test_expect_success FUSE "cat ipfs/stuff looks good" ' - test_cmp expected actual -' - -test_expect_success "'ipfs add -q' succeeds" ' - echo "Hello Venus!" >mountdir/venus.txt && - ipfs add -q mountdir/venus.txt >actual -' - -test_expect_success "'ipfs add -q' output looks good" ' - HASH="QmU5kp3BH3B8tnWUU2Pikdb2maksBNkb92FHRr56hyghh4" && - echo "$HASH" >expected && - test_cmp expected actual -' - -test_expect_success "'ipfs add -q' with stdin input succeeds" ' - echo "Hello Jupiter!" | ipfs add -q >actual -' - -test_expect_success "'ipfs add -q' output looks good" ' - HASH="QmUnvPcBctVTAcJpigv6KMqDvmDewksPWrNVoy1E1WP5fh" && - echo "$HASH" >expected && - test_cmp expected actual -' - -test_expect_success "'ipfs cat' succeeds" ' - ipfs cat "$HASH" >actual -' - -test_expect_success "ipfs cat output looks good" ' - echo "Hello Jupiter!" >expected && - test_cmp expected actual -' - -test_expect_success "'ipfs add' with stdin input succeeds" ' - printf "Hello Neptune!\nHello Pluton!" | ipfs add >actual -' - -test_expect_success "'ipfs add' output looks good" ' - HASH="QmZDhWpi8NvKrekaYYhxKCdNVGWsFFe1CREnAjP1QbPaB3" && - echo "added $HASH $HASH" >expected && - test_cmp expected actual -' - -test_expect_success "'ipfs cat' with built input succeeds" ' - echo "$HASH" | ipfs cat >actual -' - -test_expect_success "ipfs cat with built input output looks good" ' - printf "Hello Neptune!\nHello Pluton!" >expected && - test_cmp expected actual -' - -add_directory() { - EXTRA_ARGS=$1 - - test_expect_success "'ipfs add -r $EXTRA_ARGS' succeeds" ' - mkdir mountdir/planets && - echo "Hello Mars!" >mountdir/planets/mars.txt && - echo "Hello Venus!" 
>mountdir/planets/venus.txt && - ipfs add -r $EXTRA_ARGS mountdir/planets >actual - ' - - test_expect_success "'ipfs add -r $EXTRA_ARGS' output looks good" ' - echo "added $MARS planets/mars.txt" >expected && - echo "added $VENUS planets/venus.txt" >>expected && - echo "added $PLANETS planets" >>expected && - test_cmp expected actual - ' - - test_expect_success "ipfs cat accept many hashes from built input" ' - { echo "$MARS"; echo "$VENUS"; } | ipfs cat >actual - ' - - test_expect_success "ipfs cat output looks good" ' - cat mountdir/planets/mars.txt mountdir/planets/venus.txt >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs cat accept many hashes as args" ' - ipfs cat "$MARS" "$VENUS" >actual - ' - - test_expect_success "ipfs cat output looks good" ' - test_cmp expected actual - ' - - test_expect_success "ipfs cat with both arg and stdin" ' - echo "$MARS" | ipfs cat "$VENUS" >actual - ' - - test_expect_success "ipfs cat output looks good" ' - cat mountdir/planets/venus.txt >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs cat with two args and stdin" ' - echo "$MARS" | ipfs cat "$VENUS" "$VENUS" >actual - ' - - test_expect_success "ipfs cat output looks good" ' - cat mountdir/planets/venus.txt mountdir/planets/venus.txt >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs add --quieter succeeds" ' - ipfs add -r -Q $EXTRA_ARGS mountdir/planets >actual - ' - - test_expect_success "ipfs add --quieter returns only one correct hash" ' - echo "$PLANETS" > expected && - test_cmp expected actual - ' - - test_expect_success "cleanup" ' - rm -r mountdir/planets - ' -} - -PLANETS="QmWSgS32xQEcXMeqd3YPJLrNBLSdsfYCep2U7CFkyrjXwY" -MARS="QmPrrHqJzto9m7SyiRzarwkqPcCSsKR2EB1AyqJfe8L8tN" -VENUS="QmU5kp3BH3B8tnWUU2Pikdb2maksBNkb92FHRr56hyghh4" -add_directory - -PLANETS="QmfWfQfKCY5Ukv9peBbxM5vqWM9BzmqUSXvdCgjT2wsiBT" -MARS="bafkreibmlvvgdyihetgocpof6xk64kjjzdeq2e4c7hqs3krdheosk4tgj4" -VENUS="bafkreihfsphazrk2ilejpekyltjeh5k4yvwgjuwg26ueafohqioeo3sdca" -add_directory '--raw-leaves' - -PLANETS="bafybeih7e5dmkyk25up5vxug4q3hrg2fxbzf23dfrac2fns5h7z4aa7ioi" -MARS="bafkreibmlvvgdyihetgocpof6xk64kjjzdeq2e4c7hqs3krdheosk4tgj4" -VENUS="bafkreihfsphazrk2ilejpekyltjeh5k4yvwgjuwg26ueafohqioeo3sdca" -add_directory '--cid-version=1' - -PLANETS="bafybeif5tuep5ap2d7zyhbktucey75aoacxufgt6i3v4gebmixyipnyp7y" -MARS="bafybeiawta2ntdmsy24aro35w3homzl4ak7svr3si7l7gesvq4erglyye4" -VENUS="bafybeicvkvhs2fr75ynebtdjqpgm4g2fc63abqbmysupwpmcjl4gx7mzrm" -add_directory '--cid-version=1 --raw-leaves=false' - -PLANETS="bafykbzaceaptbcs7ik5mdfpot3b4ackvxlwh7loc5jcrtkayf64ukl7zyk46e" -MARS="bafk2bzaceaqcxw46uzkyd2jmczoogof6pnkqt4dpiv3pwkunsv4g5rkkmecie" -VENUS="bafk2bzacebxnke2fb5mgzxyjuuavvcfht4fd3gvn4klkujz6k72wboynhuvfw" -add_directory '--hash=blake2b-256' - -test_expect_success "'ipfs add -rn' succeeds" ' - mkdir -p mountdir/moons/jupiter && - mkdir -p mountdir/moons/saturn && - echo "Hello Europa!" >mountdir/moons/jupiter/europa.txt && - echo "Hello Titan!" >mountdir/moons/saturn/titan.txt && - echo "hey youre no moon!" 
>mountdir/moons/mercury.txt &&
-  ipfs add -rn mountdir/moons >actual
-'
-
-test_expect_success "'ipfs add -rn' output looks good" '
-  MOONS="QmVKvomp91nMih5j6hYBA8KjbiaYvEetU2Q7KvtZkLe9nQ" &&
-  EUROPA="Qmbjg7zWdqdMaK2BucPncJQDxiALExph5k3NkQv5RHpccu" &&
-  JUPITER="QmS5mZddhFPLWFX3w6FzAy9QxyYkaxvUpsWCtZ3r7jub9J" &&
-  SATURN="QmaMagZT4rTE7Nonw8KGSK4oe1bh533yhZrCo1HihSG8FK" &&
-  TITAN="QmZzppb9WHn552rmRqpPfgU5FEiHH6gDwi3MrB9cTdPwdb" &&
-  MERCURY="QmUJjVtnN8YEeYcS8VmUeWffTWhnMQAkk5DzZdKnPhqUdK" &&
-  echo "added $EUROPA moons/jupiter/europa.txt" >expected &&
-  echo "added $MERCURY moons/mercury.txt" >>expected &&
-  echo "added $TITAN moons/saturn/titan.txt" >>expected &&
-  echo "added $JUPITER moons/jupiter" >>expected &&
-  echo "added $SATURN moons/saturn" >>expected &&
-  echo "added $MOONS moons" >>expected &&
-  test_cmp expected actual
-'
-
-test_expect_success "go-random is installed" '
-  type random
-'
-
-test_add_cat_5MB "" "QmSr7FqYkxYWGoSfy8ZiaMWQ5vosb18DQGCzjwEQnVHkTb"
-
-test_add_cat_5MB --raw-leaves "QmbdLHCmdi48eM8T7D67oXjA1S2Puo8eMfngdHhdPukFd6"
-
-# note: the specified hash implies that internal nodes are stored
-# using CidV1 and leaves are stored using raw blocks
-test_add_cat_5MB --cid-version=1 "bafybeigfnx3tka2rf5ovv2slb7ymrt4zbwa3ryeqibe6fipyt5vgsrli3u"
-
-# note: the specified hash implies that internal nodes are stored
-# using CidV1 and leaves are stored using CidV1 but using the legacy
-# format (i.e. not raw)
-test_add_cat_5MB '--cid-version=1 --raw-leaves=false' "bafybeieyifrgpjn3yengthr7qaj72ozm2aq3wm53srgeprc43w67qpvfqa"
-
-# note: --hash=blake2b-256 implies --cid-version=1 which implies --raw-leaves=true
-# the specified hash represents the leaf nodes stored as raw leaves and
-# encoded with the blake2b-256 hash function
-test_add_cat_5MB '--hash=blake2b-256' "bafykbzacebnmjcl4sn37b3ehtibvf263oun2w6idghenrvlpehq5w5jqyvhjo"
-
-# the specified hash represents the leaf nodes stored as protobuf nodes and
-# encoded with the blake2b-256 hash function
-test_add_cat_5MB '--hash=blake2b-256 --raw-leaves=false' "bafykbzaceaxiiykzgpbhnzlecffqm3zbuvhujyvxe5scltksyafagkyw4rjn2"
-
-test_add_cat_expensive "" "QmU9SWAPPmNEKZB8umYMmjYvN7VyHqABNvdA6GUi4MMEz3"
-
-# note: the specified hash implies that internal nodes are stored
-# using CidV1 and leaves are stored using raw blocks
-test_add_cat_expensive "--cid-version=1" "bafybeidkj5ecbhrqmzrcee2rw7qwsx24z3364qya3fnp2ktkg2tnsrewhi"
-
-# note: --hash=blake2b-256 implies --cid-version=1 which implies --raw-leaves=true
-# the specified hash represents the leaf nodes stored as raw leaves and
-# encoded with the blake2b-256 hash function
-test_add_cat_expensive '--hash=blake2b-256' "bafykbzaceb26fnq5hz5iopzamcb4yqykya5x6a4nvzdmcyuu4rj2akzs3z7r6"
-
-test_add_named_pipe
-
-test_add_pwd_is_symlink
-
-test_add_cat_raw
-
-test_expect_success "ipfs add --cid-version=9 fails" '
-  echo "context" > afile.txt &&
-  test_must_fail ipfs add --cid-version=9 afile.txt 2>&1 | tee add_out &&
-  grep -q "unknown CID version" add_out
-'
-
-test_kill_ipfs_daemon
-
-# should work offline
-
-test_add_cat_file
-
-test_add_cat_raw
-
-test_expect_success "ipfs add --only-hash succeeds" '
-  echo "unknown content for only-hash" | ipfs add --only-hash -q > oh_hash
-'
-
-test_add_cat_derefargs
-
-#TODO: this doesn't work when online hence separated out from test_add_cat_file
-test_expect_success "ipfs cat file fails" '
-  test_must_fail ipfs cat $(cat oh_hash)
-'
-
-test_add_named_pipe
-
-test_add_pwd_is_symlink
-
-# Test daemon in offline mode
-test_launch_ipfs_daemon_without_network - -test_add_cat_file - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0042-add-skip.sh b/test/sharness/t0042-add-skip.sh deleted file mode 100755 index 64d8e1a7c41..00000000000 --- a/test/sharness/t0042-add-skip.sh +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2014 Christian Couder -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test add and cat commands" - -. lib/test-lib.sh - -test_add_skip() { - - test_expect_success "'ipfs add -r' with hidden file succeeds" ' - mkdir -p mountdir/planets/.asteroids && - echo "mars.txt" >mountdir/planets/.gitignore && - echo "Hello Mars" >mountdir/planets/mars.txt && - echo "Hello Venus" >mountdir/planets/venus.txt && - echo "Hello Pluto" >mountdir/planets/.pluto.txt && - echo "Hello Charon" >mountdir/planets/.charon.txt && - echo "Hello Ceres" >mountdir/planets/.asteroids/ceres.txt && - echo "Hello Pallas" >mountdir/planets/.asteroids/pallas.txt && - ipfs add -r mountdir/planets >actual - ' - - test_expect_success "'ipfs add -r' did not include . files" ' - cat >expected <<-\EOF && -added QmZy3khu7qf696i5HtkgL2NotsCZ8wzvNZJ1eUdA5n8KaV planets/mars.txt -added QmQnv4m3Q5512zgVtpbJ9z85osQrzZzGRn934AGh6iVEXz planets/venus.txt -added QmR8nD1Vzk5twWVC6oShTHvv7mMYkVh6dApCByBJyV2oj3 planets -EOF - test_cmp expected actual - ' - - test_expect_success "'ipfs add -r --hidden' succeeds" ' - ipfs add -r --hidden mountdir/planets >actual - ' - - test_expect_success "'ipfs add -r --hidden' did include . files" ' - cat >expected <<-\EOF && -added QmcAREBcjgnUpKfyFmUGnfajA1NQS5ydqRp7WfqZ6JF8Dx planets/.asteroids/ceres.txt -added QmZ5eaLybJ5GUZBNwy24AA9EEDTDpA4B8qXnuN3cGxu2uF planets/.asteroids/pallas.txt -added QmaowqjedBkUrMUXgzt9c2ZnAJncM9jpJtkFfgdFstGr5a planets/.charon.txt -added QmPHrRjTH8FskN3C2iv6BLekDT94o23KSL2u5qLqQqGhVH planets/.gitignore -added QmU4zFD5eJtRBsWC63AvpozM9Atiadg9kPVTuTrnCYJiNF planets/.pluto.txt -added QmZy3khu7qf696i5HtkgL2NotsCZ8wzvNZJ1eUdA5n8KaV planets/mars.txt -added QmQnv4m3Q5512zgVtpbJ9z85osQrzZzGRn934AGh6iVEXz planets/venus.txt -added Qmf6rbs5GF85anDuoxpSAdtuZPM9D2Yt3HngzjUVSQ7kDV planets/.asteroids -added QmczhHaXyb3bc9APMxe4MXbr87V5YDLKLaw3DZX3fK7HrK planets -EOF - test_cmp expected actual - ' - - test_expect_success "'ipfs add -r --ignore-rules-path=.gitignore --hidden' succeeds" ' - (cd mountdir/planets && ipfs add -r --ignore-rules-path=.gitignore --hidden .) > actual - ' - - test_expect_success "'ipfs add -r --ignore-rules-path=.gitignore --hidden' did not include mars.txt file" ' - cat >expected <<-\EOF && -added QmcAREBcjgnUpKfyFmUGnfajA1NQS5ydqRp7WfqZ6JF8Dx planets/.asteroids/ceres.txt -added QmZ5eaLybJ5GUZBNwy24AA9EEDTDpA4B8qXnuN3cGxu2uF planets/.asteroids/pallas.txt -added QmaowqjedBkUrMUXgzt9c2ZnAJncM9jpJtkFfgdFstGr5a planets/.charon.txt -added QmPHrRjTH8FskN3C2iv6BLekDT94o23KSL2u5qLqQqGhVH planets/.gitignore -added QmU4zFD5eJtRBsWC63AvpozM9Atiadg9kPVTuTrnCYJiNF planets/.pluto.txt -added QmQnv4m3Q5512zgVtpbJ9z85osQrzZzGRn934AGh6iVEXz planets/venus.txt -added Qmf6rbs5GF85anDuoxpSAdtuZPM9D2Yt3HngzjUVSQ7kDV planets/.asteroids -added QmaRsiaCYvc65RqHVAcv2tqyjZgQYgvaNqW1tQGsjfy4N5 planets -EOF - test_cmp expected actual - ' - - test_expect_success "'ipfs add -r --ignore-rules-path=.gitignore --ignore .asteroids --ignore venus.txt --hidden' succeeds" ' - (cd mountdir/planets && ipfs add -r --ignore-rules-path=.gitignore --ignore .asteroids --ignore venus.txt --hidden .) 
> actual - ' - - test_expect_success "'ipfs add -r --ignore-rules-path=.gitignore --ignore .asteroids --ignore venus.txt --hidden' did not include ignored files" ' - cat >expected <<-\EOF && -added QmaowqjedBkUrMUXgzt9c2ZnAJncM9jpJtkFfgdFstGr5a planets/.charon.txt -added QmPHrRjTH8FskN3C2iv6BLekDT94o23KSL2u5qLqQqGhVH planets/.gitignore -added QmU4zFD5eJtRBsWC63AvpozM9Atiadg9kPVTuTrnCYJiNF planets/.pluto.txt -added QmemuMahjSh7eYLY3hbz2q8sqMPnbQzBQeUdosqNiWChE6 planets -EOF - test_cmp expected actual - ' - - test_expect_success "'ipfs add' includes hidden files given explicitly even without --hidden" ' - mkdir -p mountdir/dotfiles && - echo "set nocompatible" > mountdir/dotfiles/.vimrc - cat >expected <<-\EOF && -added QmT4uMRDCN7EMpFeqwvKkboszbqeW1kWVGrBxBuCGqZcQc .vimrc -EOF - ipfs add mountdir/dotfiles/.vimrc >actual - cat actual - test_cmp expected actual - ' - - test_expect_failure "'ipfs add' with an unregistered hash and wrapped leaves fails without crashing" ' - ipfs add --hash poseidon-bls12_381-a2-fc1 --raw-leaves=false -r mountdir/planets - ' - -} - -# should work offline -test_init_ipfs -test_add_skip - -# should work online -test_launch_ipfs_daemon -test_add_skip -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0043-add-w.sh b/test/sharness/t0043-add-w.sh deleted file mode 100755 index 1f13cae3a65..00000000000 --- a/test/sharness/t0043-add-w.sh +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2014 Christian Couder -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test add -w" - -add_w_m='QmazHkwx6mPmmCEi1jR5YzjjQd1g5XzKfYQLzRAg7x5uUk' - -add_w_1='added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93 -added Qmf82PSsMpUHcrqxa69KG6Qp5yeK7K9BTizXgG3nvzWcNG ' - -add_w_12='added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93 -added QmVb4ntSZZnT2J2zvCmXKMJc52cmZYH6AB37MzeYewnkjs 4u6ead -added QmZPASVB6EsADrLN8S2sak34zEHL8mx4TAVsPJU9cNnQQJ ' - -add_w_21='added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93 -added QmVb4ntSZZnT2J2zvCmXKMJc52cmZYH6AB37MzeYewnkjs 4u6ead -added QmZPASVB6EsADrLN8S2sak34zEHL8mx4TAVsPJU9cNnQQJ ' - -add_w_d1='added QmPcaX84tDiTfzdTn8GQxexodgeWH6mHjSss5Zfr5ojssb _jo7/-s782qgs -added QmaVBqquUuXKjkyWHXaXfsaQUxAnsCKS95VRDHU8PzGA4K _jo7/15totauzkak- -added QmaAHFG8cmhW3WLjofx5siSp44VV25ETN6ThzrU8iAqpkR _jo7/galecuirrj4r -added QmeuSfhJNKwBESp1W9H8cfoMdBfW3AeHQDWXbNXQJYWp53 _jo7/mzo50r-1xidf5zx -added QmYC3u5jGWuyFwvTxtvLYm2K3SpWZ31tg3NjpVVvh9cJaJ _jo7/wzvsihy -added QmQkib3f9XNX5sj6WEahLUPFpheTcwSRJwUCSvjcv8b9by _jo7 -added QmNQoesMj1qp8ApE51NbtTjFYksyzkezPD4cat7V2kzbKN ' - -add_w_d1_v1='added bafkreif7rizm7yeem72okzlwr2ls73cyemfyv5mjghdew3kzhtfznzz4dq _jo7/-s782qgs -added bafkreifkecyeevzcocvjliaz3ssiej5tkp32xyuogizonybihapdzovlsu _jo7/15totauzkak- -added bafkreif5xhyhjhqp3muvj52wp37nutafsznckeuhikrl3h6w2sx3xdyeqm _jo7/galecuirrj4r -added bafkreia6ooswgjtadq5n5zxkn2qyw3dpuyutvam7grtxn36ywykv52vkje _jo7/mzo50r-1xidf5zx -added bafkreibhvbkg6zgra4bu56a36h25g52g6yxsb25qvgqv2trx4zbmhkmxku _jo7/wzvsihy -added bafybeietuhja6ipwwnxefjecz6c5yls4j4q7r5gxiesyzfzkwsaimpa5mu _jo7 -added bafybeihxnrujsxdwyzuf3rq6wigzitrj6vjvxphttrtsx6tqabzpqfbd54 ' - -add_w_d2='added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93 -added QmU9Jqks8TPu4vFr6t7EKkAKQrSJuEujNj1AkzoCeTEDFJ gnz66h/1k0xpx34 -added QmSLYZycXAufRw3ePMVH2brbtYWCcWsmksGLbHcT8ia9Ke gnz66h/9cwudvacx -added QmfYmpCCAMU9nLe7xbrYsHf5z2R2GxeQnsm4zavUhX9vq2 gnz66h/9ximv51cbo8 -added QmWgEE4e2kfx3b8HZcBk5cLrfhoi8kTMQP2MipgPhykuV3 
gnz66h/b54ygh6gs -added QmcLbqEqhREGednc6mrVtanee4WHKp5JnUfiwTTHCJwuDf gnz66h/lbl5 -added QmPcaX84tDiTfzdTn8GQxexodgeWH6mHjSss5Zfr5ojssb _jo7/-s782qgs -added QmaVBqquUuXKjkyWHXaXfsaQUxAnsCKS95VRDHU8PzGA4K _jo7/15totauzkak- -added QmaAHFG8cmhW3WLjofx5siSp44VV25ETN6ThzrU8iAqpkR _jo7/galecuirrj4r -added QmeuSfhJNKwBESp1W9H8cfoMdBfW3AeHQDWXbNXQJYWp53 _jo7/mzo50r-1xidf5zx -added QmYC3u5jGWuyFwvTxtvLYm2K3SpWZ31tg3NjpVVvh9cJaJ _jo7/wzvsihy -added QmVaKAt2eVftNKFfKhiBV7Mu5HjCugffuLqWqobSSFgiA7 h3qpecj0 -added QmQkib3f9XNX5sj6WEahLUPFpheTcwSRJwUCSvjcv8b9by _jo7 -added QmVPwNy8pZegpsNmsjjZvdTQn4uCeuZgtzhgWhRSQWjK9x gnz66h -added QmTmc46fhKC8Liuh5soy1VotdnHcqLu3r6HpPGwDZCnqL1 ' - -add_w_r='QmcCksBMDuuyuyfAMMNzEAx6Z7jTrdRy9a23WpufAhG9ji' - -. lib/test-lib.sh - -test_add_w() { - - test_expect_success "go-random-files is installed" ' - type random-files - ' - - test_expect_success "random-files generates test files" ' - random-files --seed 7547632 --files 5 --dirs 2 --depth 3 m && - echo "$add_w_m" >expected && - ipfs add -Q -r m >actual && - test_sort_cmp expected actual - ' - - # test single file - test_expect_success "ipfs add -w (single file) succeeds" ' - ipfs add -w m/4r93 >actual - ' - - test_expect_success "ipfs add -w (single file) is correct" ' - echo "$add_w_1" >expected && - test_sort_cmp expected actual - ' - - # test two files together - test_expect_success "ipfs add -w (multiple) succeeds" ' - ipfs add -w m/4r93 m/4u6ead >actual - ' - - test_expect_success "ipfs add -w (multiple) is correct" ' - echo "$add_w_12" >expected && - test_sort_cmp expected actual - ' - - test_expect_success "ipfs add -w (multiple) succeeds" ' - ipfs add -w m/4u6ead m/4r93 >actual - ' - - test_expect_success "ipfs add -w (multiple) orders" ' - echo "$add_w_21" >expected && - test_sort_cmp expected actual - ' - - # test a directory - test_expect_success "ipfs add -w -r (dir) succeeds" ' - ipfs add -r -w m/t_1wp-8a2/_jo7 >actual - ' - - test_expect_success "ipfs add -w -r (dir) is correct" ' - echo "$add_w_d1" >expected && - test_sort_cmp expected actual - ' - - # test files and directory - test_expect_success "ipfs add -w -r succeeds" ' - ipfs add -w -r m/t_1wp-8a2/h3qpecj0 \ - m/ha6f0x7su6/gnz66h m/t_1wp-8a2/_jo7 m/4r93 >actual - ' - - test_expect_success "ipfs add -w -r is correct" ' - echo "$add_w_d2" >expected && - test_sort_cmp expected actual - ' - - # test -w -r m/* == -r m - test_expect_success "ipfs add -w -r m/* == add -r m succeeds" ' - ipfs add -Q -w -r m/* >actual - ' - - test_expect_success "ipfs add -w -r m/* == add -r m is correct" ' - echo "$add_w_m" >expected && - test_sort_cmp expected actual - ' - - # test repeats together - test_expect_success "ipfs add -w (repeats) succeeds" ' - ipfs add -Q -w -r m/t_1wp-8a2/h3qpecj0 m/ha6f0x7su6/gnz66h \ - m/t_1wp-8a2/_jo7 m/4r93 m/t_1wp-8a2 m/t_1wp-8a2 m/4r93 \ - m/4r93 m/ha6f0x7su6/_rwujlf3qh_g08 \ - m/ha6f0x7su6/gnz66h/9cwudvacx >actual - ' - - test_expect_success "ipfs add -w (repeats) is correct" ' - echo "$add_w_r" >expected && - test_sort_cmp expected actual - ' - - test_expect_success "ipfs add -w -r (dir) --cid-version=1 succeeds" ' - ipfs add -r -w --cid-version=1 m/t_1wp-8a2/_jo7 >actual - ' - - test_expect_success "ipfs add -w -r (dir) --cid-version=1 is correct" ' - echo "$add_w_d1_v1" >expected && - test_sort_cmp expected actual - ' - - test_expect_success "ipfs add -w -r -n (dir) --cid-version=1 succeeds" ' - ipfs add -r -w -n --cid-version=1 m/t_1wp-8a2/_jo7 >actual - ' - - test_expect_success "ipfs add -w -r -n (dir) --cid-version=1 is 
correct" ' - echo "$add_w_d1_v1" > expected && - test_sort_cmp expected actual - ' -} - -test_init_ipfs - -test_add_w - -test_launch_ipfs_daemon - -test_add_w - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0044-add-symlink.sh b/test/sharness/t0044-add-symlink.sh deleted file mode 100755 index 0e5c6efec44..00000000000 --- a/test/sharness/t0044-add-symlink.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2014 Christian Couder -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test add -w" - -. lib/test-lib.sh - -test_expect_success "creating files succeeds" ' - mkdir -p files/foo && - mkdir -p files/bar && - echo "some text" > files/foo/baz && - ln -s files/foo/baz files/bar/baz && - ln -s files/does/not/exist files/bad -' - -test_add_symlinks() { - test_expect_success "ipfs add files succeeds" ' - ipfs add -Q -r files >filehash_out - ' - - test_expect_success "output looks good" ' - echo QmWdiHKoeSW8G1u7ATCgpx4yMoUhYaJBQGkyPLkS9goYZ8 > filehash_exp && - test_cmp filehash_exp filehash_out - ' - - test_expect_success "ipfs add --cid-version=1 files succeeds" ' - ipfs add -Q -r --cid-version=1 files >filehash_out - ' - - test_expect_success "output looks good" ' - # note this hash implies all internal nodes are stored using CidV1 - echo bafybeibyhlx64cklod6isy3h7tsmr4qvam3ae3b74n3hfes5bythjrwyua > filehash_exp && - test_cmp filehash_exp filehash_out - ' - - test_expect_success "adding a symlink adds the link itself" ' - ipfs add -q files/bar/baz > goodlink_out - ' - - test_expect_success "output looks good" ' - echo "QmdocmZeF7qwPT9Z8SiVhMSyKA2KKoA2J7jToW6z6WBmxR" > goodlink_exp && - test_cmp goodlink_exp goodlink_out - ' - - test_expect_success "adding a broken symlink works" ' - ipfs add -q files/bad > badlink_out - ' - - test_expect_success "output looks good" ' - echo "QmWYN8SEXCgNT2PSjB6BnxAx6NJQtazWoBkTRH9GRfPFFQ" > badlink_exp && - test_cmp badlink_exp badlink_out - ' - - test_expect_success "adding with symlink in middle of path is same as\ -adding with no symlink" ' - mkdir -p files2/a/b/c && - echo "some other text" > files2/a/b/c/foo && - ln -s b files2/a/d - ipfs add -rq files2/a/b/c > no_sym && - ipfs add -rq files2/a/d/c > sym && - test_cmp no_sym sym - ' -} - -test_init_ipfs - -test_add_symlinks - -test_launch_ipfs_daemon - -test_add_symlinks - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0045-ls.sh b/test/sharness/t0045-ls.sh deleted file mode 100755 index 5e02ad167cd..00000000000 --- a/test/sharness/t0045-ls.sh +++ /dev/null @@ -1,344 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2014 Christian Couder -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test ls command" - -. 
lib/test-lib.sh - -test_init_ipfs - -test_ls_cmd() { - test_expect_success "'ipfs add -r testData' succeeds" ' - mkdir -p testData testData/d1 testData/d2 && - echo "test" >testData/f1 && - echo "data" >testData/f2 && - echo "hello" >testData/d1/a && - random 128 42 >testData/d1/128 && - echo "world" >testData/d2/a && - random 1024 42 >testData/d2/1024 && - echo "badname" >testData/d2/`echo -e "bad\x7fname.txt"` && - ipfs add -r testData >actual_add - ' - - test_expect_success "'ipfs add' output looks good" ' - cat <<-\EOF >expected_add && -added QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe testData/d1/128 -added QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN testData/d1/a -added QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd testData/d2/1024 -added QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL testData/d2/a -added QmQSLRRd1Lxn6NMsWmmj2g9W3LtSRfmVAVqU3ShneLUrbn testData/d2/bad\x7fname.txt -added QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH testData/f1 -added QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M testData/f2 -added QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss testData/d1 -added Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy testData/d2 -added QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21 testData -EOF - test_cmp expected_add actual_add - ' - - test_expect_success "'ipfs ls ' succeeds" ' - ipfs ls QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21 Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss >actual_ls - ' - - test_expect_success "'ipfs ls ' output looks good" ' - cat <<-\EOF >expected_ls && -QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21: -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss - d1/ -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy - d2/ -QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH 5 f1 -QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M 5 f2 - -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy: -QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd 1024 1024 -QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL 6 a -QmQSLRRd1Lxn6NMsWmmj2g9W3LtSRfmVAVqU3ShneLUrbn 8 bad\x7fname.txt - -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss: -QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe 128 128 -QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN 6 a -EOF - test_cmp expected_ls actual_ls - ' - - test_expect_success "'ipfs ls --size=false ' succeeds" ' - ipfs ls --size=false QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21 Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss >actual_ls - ' - - test_expect_success "'ipfs ls ' output looks good" ' - cat <<-\EOF >expected_ls && -QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21: -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss d1/ -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy d2/ -QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH f1 -QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M f2 - -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy: -QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd 1024 -QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL a -QmQSLRRd1Lxn6NMsWmmj2g9W3LtSRfmVAVqU3ShneLUrbn bad\x7fname.txt - -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss: -QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe 128 -QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN a -EOF - test_cmp expected_ls actual_ls - ' - - test_expect_success "'ipfs ls --headers ' succeeds" ' - ipfs ls --headers QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21 Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss >actual_ls_headers - ' - - test_expect_success "'ipfs ls --headers ' 
output looks good" ' - cat <<-\EOF >expected_ls_headers && -QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21: -Hash Size Name -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss - d1/ -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy - d2/ -QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH 5 f1 -QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M 5 f2 - -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy: -Hash Size Name -QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd 1024 1024 -QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL 6 a -QmQSLRRd1Lxn6NMsWmmj2g9W3LtSRfmVAVqU3ShneLUrbn 8 bad\x7fname.txt - -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss: -Hash Size Name -QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe 128 128 -QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN 6 a -EOF - test_cmp expected_ls_headers actual_ls_headers - ' - - test_expect_success "'ipfs ls --size=false --cid-base=base32 ' succeeds" ' - ipfs ls --size=false --cid-base=base32 $(cid-fmt -v 1 -b base32 %s QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21 Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss) >actual_ls_base32 - ' - - test_expect_success "'ipfs ls --size=false --cid-base=base32 ' output looks good" ' - cid-fmt -b base32 -v 1 --filter %s < expected_ls > expected_ls_base32 - test_cmp expected_ls_base32 actual_ls_base32 - ' -} - - -test_ls_cmd_streaming() { - - test_expect_success "'ipfs add -r testData' succeeds" ' - mkdir -p testData testData/d1 testData/d2 && - echo "test" >testData/f1 && - echo "data" >testData/f2 && - echo "hello" >testData/d1/a && - random 128 42 >testData/d1/128 && - echo "world" >testData/d2/a && - random 1024 42 >testData/d2/1024 && - echo "badname" >testData/d2/`echo -e "bad\x7fname.txt"` && - ipfs add -r testData >actual_add - ' - - test_expect_success "'ipfs add' output looks good" ' - cat <<-\EOF >expected_add && -added QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe testData/d1/128 -added QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN testData/d1/a -added QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd testData/d2/1024 -added QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL testData/d2/a -added QmQSLRRd1Lxn6NMsWmmj2g9W3LtSRfmVAVqU3ShneLUrbn testData/d2/bad\x7fname.txt -added QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH testData/f1 -added QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M testData/f2 -added QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss testData/d1 -added Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy testData/d2 -added QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21 testData -EOF - test_cmp expected_add actual_add - ' - - test_expect_success "'ipfs ls --stream ' succeeds" ' - ipfs ls --stream QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21 Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss >actual_ls_stream - ' - - test_expect_success "'ipfs ls --stream ' output looks good" ' - cat <<-\EOF >expected_ls_stream && -QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21: -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss - d1/ -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy - d2/ -QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH 5 f1 -QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M 5 f2 - -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy: -QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd 1024 1024 -QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL 6 a -QmQSLRRd1Lxn6NMsWmmj2g9W3LtSRfmVAVqU3ShneLUrbn 8 bad\x7fname.txt - -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss: -QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe 128 128 
-QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN 6 a -EOF - test_cmp expected_ls_stream actual_ls_stream - ' - - test_expect_success "'ipfs ls --size=false --stream ' succeeds" ' - ipfs ls --size=false --stream QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21 Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss >actual_ls_stream - ' - - test_expect_success "'ipfs ls --size=false --stream ' output looks good" ' - cat <<-\EOF >expected_ls_stream && -QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21: -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss d1/ -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy d2/ -QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH f1 -QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M f2 - -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy: -QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd 1024 -QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL a -QmQSLRRd1Lxn6NMsWmmj2g9W3LtSRfmVAVqU3ShneLUrbn bad\x7fname.txt - -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss: -QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe 128 -QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN a -EOF - test_cmp expected_ls_stream actual_ls_stream - ' - - test_expect_success "'ipfs ls --stream --headers ' succeeds" ' - ipfs ls --stream --headers QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21 Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss >actual_ls_stream_headers - ' - - test_expect_success "'ipfs ls --stream --headers ' output looks good" ' - cat <<-\EOF >expected_ls_stream_headers && -QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21: -Hash Size Name -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss - d1/ -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy - d2/ -QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH 5 f1 -QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M 5 f2 - -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy: -Hash Size Name -QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd 1024 1024 -QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL 6 a -QmQSLRRd1Lxn6NMsWmmj2g9W3LtSRfmVAVqU3ShneLUrbn 8 bad\x7fname.txt - -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss: -Hash Size Name -QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe 128 128 -QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN 6 a -EOF - test_cmp expected_ls_stream_headers actual_ls_stream_headers - ' -} - -test_ls_cmd_raw_leaves() { - test_expect_success "'ipfs add -r --raw-leaves' then 'ipfs ls' works as expected" ' - mkdir -p somedir && - echo bar > somedir/foo && - ipfs add --raw-leaves -r somedir/ > /dev/null && - ipfs ls '$1' QmThNTdtKaVoCVrYmM5EBS6U3S5vfKFue2TxbxxAxRcKKE > ls-actual - echo "bafkreid5qzpjlgzem2iyzgddv7fjilipxcoxzgwazgn27q3usucn5wlxga 4 foo" > ls-expect - test_cmp ls-actual ls-expect - ' -} - -test_ls_object() { - test_expect_success "ipfs add medium size file then 'ipfs ls --size=false' works as expected" ' - random 500000 2 > somefile && - HASH=$(ipfs add somefile -q) && - echo "QmPrM8S5T7Q3M8DQvQMS7m41m3Aq4jBjzAzvky5fH3xfr4 " > ls-expect && - echo "QmdaAntAzQqqVMo4B8V69nkQd5d918YjHXUe2oF6hr72ri " >> ls-expect && - ipfs ls --size=false $HASH > ls-actual && - test_cmp ls-actual ls-expect - ' - - test_expect_success "ipfs add medium size file then 'ipfs ls' works as expected" ' - random 500000 2 > somefile && - HASH=$(ipfs add somefile -q) && - echo "QmPrM8S5T7Q3M8DQvQMS7m41m3Aq4jBjzAzvky5fH3xfr4 262144 " > ls-expect && - echo "QmdaAntAzQqqVMo4B8V69nkQd5d918YjHXUe2oF6hr72ri 237856 " >> ls-expect && - ipfs ls $HASH > ls-actual && - test_cmp ls-actual ls-expect - ' -} - -# should work 
offline -test_ls_cmd -test_ls_cmd_streaming -test_ls_cmd_raw_leaves -test_ls_cmd_raw_leaves --size -test_ls_object - -# should work online -test_launch_ipfs_daemon -test_ls_cmd -test_ls_cmd_streaming -test_ls_cmd_raw_leaves -test_ls_cmd_raw_leaves --size -test_kill_ipfs_daemon -test_ls_object - -# -# test for ls --resolve-type=false -# - -test_expect_success "'ipfs add -r' succeeds" ' - mkdir adir && - # note: not using a seed as the files need to have truly random content - random 1000 > adir/file1 && - random 1000 > adir/file2 && - ipfs add --pin=false -q -r adir > adir-hashes -' - -test_expect_success "get hashes from add output" ' - FILE=`head -1 adir-hashes` && - DIR=`tail -1 adir-hashes` && - test "$FILE" -a "$DIR" -' - -test_expect_success "remove a file in dir" ' - ipfs block rm $FILE -' - -test_expect_success "'ipfs ls --resolve-type=false ' fails" ' - test_must_fail ipfs ls --resolve-type=false $DIR > /dev/null -' - -test_expect_success "'ipfs ls' fails" ' - test_must_fail ipfs ls $DIR -' - -test_expect_success "'ipfs ls --resolve-type=true --size=false' fails" ' - test_must_fail ipfs ls --resolve-type=true --size=false $DIR -' - -test_launch_ipfs_daemon_without_network - -test_expect_success "'ipfs ls --resolve-type=false --size=false' ok" ' - ipfs ls --resolve-type=false --size=false $DIR > /dev/null -' - -test_expect_success "'ipfs ls' fails" ' - test_must_fail ipfs ls $DIR -' - -test_expect_success "'ipfs ls --resolve-type=false --size=true' fails" ' - test_must_fail ipfs ls --resolve-type=false --size=true $DIR -' - -test_kill_ipfs_daemon - -test_launch_ipfs_daemon - -# now we try `ipfs ls --resolve-type=false` with the daemon online It -# should not even attempt to retrieve the file from the network. If -# it does it should eventually fail as the content is random and -# should not exist on the network, but we don't want to wait for a -# timeout so we will kill the request after a few seconds -test_expect_success "'ipfs ls --resolve-type=false --size=false' ok and does not hang" ' - go-timeout 2 ipfs ls --resolve-type=false --size=false $DIR -' - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0046-id-hash.sh b/test/sharness/t0046-id-hash.sh deleted file mode 100755 index d4c28f21507..00000000000 --- a/test/sharness/t0046-id-hash.sh +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test basic operations with identity hash" - -. 
lib/test-lib.sh - -test_init_ipfs - -ID_HASH0=bafkqaedknncdsodknncdsnzvnbvuioak -ID_HASH0_CONTENTS=jkD98jkD975hkD8 - -test_expect_success "can fetch random identity hash" ' - ipfs cat $ID_HASH0 > expected && - echo $ID_HASH0_CONTENTS > actual && - test_cmp expected actual -' - -test_expect_success "can pin random identity hash" ' - ipfs pin add $ID_HASH0 -' - -test_expect_success "ipfs add succeeds with identity hash" ' - echo "djkd7jdkd7jkHHG" > junk.txt && - HASH=$(ipfs add -q --hash=identity junk.txt) -' - -test_expect_success "content not actually added" ' - ipfs refs local | fgrep -q -v $HASH -' - -test_expect_success "but can fetch it anyway" ' - ipfs cat $HASH > actual && - test_cmp junk.txt actual -' - -test_expect_success "block rm does nothing" ' - ipfs pin rm $HASH && - ipfs block rm $HASH -' - -test_expect_success "can still fetch it" ' - ipfs cat $HASH > actual - test_cmp junk.txt actual -' - -test_expect_success "ipfs add --inline works as expected" ' - echo $ID_HASH0_CONTENTS > afile && - HASH=$(ipfs add -q --inline afile) -' - -test_expect_success "ipfs add --inline uses identity multihash" ' - MHTYPE=`cid-fmt %h $HASH` - echo "mhtype is $MHTYPE" - test "$MHTYPE" = identity -' - -test_expect_success "ipfs add --inline --raw-leaves works as expected" ' - echo $ID_HASH0_CONTENTS > afile && - HASH=$(ipfs add -q --inline --raw-leaves afile) -' - -test_expect_success "ipfs add --inline --raw-leaves outputs the correct hash" ' - echo "$ID_HASH0" = "$HASH" && - test "$ID_HASH0" = "$HASH" -' - -test_expect_success "create 1000 bytes file and get its hash" ' - random 1000 2 > 1000bytes && - HASH0=$(ipfs add -q --raw-leaves --only-hash 1000bytes) -' - -test_expect_success "ipfs add --inline --raw-leaves works as expected on large file" ' - HASH=$(ipfs add -q --inline --raw-leaves 1000bytes) -' - -test_expect_success "ipfs add --inline --raw-leaves outputs the correct hash on large file" ' - echo "$HASH0" = "$HASH" && - test "$HASH0" = "$HASH" -' - -test_expect_success "enable filestore" ' - ipfs config --json Experimental.FilestoreEnabled true -' - -test_expect_success "can fetch random identity hash (filestore enabled)" ' - ipfs cat $ID_HASH0 > expected && - echo $ID_HASH0_CONTENTS > actual && - test_cmp expected actual -' - -test_expect_success "can pin random identity hash (filestore enabled)" ' - ipfs pin add $ID_HASH0 -' - -test_expect_success "ipfs add succeeds with identity hash and --nocopy" ' - echo "djkd7jdkd7jkHHG" > junk.txt && - HASH=$(ipfs add -q --hash=identity --nocopy junk.txt) -' - -test_expect_success "content not actually added (filestore enabled)" ' - ipfs refs local | fgrep -q -v $HASH -' - -test_expect_success "but can fetch it anyway (filestore enabled)" ' - ipfs cat $HASH > actual && - test_cmp junk.txt actual -' - -test_done diff --git a/test/sharness/t0047-add-mode-mtime.sh b/test/sharness/t0047-add-mode-mtime.sh deleted file mode 100755 index 520c692f3bc..00000000000 --- a/test/sharness/t0047-add-mode-mtime.sh +++ /dev/null @@ -1,513 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test storing and retrieving mode and mtime" - -. 
lib/test-lib.sh - -test_init_ipfs - -test_expect_success "set Import defaults to ensure deterministic cids for mod and mtime tests" ' - ipfs config --json Import.CidVersion 0 && - ipfs config Import.HashFunction sha2-256 && - ipfs config Import.UnixFSChunker size-262144 -' - -HASH_NO_PRESERVE=QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH - -PRESERVE_MTIME=1604320482 -PRESERVE_MODE="0640" -HASH_PRESERVE_MODE=QmQLgxypSNGNFTuUPGCecq6dDEjb6hNB5xSyVmP3cEuNtq -HASH_PRESERVE_MTIME=QmQ6kErEW8kztQFV8vbwNU8E4dmtGsYpRiboiLxUEwibvj -HASH_PRESERVE_LINK_MTIME=QmbJwotgtr84JxcnjpwJ86uZiyMoxbZuNH4YrdJMypkYaB -HASH_PRESERVE_MODE_AND_MTIME=QmYkvboLsvLFcSYmqVJRxvBdYRQLroLv9kELf3LRiCqBri - -CUSTOM_MTIME=1603539720 -CUSTOM_MTIME_NSECS=54321 -CUSTOM_MODE="0764" -HASH_CUSTOM_MODE=QmchD3BN8TQ3RW6jPLxSaNkqvfuj7syKhzTRmL4EpyY1Nz -HASH_CUSTOM_MTIME=QmT3aY4avDcYXCWpU8CJzqUkW7YEuEsx36S8cTNoLcuK1B -HASH_CUSTOM_MTIME_NSECS=QmaKH8H5rXBUBCX4vdxi7ktGQEL7wejV7L9rX2qpZjwncz -HASH_CUSTOM_MODE_AND_MTIME=QmUkxrtBA8tPjwCYz1HrsoRfDz6NgKut3asVeHVQNH4C8L -HASH_CUSTOM_LINK_MTIME=QmV1Uot2gy4bhY9yvYiZxhhchhyYC6MKKoGV1XtWNmpCLe -HASH_CUSTOM_LINK_MTIME_NSECS=QmPHYCxYvvHj6VxiPNJ3kXxcPsnJLDYUJqsDJWjvytmrmY - -mk_name() { - tr -dc '[:alnum:]'expected_in && - ipfs block put expected_out && - test_cmp expected_out actual_out -' - -test_expect_success "'ipfs block put' with 2 files succeeds" ' - echo "Hello Mars!" > a && - echo "Hello Venus!" > b && - ipfs block put a b | tee actual_out -' - -test_expect_success "'ipfs block put' output looks good" ' - echo "$HASH" >expected_out && - echo "$HASHB" >>expected_out && - test_cmp expected_out actual_out -' - -test_expect_success "can set cid codec on block put" ' - CODEC_HASH=$(ipfs block put --cid-codec=dag-pb ../t0050-block-data/testPut.pb) -' - -test_expect_success "block get output looks right" ' - ipfs block get $CODEC_HASH > pb_block_out && - test_cmp pb_block_out ../t0050-block-data/testPut.pb -' - -# -# "block get" tests -# - -test_expect_success "'ipfs block get' succeeds" ' - ipfs block get $HASH >actual_in -' - -test_expect_success "'ipfs block get' output looks good" ' - test_cmp expected_in actual_in -' - -# -# "block stat" tests -# - -test_expect_success "'ipfs block stat' succeeds" ' - ipfs block stat $HASH >actual_stat -' - -test_expect_success "'ipfs block stat' output looks good" ' - echo "Key: $HASH" >expected_stat && - echo "Size: 12" >>expected_stat && - test_cmp expected_stat actual_stat -' - -# -# "block rm" tests -# - -test_expect_success "'ipfs block rm' succeeds" ' - ipfs block rm $HASH >actual_rm -' - -test_expect_success "'ipfs block rm' output looks good" ' - echo "removed $HASH" > expected_rm && - test_cmp expected_rm actual_rm -' - -test_expect_success "'ipfs block rm' block actually removed" ' - test_must_fail ipfs block stat $HASH -' - -RANDOMHASH=QmRKqGMAM6EbngbZjSqrvYzq5Qd8b1bSWymjSUY9zQSNDq -DIRHASH=QmdWmVmM6W2abTgkEfpbtA1CJyTWS2rhuUB9uP1xV8Uwtf -FILE1HASH=Qmae3RedM7SNkWGsdzYzsr6svmsFdsva4WoTvYYsWhUSVz -FILE2HASH=QmUtkGLvPf63NwVzLPKPUYgwhn8ZYPWF6vKWN3fZ2amfJF -FILE3HASH=Qmesmmf1EEG1orJb6XdK6DabxexsseJnCfw8pqWgonbkoj -TESTHASH=QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH - -test_expect_success "add and pin directory" ' - echo "test" | ipfs add --pin=false && - mkdir adir && - echo "file1" > adir/file1 && - echo "file2" > adir/file2 && - echo "file3" > adir/file3 && - ipfs add -r adir - ipfs pin add -r $DIRHASH -' - -test_expect_success "can't remove pinned block" ' - test_must_fail ipfs block rm $DIRHASH 2> block_rm_err -' - -test_expect_success "can't remove 
pinned block: output looks good" ' - grep -q "$DIRHASH: pinned: recursive" block_rm_err -' - -test_expect_success "can't remove indirectly pinned block" ' - test_must_fail ipfs block rm $FILE1HASH 2> block_rm_err -' - -test_expect_success "can't remove indirectly pinned block: output looks good" ' - grep -q "$FILE1HASH: pinned via $DIRHASH" block_rm_err -' - -test_expect_success "remove pin" ' - ipfs pin rm -r $DIRHASH -' - -test_expect_success "multi-block 'ipfs block rm' succeeds" ' - ipfs block rm $FILE1HASH $FILE2HASH $FILE3HASH > actual_rm -' - -test_expect_success "multi-block 'ipfs block rm' output looks good" ' - grep -F -q "removed $FILE1HASH" actual_rm && - grep -F -q "removed $FILE2HASH" actual_rm && - grep -F -q "removed $FILE3HASH" actual_rm -' - -test_expect_success "multi-block 'ipfs block rm '" ' - test_must_fail ipfs block rm $RANDOMHASH $TESTHASH $RANDOMHASH &> actual_mixed_rm -' - -test_expect_success "multi-block 'ipfs block rm ' output looks good" ' - echo "cannot remove $RANDOMHASH: ipld: could not find $RANDOMHASH" >> expect_mixed_rm && - echo "removed $TESTHASH" >> expect_mixed_rm && - echo "cannot remove $RANDOMHASH: ipld: could not find $RANDOMHASH" >> expect_mixed_rm && - echo "Error: some blocks not removed" >> expect_mixed_rm - test_cmp actual_mixed_rm expect_mixed_rm -' - -test_expect_success "'add some blocks' succeeds" ' - echo "Hello Mars!" | ipfs block put && - echo "Hello Venus!" | ipfs block put -' - -test_expect_success "add and pin directory" ' - ipfs add -r adir - ipfs pin add -r $DIRHASH -' - -HASH=QmRKqGMAM6EZngbpjSqrvYzq5Qd8b1bSWymjSUY9zQSNDk -HASH2=QmdnpnsaEj69isdw5sNzp3h3HkaDz7xKq7BmvFFBzNr5e7 - -test_expect_success "multi-block 'ipfs block rm' mixed" ' - test_must_fail ipfs block rm $FILE1HASH $DIRHASH $HASH $FILE3HASH $RANDOMHASH $HASH2 2> block_rm_err -' - -test_expect_success "pinned block not removed" ' - ipfs block stat $FILE1HASH && - ipfs block stat $FILE3HASH -' - -test_expect_success "non-pinned blocks removed" ' - test_must_fail ipfs block stat $HASH && - test_must_fail ipfs block stat $HASH2 -' - -test_expect_success "error reported on removing non-existent block" ' - grep -q "cannot remove $RANDOMHASH" block_rm_err -' - -test_expect_success "'add some blocks' succeeds" ' - echo "Hello Mars!" | ipfs block put && - echo "Hello Venus!" | ipfs block put -' - -test_expect_success "multi-block 'ipfs block rm -f' with non existent blocks succeed" ' - ipfs block rm -f $HASH $RANDOMHASH $HASH2 -' - -test_expect_success "existent blocks removed" ' - test_must_fail ipfs block stat $HASH && - test_must_fail ipfs block stat $HASH2 -' - -test_expect_success "'add some blocks' succeeds" ' - echo "Hello Mars!" | ipfs block put && - echo "Hello Venus!" | ipfs block put -' - -test_expect_success "multi-block 'ipfs block rm -q' produces no output" ' - ipfs block rm -q $HASH $HASH2 > block_rm_out && - test ! -s block_rm_out -' - -# --format used 'protobuf' for 'dag-pb' which was invalid, but we keep -# for backward-compatibility -test_expect_success "can set deprecated --format=protobuf on block put" ' - HASH=$(ipfs block put --format=protobuf ../t0050-block-data/testPut.pb) -' - -test_expect_success "created an object correctly!" 
' - ipfs dag get $HASH > obj_out && - echo -n "{\"Data\":{\"/\":{\"bytes\":\"dGVzdCBqc29uIGZvciBzaGFybmVzcyB0ZXN0\"}},\"Links\":[]}" > obj_exp && - test_cmp obj_out obj_exp -' - -test_expect_success "block get output looks right" ' - ipfs block get $HASH > pb_block_out && - test_cmp pb_block_out ../t0050-block-data/testPut.pb -' - -test_expect_success "can set --cid-codec=dag-pb on block put" ' - HASH=$(ipfs block put --cid-codec=dag-pb ../t0050-block-data/testPut.pb) -' - -test_expect_success "created an object correctly!" ' - ipfs dag get $HASH > obj_out && - echo -n "{\"Data\":{\"/\":{\"bytes\":\"dGVzdCBqc29uIGZvciBzaGFybmVzcyB0ZXN0\"}},\"Links\":[]}" > obj_exp && - test_cmp obj_out obj_exp -' - -test_expect_success "block get output looks right" ' - ipfs block get $HASH > pb_block_out && - test_cmp pb_block_out ../t0050-block-data/testPut.pb -' - -test_expect_success "can set multihash type and length on block put with --format=raw (deprecated)" ' - HASH=$(echo "foooo" | ipfs block put --format=raw --mhtype=sha3 --mhlen=20) -' - -test_expect_success "output looks good" ' - test "bafkrifctrq4xazzixy2v4ezymjcvzpskqdwlxra" = "$HASH" -' - -test_expect_success "can't use both legacy format and custom cid-codec at the same time" ' - test_expect_code 1 ipfs block put --format=dag-cbor --cid-codec=dag-json < ../t0050-block-data/testPut.pb 2> output && - test_should_contain "unable to use \"format\" (deprecated) and a custom \"cid-codec\" at the same time" output -' - -test_expect_success "can read block with different hash" ' - ipfs block get $HASH > blk_get_out && - echo "foooo" > blk_get_exp && - test_cmp blk_get_exp blk_get_out -' -# -# Misc tests -# - -test_expect_success "'ipfs block stat' with nothing from stdin doesn't crash" ' - test_expect_code 1 ipfs block stat < /dev/null 2> stat_out -' - -# lol -test_expect_success "no panic in output" ' - test_expect_code 1 grep "panic" stat_out -' - -test_expect_success "can set multihash type and length on block put without format or cid-codec" ' - HASH=$(echo "foooo" | ipfs block put --mhtype=sha3 --mhlen=20) -' - -test_expect_success "output looks good" ' - test "bafkrifctrq4xazzixy2v4ezymjcvzpskqdwlxra" = "$HASH" -' - -test_expect_success "can set multihash type and length on block put with cid-codec=dag-pb" ' - HASH=$(echo "foooo" | ipfs block put --mhtype=sha3 --mhlen=20 --cid-codec=dag-pb) -' - -test_expect_success "output looks good" ' - test "bafybifctrq4xazzixy2v4ezymjcvzpskqdwlxra" = "$HASH" -' - -test_expect_success "put with sha3 and cidv0 fails" ' - echo "foooo" | test_must_fail ipfs block put --mhtype=sha3 --mhlen=20 --format=v0 -' - -test_expect_success "'ipfs block put' check block size" ' - dd if=/dev/zero bs=2MB count=1 > 2-MB-file && - test_expect_code 1 ipfs block put 2-MB-file >block_put_out 2>&1 - ' - - test_expect_success "ipfs block put output has the correct error" ' - grep "produced block is over 1MiB" block_put_out - ' - - test_expect_success "ipfs block put --allow-big-block=true works" ' - test_expect_code 0 ipfs block put 2-MB-file --allow-big-block=true && - rm 2-MB-file - ' - -test_done diff --git a/test/sharness/t0051-object.sh b/test/sharness/t0051-object.sh deleted file mode 100755 index 4bac6137488..00000000000 --- a/test/sharness/t0051-object.sh +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2015 Henry Bubert -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test object command" - -. 
lib/test-lib.sh - -test_init_ipfs - -test_patch_create_path() { - root=$1 - name=$2 - target=$3 - - test_expect_success "object patch --create works" ' - PCOUT=$(ipfs object patch $root add-link --create $name $target) - ' - - test_expect_success "output looks good" ' - ipfs cat "$PCOUT/$name" >tpcp_out && - ipfs cat "$target" >tpcp_exp && - test_cmp tpcp_exp tpcp_out - ' -} - -test_object_cmd() { - EMPTY_DIR=$(echo '{"Links":[]}' | ipfs dag put --store-codec dag-pb) - EMPTY_UNIXFS_DIR=$(echo '{"Data":{"/":{"bytes":"CAE"}},"Links":[]}' | ipfs dag put --store-codec dag-pb) - - test_expect_success "'ipfs object patch' should work (no unixfs-dir)" ' - OUTPUT=$(ipfs object patch $EMPTY_DIR add-link foo $EMPTY_DIR) && - ipfs dag stat $OUTPUT - ' - - test_expect_success "'ipfs object patch' should work" ' - OUTPUT=$(ipfs object patch $EMPTY_UNIXFS_DIR add-link foo $EMPTY_UNIXFS_DIR) && - ipfs dag stat $OUTPUT - ' - - test_expect_success "'ipfs object patch' check output block size" ' - DIR=$EMPTY_UNIXFS_DIR - for i in {1..13} - do - DIR=$(ipfs object patch "$DIR" add-link "$DIR.jpg" "$DIR") - done - # Fail when new block goes over the BS limit of 1MiB, but allow manual override - test_expect_code 1 ipfs object patch "$DIR" add-link "$DIR.jpg" "$DIR" >patch_out 2>&1 - ' - - test_expect_success "ipfs object patch add-link output has the correct error" ' - grep "produced block is over 1MiB" patch_out - ' - - test_expect_success "ipfs object patch --allow-big-block=true add-link works" ' - test_expect_code 0 ipfs object patch --allow-big-block=true "$DIR" add-link "$DIR.jpg" "$DIR" - ' - - test_expect_success "'ipfs object patch add-link' should work with paths" ' - N1=$(ipfs object patch $EMPTY_UNIXFS_DIR add-link baz $EMPTY_UNIXFS_DIR) && - N2=$(ipfs object patch $EMPTY_UNIXFS_DIR add-link bar $N1) && - N3=$(ipfs object patch $EMPTY_UNIXFS_DIR add-link foo /ipfs/$N2/bar) && - ipfs dag stat /ipfs/$N3 > /dev/null && - ipfs dag stat $N3/foo > /dev/null && - ipfs dag stat /ipfs/$N3/foo/baz > /dev/null - ' - - test_expect_success "'ipfs object patch add-link' allow linking IPLD objects" ' - OBJ=$(echo "123" | ipfs dag put) && - N1=$(ipfs object patch $EMPTY_UNIXFS_DIR add-link foo $OBJ) && - - ipfs dag stat /ipfs/$N1 > /dev/null && - ipfs resolve /ipfs/$N1/foo > actual && - echo /ipfs/$OBJ > expected && - - test_cmp expected actual - ' - - test_expect_success "object patch creation looks right" ' - echo "bafybeiakusqwohnt7bs75kx6jhmt4oi47l634bmudxfv4qxhpco6xuvgna" > hash_exp && - echo $N3 > hash_actual && - test_cmp hash_exp hash_actual - ' - - test_expect_success "multilayer ipfs patch works" ' - echo "hello world" > hwfile && - FILE=$(ipfs add -q hwfile) && - EMPTY=$EMPTY_UNIXFS_DIR && - ONE=$(ipfs object patch $EMPTY add-link b $EMPTY) && - TWO=$(ipfs object patch $EMPTY add-link a $ONE) && - ipfs object patch $TWO add-link a/b/c $FILE > multi_patch - ' - - test_expect_success "output looks good" ' - ipfs cat $(cat multi_patch)/a/b/c > hwfile_out && - test_cmp hwfile hwfile_out - ' - - test_expect_success "can remove the directory" ' - ipfs object patch $OUTPUT rm-link foo > rmlink_output - ' - - test_expect_success "output should be empty" ' - echo bafybeiczsscdsbs7ffqz55asqdf3smv6klcw3gofszvwlyarci47bgf354 > rmlink_exp && - test_cmp rmlink_exp rmlink_output - ' - - test_expect_success "multilayer rm-link should work" ' - ipfs object patch $(cat multi_patch) rm-link a/b/c > multi_link_rm_out - ' - - test_expect_success "output looks good" ' - echo 
"bafybeicourxysmtbe5hacxqico4d5hyvh7gqkrwlmqa4ew7zufn3pj3juu" > multi_link_rm_exp && - test_cmp multi_link_rm_exp multi_link_rm_out - ' - - test_patch_create_path $EMPTY a/b/c $FILE - - test_patch_create_path $EMPTY a $FILE - - test_patch_create_path $EMPTY a/b/b/b/b $FILE - - test_expect_success "can create blank object" ' - BLANK=$EMPTY_DIR - ' - - test_patch_create_path $BLANK a $FILE - - test_expect_success "create bad path fails" ' - test_must_fail ipfs object patch $EMPTY add-link --create / $FILE - ' -} - -# should work offline -test_object_cmd - -# should work online -test_launch_ipfs_daemon -test_object_cmd -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0052-object-diff.sh b/test/sharness/t0052-object-diff.sh deleted file mode 100755 index 41c890cea2d..00000000000 --- a/test/sharness/t0052-object-diff.sh +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2016 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test object diff command" - -. lib/test-lib.sh - -test_init_ipfs - -test_expect_success "create some objects for testing diffs" ' - mkdir foo && - echo "stuff" > foo/bar && - mkdir foo/baz && - A=$(ipfs add -r -Q foo) && - AR=$(ipfs add --raw-leaves -r -Q foo) && - echo "more things" > foo/cat && - B=$(ipfs add -r -Q foo) && - BR=$(ipfs add --raw-leaves -r -Q foo) && - echo "nested" > foo/baz/dog && - C=$(ipfs add -r -Q foo) - CR=$(ipfs add --raw-leaves -r -Q foo) - echo "changed" > foo/bar && - D=$(ipfs add -r -Q foo) && - DR=$(ipfs add --raw-leaves -r -Q foo) && - echo "" > single_file && - SINGLE_FILE=$(ipfs add -r -Q single_file) && - SINGLE_FILE_RAW=$(ipfs add --raw-leaves -r -Q single_file) && - mkdir empty_dir - EMPTY_DIR=$(ipfs add -r -Q empty_dir) - EMPTY_DIR_RAW=$(ipfs add --raw-leaves -r -Q empty_dir) -' - -test_expect_success "diff against self is empty" ' - ipfs object diff $A $A > diff_out -' - -test_expect_success "identity diff output looks good" ' - printf "" > diff_exp && - test_cmp diff_exp diff_out -' - -test_expect_success "diff (raw-leaves) against self is empty" ' - ipfs object diff $AR $AR > diff_raw_out -' - -test_expect_success "identity diff (raw-leaves) output looks good" ' - printf "" > diff_raw_exp && - test_cmp diff_raw_exp diff_raw_out -' - -test_expect_success "diff against self (single file) is empty" ' - ipfs object diff $SINGLE_FILE $SINGLE_FILE > diff_out && - printf "" > diff_exp && - test_cmp diff_exp diff_out -' - -test_expect_success "diff (raw-leaves) against self (single file) is empty" ' - ipfs object diff $SINGLE_FILE_RAW $SINGLE_FILE_RAW > diff_raw_out && - printf "" > diff_raw_exp && - test_cmp diff_raw_exp diff_raw_out -' - -test_expect_success "diff against self (empty dir) is empty" ' - ipfs object diff $EMPTY_DIR $EMPTY_DIR > diff_out && - printf "" > diff_exp && - test_cmp diff_exp diff_out -' - -test_expect_success "diff (raw-leaves) against self (empty dir) is empty" ' - ipfs object diff $EMPTY_DIR_RAW $EMPTY_DIR_RAW > diff_raw_out && - printf "" > diff_raw_exp && - test_cmp diff_raw_exp diff_raw_out -' - -test_expect_success "diff added link works" ' - ipfs object diff $A $B > diff_out -' - -test_expect_success "diff added link looks right" ' - echo + QmUSvcqzhdfYM1KLDbM76eLPdS9ANFtkJvFuPYeZt73d7A \"cat\" > diff_exp && - test_cmp diff_exp diff_out -' - -test_expect_success "diff (raw-leaves) added link works" ' - ipfs object diff $AR $BR > diff_raw_out -' - -test_expect_success "diff (raw-leaves) added link looks right" ' - echo + 
bafkreig43bpnc6sjo6izaiqzzq5esapazosa3f3wt6jsflwiu3x7ydhq2u \"cat\" > diff_raw_exp && - test_cmp diff_raw_exp diff_raw_out -' - -test_expect_success "verbose diff added link works" ' - ipfs object diff -v $A $B > diff_out -' - -test_expect_success "verbose diff added link looks right" ' - echo Added new link \"cat\" pointing to QmUSvcqzhdfYM1KLDbM76eLPdS9ANFtkJvFuPYeZt73d7A. > diff_exp && - test_cmp diff_exp diff_out -' - -test_expect_success "verbose diff (raw-leaves) added link works" ' - ipfs object diff -v $AR $BR > diff_raw_out -' - -test_expect_success "verbose diff (raw-leaves) added link looks right" ' - echo Added new link \"cat\" pointing to bafkreig43bpnc6sjo6izaiqzzq5esapazosa3f3wt6jsflwiu3x7ydhq2u. > diff_raw_exp && - test_cmp diff_raw_exp diff_raw_out -' - -test_expect_success "diff removed link works" ' - ipfs object diff -v $B $A > diff_out -' - -test_expect_success "diff removed link looks right" ' - echo Removed link \"cat\" \(was QmUSvcqzhdfYM1KLDbM76eLPdS9ANFtkJvFuPYeZt73d7A\). > diff_exp && - test_cmp diff_exp diff_out -' - -test_expect_success "diff (raw-leaves) removed link works" ' - ipfs object diff -v $BR $AR > diff_raw_out -' - -test_expect_success "diff (raw-leaves) removed link looks right" ' - echo Removed link \"cat\" \(was bafkreig43bpnc6sjo6izaiqzzq5esapazosa3f3wt6jsflwiu3x7ydhq2u\). > diff_raw_exp && - test_cmp diff_raw_exp diff_raw_out -' - -test_expect_success "diff nested add works" ' - ipfs object diff -v $B $C > diff_out -' - -test_expect_success "diff looks right" ' - echo Added new link \"baz/dog\" pointing to QmdNJQUTZuDpsUcec7YDuCfRfvw1w4J13DCm7YcU4VMZdS. > diff_exp && - test_cmp diff_exp diff_out -' - -test_expect_success "diff (raw-leaves) nested add works" ' - ipfs object diff -v $BR $CR > diff_raw_out -' - -test_expect_success "diff (raw-leaves) looks right" ' - echo Added new link \"baz/dog\" pointing to bafkreibxbkgajofglo2esqtv53bcp4nwstnqjr3nu2ylrlui5unldf4qum. > diff_raw_exp && - test_cmp diff_raw_exp diff_raw_out -' - -test_expect_success "diff changed link works" ' - ipfs object diff -v $C $D > diff_out -' - -test_expect_success "diff looks right" ' - echo Changed \"bar\" from QmNgd5cz2jNftnAHBhcRUGdtiaMzb5Rhjqd4etondHHST8 to QmRfFVsjSXkhFxrfWnLpMae2M4GBVsry6VAuYYcji5MiZb. > diff_exp && - test_cmp diff_exp diff_out -' - -test_expect_success "diff (raw-leaves) changed link works" ' - ipfs object diff -v $CR $DR > diff_raw_out -' - -test_expect_success "diff(raw-leaves) looks right" ' - echo Changed \"bar\" from bafkreidfn2oemjv5ns2fnc4ukgbjwt6bq5gdd4ciz4mpnehqi2dvwxfbde to bafkreid7rmo7yrtlmje7a3f6kxerotpsk6hhovg2pe755use55olukry6e. > diff_raw_exp && - test_cmp diff_raw_exp diff_raw_out -' - -test_done diff --git a/test/sharness/t0053-dag.sh b/test/sharness/t0053-dag.sh deleted file mode 100755 index 21fd2c04f1d..00000000000 --- a/test/sharness/t0053-dag.sh +++ /dev/null @@ -1,442 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2016 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test dag command" - -. 
lib/test-lib.sh - -test_init_ipfs - -test_expect_success "make a few test files" ' - echo "foo" > file1 && - echo "bar" > file2 && - echo "baz" > file3 && - echo "qux" > file4 && - HASH1=$(ipfs add --pin=false -q file1) && - HASH2=$(ipfs add --pin=false -q file2) && - HASH3=$(ipfs add --pin=false -q file3) && - HASH4=$(ipfs add --pin=false -q file4) -' - -test_expect_success "make an ipld object in dag-json" ' - printf "{\"hello\":\"world\",\"cats\":[{\"/\":\"%s\"},{\"water\":{\"/\":\"%s\"}}],\"magic\":{\"/\":\"%s\"},\"sub\":{\"dict\":\"ionary\",\"beep\":[0,\"bop\"]}}" $HASH1 $HASH2 $HASH3 > ipld_object -' - -# This data is in https://github.com/ipld/codec-fixtures/tree/master/fixtures/dagpb_Data_some -test_expect_success "make the same ipld object in dag-cbor, dag-json and dag-pb" ' - echo "omREYXRhRQABAgMEZUxpbmtzgA==" | base64 -d > ipld_object_dagcbor - echo "CgUAAQIDBA==" | base64 -d > ipld_object_dagpb - echo "{\"Data\":{\"/\":{\"bytes\":\"AAECAwQ\"}},\"Links\":[]}" > ipld_object_dagjson -' - -test_dag_cmd() { - # Working with a plain IPLD hello-world object that's dag-json and dag-cbor compatible - - test_expect_success "can add an ipld object using defaults (dag-json to dag-cbor)" ' - IPLDHASH=$(cat ipld_object | ipfs dag put) - ' - - test_expect_success "CID looks correct" ' - EXPHASH="bafyreiblwimnjbqcdoeafiobk6q27jcw64ew7n2fmmhdpldd63edmjecde" - test $EXPHASH = $IPLDHASH - ' - -test_expect_success "'ipfs dag put' check block size" ' - dd if=/dev/zero bs=2MB count=1 > 2-MB-file && - test_expect_code 1 ipfs dag put --input-codec=raw --store-codec=raw 2-MB-file >dag_put_out 2>&1 - ' - - test_expect_success "ipfs dag put output has the correct error" ' - grep "produced block is over 1MiB" dag_put_out - ' - - test_expect_success "ipfs dag put --allow-big-block=true works" ' - test_expect_code 0 ipfs dag put --input-codec=raw --store-codec=raw 2-MB-file --allow-big-block=true && - rm 2-MB-file - ' - - test_expect_success "can add an ipld object using dag-json to dag-json" ' - IPLDHASH=$(cat ipld_object | ipfs dag put --input-codec dag-json --store-codec dag-json) - ' - - test_expect_success "CID looks correct" ' - EXPHASH="baguqeera6gviseelmbzn2ugoddo5vulxlshqs3kw5ymgsb6w4cabnoh4ldpa" - test $EXPHASH = $IPLDHASH - ' - - test_expect_success "can add an ipld object using dag-json to dag-cbor" ' - IPLDHASH=$(cat ipld_object | ipfs dag put --input-codec dag-json --store-codec dag-cbor) - ' - - test_expect_success "CID looks correct" ' - EXPHASH="bafyreiblwimnjbqcdoeafiobk6q27jcw64ew7n2fmmhdpldd63edmjecde" - test $EXPHASH = $IPLDHASH - ' - - test_expect_success "can add an ipld object using cid-base=base58btc" ' - IPLDb58HASH=$(cat ipld_object | ipfs dag put -cid-base=base58btc) - ' - - test_expect_success "CID looks correct" ' - EXPHASH="zdpuAoN1XJ3GsrxEzMuCbRKZzRUVJekJUCbPVgCgE4D9yYqVi" - test $EXPHASH = $IPLDb58HASH - ' - - # Same object, different forms - # (1) dag-cbor input - - test_expect_success "can add a dag-cbor input block stored as dag-cbor" ' - IPLDCBORHASH=$(cat ipld_object_dagcbor | ipfs dag put --input-codec dag-cbor --store-codec dag-cbor) - ' - - test_expect_success "dag-cbor CID looks correct" ' - EXPHASH="bafyreieculsmrexh3ty5jentbvuku452o27mst4h2tq2rb2zntqhgcstji" - test $EXPHASH = $IPLDCBORHASH - ' - - test_expect_success "can add a dag-cbor input block stored as dag-pb" ' - IPLDPBHASH=$(cat ipld_object_dagcbor | ipfs dag put --input-codec dag-cbor --store-codec dag-pb) - ' - - test_expect_success "dag-pb CID looks correct" ' - 
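# Note: transcoding the dag-cbor fixture to dag-pb should reproduce the
- # exact bytes of ipld_object_dagpb, so this expected CID is the same one
- # the dagpb_Data_some codec fixture yields. -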
EXPHASH="bafybeibazl2z4vqp2tmwcfag6wirmtpnomxknqcgrauj7m2yisrz3qjbom" - test $EXPHASH = $IPLDPBHASH - ' - - test_expect_success "can add a dag-cbor input block stored as dag-json" ' - IPLDJSONHASH=$(cat ipld_object_dagcbor | ipfs dag put --input-codec dag-cbor --store-codec dag-json) - ' - - test_expect_success "dag-json CID looks correct" ' - EXPHASH="baguqeerajwksxu3lxpomdwxvosl542zl3xknhjgxtq3277gafrhl6vdw5tcq" - test $EXPHASH = $IPLDJSONHASH - ' - - # (2) dag-json input - - test_expect_success "can add a dag-json input block stored as dag-cbor" ' - IPLDCBORHASH=$(cat ipld_object_dagjson | ipfs dag put --input-codec dag-json --store-codec dag-cbor) - ' - - test_expect_success "dag-cbor CID looks correct" ' - EXPHASH="bafyreieculsmrexh3ty5jentbvuku452o27mst4h2tq2rb2zntqhgcstji" - test $EXPHASH = $IPLDCBORHASH - ' - - test_expect_success "can add a dag-json input block stored as dag-pb" ' - IPLDPBHASH=$(cat ipld_object_dagjson | ipfs dag put --input-codec dag-json --store-codec dag-pb) - ' - - test_expect_success "dag-pb CID looks correct" ' - EXPHASH="bafybeibazl2z4vqp2tmwcfag6wirmtpnomxknqcgrauj7m2yisrz3qjbom" - test $EXPHASH = $IPLDPBHASH - ' - - test_expect_success "can add a dag-json input block stored as dag-json" ' - IPLDJSONHASH=$(cat ipld_object_dagjson | ipfs dag put --input-codec dag-json --store-codec dag-json) - ' - - test_expect_success "dag-json CID looks correct" ' - EXPHASH="baguqeerajwksxu3lxpomdwxvosl542zl3xknhjgxtq3277gafrhl6vdw5tcq" - test $EXPHASH = $IPLDJSONHASH - ' - - # (3) dag-pb input - - test_expect_success "can add a dag-pb input block stored as dag-cbor" ' - IPLDCBORHASH=$(cat ipld_object_dagpb | ipfs dag put --input-codec dag-pb --store-codec dag-cbor) - ' - - test_expect_success "dag-cbor CID looks correct" ' - EXPHASH="bafyreieculsmrexh3ty5jentbvuku452o27mst4h2tq2rb2zntqhgcstji" - test $EXPHASH = $IPLDCBORHASH - ' - - test_expect_success "can add a dag-pb input block stored as dag-pb" ' - IPLDPBHASH=$(cat ipld_object_dagpb | ipfs dag put --input-codec dag-pb --store-codec dag-pb) - ' - - test_expect_success "dag-pb CID looks correct" ' - EXPHASH="bafybeibazl2z4vqp2tmwcfag6wirmtpnomxknqcgrauj7m2yisrz3qjbom" - test $EXPHASH = $IPLDPBHASH - ' - - test_expect_success "can add a dag-pb input block stored as dag-json" ' - IPLDJSONHASH=$(cat ipld_object_dagpb | ipfs dag put --input-codec dag-pb --store-codec dag-json) - ' - - test_expect_success "dag-json CID looks correct" ' - EXPHASH="baguqeerajwksxu3lxpomdwxvosl542zl3xknhjgxtq3277gafrhl6vdw5tcq" - test $EXPHASH = $IPLDJSONHASH - ' - - test_expect_success "can get dag-cbor, dag-json, dag-pb blocks as dag-json" ' - ipfs dag get $IPLDCBORHASH >& dag-get-cbor && - ipfs dag get $IPLDJSONHASH >& dag-get-json && - ipfs dag get $IPLDPBHASH >& dag-get-pb - ' - - test_expect_success "can get dag-pb block transcoded as dag-cbor" ' - ipfs dag get --output-codec=dag-cbor $IPLDPBHASH >& dag-get-dagpb-transcoded-to-dagcbor && - echo "122082a2e4c892e7dcf1d491b30d68aa73ba76bec94f87d4e1a887596ce0730a534a" >sha2_dagpb_to_dagcbor_expected && - multihash -a=sha2-256 -e=hex dag-get-dagpb-transcoded-to-dagcbor >sha2_dagpb_to_dagcbor_actual && - test_cmp sha2_dagpb_to_dagcbor_expected sha2_dagpb_to_dagcbor_actual - ' - - test_expect_success "dag put and dag get transcodings match" ' - ROUNDTRIPDAGCBOR=$(ipfs dag put --input-codec=dag-cbor --store-codec=dag-cbor dag-get-dagpb-transcoded-to-dagcbor) && - test $ROUNDTRIPDAGCBOR = $IPLDCBORHASH - ' - - # this doesn't tell us if they are correct, we test that better below - 
test_expect_success "outputs are the same" ' - test_cmp dag-get-cbor dag-get-json && - test_cmp dag-get-cbor dag-get-pb - ' - - # Traversals using the original hello-world object - - test_expect_success "various path traversals work" ' - ipfs cat $IPLDHASH/cats/0 > out1 && - ipfs cat $IPLDHASH/cats/1/water > out2 && - ipfs cat $IPLDHASH/magic > out3 - ' - - test_expect_success "outputs look correct" ' - test_cmp file1 out1 && - test_cmp file2 out2 && - test_cmp file3 out3 - ' - - test_expect_success "resolving sub-objects works" ' - ipfs dag get $IPLDHASH/hello > sub1 && - ipfs dag get $IPLDHASH/sub > sub2 && - ipfs dag get $IPLDHASH/sub/beep > sub3 && - ipfs dag get $IPLDHASH/sub/beep/0 > sub4 && - ipfs dag get $IPLDHASH/sub/beep/1 > sub5 - ' - - test_expect_success "sub-objects look right" ' - echo -n "\"world\"" > sub1_exp && - test_cmp sub1_exp sub1 && - echo -n "{\"beep\":[0,\"bop\"],\"dict\":\"ionary\"}" > sub2_exp && - test_cmp sub2_exp sub2 && - echo -n "[0,\"bop\"]" > sub3_exp && - test_cmp sub3_exp sub3 && - echo -n "0" > sub4_exp && - test_cmp sub4_exp sub4 && - echo -n "\"bop\"" > sub5_exp && - test_cmp sub5_exp sub5 - ' - - test_expect_success "traversals using /ipld/ work" ' - ipfs dag get /ipld/$IPLDPBHASH/Data > ipld_path_Data_actual - ' - - test_expect_success "retrieved node looks right" ' - echo -n "{\"/\":{\"bytes\":\"AAECAwQ\"}}" > ipld_path_Data_expected - test_cmp ipld_path_Data_actual ipld_path_Data_expected - ' - - test_expect_success "can pin ipld object" ' - ipfs pin add $IPLDHASH - ' - - test_expect_success "can pin dag-pb object" ' - ipfs pin add $IPLDPBHASH - ' - - test_expect_success "can pin dag-cbor object" ' - ipfs pin add $IPLDCBORHASH - ' - - test_expect_success "can pin dag-json object" ' - ipfs pin add $IPLDJSONHASH - ' - - test_expect_success "after gc, objects still accessible" ' - ipfs repo gc > /dev/null && - ipfs refs -r --timeout=2s $IPLDJSONHASH > /dev/null - ' - - test_expect_success "can get object" ' - ipfs dag get $IPLDHASH > ipld_obj_out - ' - - test_expect_success "object links look right" ' - grep "{\"/\":\"" ipld_obj_out > /dev/null - ' - - test_expect_success "retrieved object hashes back correctly" ' - IPLDHASH2=$(cat ipld_obj_out | ipfs dag put --input-codec dag-json --store-codec dag-cbor) && - test "$IPLDHASH" = "$IPLDHASH2" - ' - - test_expect_success "add a normal file" ' - HASH=$(echo "foobar" | ipfs add -q) - ' - - test_expect_success "can view protobuf object with dag get" ' - ipfs dag get $HASH > dag_get_pb_out - ' - - test_expect_success "output looks correct" ' - echo -n "{\"Data\":{\"/\":{\"bytes\":\"CAISB2Zvb2JhcgoYBw\"}},\"Links\":[]}" > dag_get_pb_exp && - test_cmp dag_get_pb_exp dag_get_pb_out - ' - - test_expect_success "can call dag get with a path" ' - ipfs dag get $IPLDHASH/cats/0 > cat_out - ' - - test_expect_success "output looks correct" ' - echo -n "{\"Data\":{\"/\":{\"bytes\":\"CAISBGZvbwoYBA\"}},\"Links\":[]}" > cat_exp && - test_cmp cat_exp cat_out - ' - - test_expect_success "non-canonical dag-cbor input is normalized" ' - HASH=$(cat ../t0053-dag-data/non-canon.cbor | ipfs dag put --store-codec dag-cbor --input-codec dag-cbor) && - test $HASH = "bafyreiawx7ona7oa2ptcoh6vwq4q6bmd7x2ibtkykld327bgb7t73ayrqm" || - test_fsh echo $HASH - ' - - test_expect_success "cbor input can be fetched" ' - EXPARR=$(ipfs dag get $HASH/arr) - test $EXPARR = "[]" - ' - - test_expect_success "add an ipld with pin" ' - PINHASH=$(printf {\"foo\":\"bar\"} | ipfs dag put --input-codec dag-json --pin=true) - ' - - test_expect_success 
"after gc, objects still accessible" ' - ipfs repo gc > /dev/null && - ipfs refs -r --timeout=2s $PINHASH > /dev/null - ' - - test_expect_success "can add an ipld object with sha3-512 hash" ' - IPLDHASH=$(cat ipld_object | ipfs dag put --hash sha3-512) - ' - - test_expect_success "output looks correct" ' - EXPHASH="bafyriqforjma7y7akqz7nhuu73r6liggj5zhkbfiqgicywe3fgkna2ijlhod2af3ue7doj56tlzt5hh6iu5esafc4msr3sd53jol5m2o25ucy" - test $EXPHASH = $IPLDHASH - ' - - test_expect_success "prepare dag-pb object" ' - echo foo > test_file && - HASH=$(ipfs add -wQ test_file | ipfs cid base32) - ' - - test_expect_success "dag put with json dag-pb works" ' - ipfs dag get $HASH > pbjson && - cat pbjson | ipfs dag put --store-codec=dag-pb --input-codec=dag-json > dag_put_out - ' - - test_expect_success "dag put with dag-pb works output looks good" ' - echo $HASH > dag_put_exp && - test_cmp dag_put_exp dag_put_out - ' - - test_expect_success "dag put with raw dag-pb works" ' - ipfs block get $HASH > pbraw && - cat pbraw | ipfs dag put --store-codec=dag-pb --input-codec=dag-pb > dag_put_out - ' - - test_expect_success "dag put with dag-pb works output looks good" ' - echo $HASH > dag_put_exp && - test_cmp dag_put_exp dag_put_out - ' - - test_expect_success "dag put with raw node works" ' - echo "foo bar" > raw_node_in && - HASH=$(ipfs dag put --store-codec=raw --input-codec=raw -- raw_node_in) && - ipfs block get "$HASH" > raw_node_out && - test_cmp raw_node_in raw_node_out' - - test_expect_success "dag put multiple files" ' - printf {\"foo\":\"bar\"} > a.json && - printf {\"foo\":\"baz\"} > b.json && - ipfs dag put a.json b.json > dag_put_out - ' - - test_expect_success "dag put multiple files output looks good" ' - echo bafyreiblaotetvwobe7cu2uqvnddr6ew2q3cu75qsoweulzku2egca4dxq > dag_put_exp && - echo bafyreibqp7zvp6dvrqhtkbwuzzk7jhtmfmngtiqjajqpm6gtw55o7kqzfi >> dag_put_exp && - - test_cmp dag_put_exp dag_put_out - ' - - test_expect_success "prepare data for dag resolve" ' - NESTED_HASH=$(echo "{\"data\":123}" | ipfs dag put) && - HASH=$(echo "{\"obj\":{\"/\":\"${NESTED_HASH}\"}}" | ipfs dag put) - ' - - test_expect_success "dag resolve some things" ' - ipfs dag resolve $HASH > resolve_hash && - ipfs dag resolve ${HASH}/obj > resolve_obj && - ipfs dag resolve ${HASH}/obj/data > resolve_data - ' - - test_expect_success "dag resolve output looks good" ' - printf $HASH > resolve_hash_exp && - printf $NESTED_HASH > resolve_obj_exp && - printf $NESTED_HASH/data > resolve_data_exp && - - test_cmp resolve_hash_exp resolve_hash && - test_cmp resolve_obj_exp resolve_obj && - test_cmp resolve_data_exp resolve_data - ' - - test_expect_success "get base32 version of hashes for testing" ' - HASHb32=$(ipfs cid base32 $HASH) && - NESTED_HASHb32=$(ipfs cid base32 $NESTED_HASH) - ' - - test_expect_success "dag resolve some things with --cid-base=base32" ' - ipfs dag resolve $HASH --cid-base=base32 > resolve_hash && - ipfs dag resolve ${HASH}/obj --cid-base=base32 > resolve_obj && - ipfs dag resolve ${HASH}/obj/data --cid-base=base32 > resolve_data - ' - - test_expect_success "dag resolve output looks good with --cid-base=base32" ' - printf $HASHb32 > resolve_hash_exp && - printf $NESTED_HASHb32 > resolve_obj_exp && - printf $NESTED_HASHb32/data > resolve_data_exp && - - test_cmp resolve_hash_exp resolve_hash && - test_cmp resolve_obj_exp resolve_obj && - test_cmp resolve_data_exp resolve_data - ' - - test_expect_success "dag resolve some things with base32 hash" ' - ipfs dag resolve $HASHb32 > resolve_hash && - ipfs dag 
resolve ${HASHb32}/obj > resolve_obj && - ipfs dag resolve ${HASHb32}/obj/data > resolve_data - ' - - test_expect_success "dag resolve output looks good with base32 hash" ' - printf $HASHb32 > resolve_hash_exp && - printf $NESTED_HASHb32 > resolve_obj_exp && - printf $NESTED_HASHb32/data > resolve_data_exp && - - test_cmp resolve_hash_exp resolve_hash && - test_cmp resolve_obj_exp resolve_obj && - test_cmp resolve_data_exp resolve_data - ' - - -} - -# should work offline -test_dag_cmd - -# should work online -test_launch_ipfs_daemon -test_dag_cmd -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0054-dag-car-import-export.sh b/test/sharness/t0054-dag-car-import-export.sh deleted file mode 100755 index e277cc46687..00000000000 --- a/test/sharness/t0054-dag-car-import-export.sh +++ /dev/null @@ -1,311 +0,0 @@ -#!/usr/bin/env bash -# - -test_description="Test car file import/export functionality" - -. lib/test-lib.sh -export -f ipfsi - -set -o pipefail - -tar -C ../t0054-dag-car-import-export-data/ --strip-components=1 -Jxf ../t0054-dag-car-import-export-data/test_dataset_car.tar.xz -tab=$'\t' - -test_cmp_sorted() { - # use test_cmp to dump out the unsorted file contents as a diff - [[ "$( sort "$1" | sha256sum )" == "$( sort "$2" | sha256sum )" ]] \ - || test_cmp "$1" "$2" -} -export -f test_cmp_sorted - -reset_blockstore() { - node=$1 - - ipfsi "$node" pin ls --quiet --type=recursive | ipfsi "$node" pin rm &>/dev/null - ipfsi "$node" repo gc &>/dev/null - - test_expect_success "pinlist empty" ' - [[ -z "$( ipfsi $node pin ls )" ]] - ' - test_expect_success "nothing left to gc" ' - [[ -z "$( ipfsi $node repo gc )" ]] - ' -} - -# hammer with concurrent gc to ensure nothing clashes -do_import() { - node="$1"; shift - ( - touch spin.gc - - while [[ -e spin.gc ]]; do ipfsi "$node" repo gc &>/dev/null; done & - while [[ -e spin.gc ]]; do ipfsi "$node" repo gc &>/dev/null; done & - - ipfsi "$node" dag import "$@" 2>&1 && ipfsi "$node" repo verify &>/dev/null - result=$? 
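- # capture the status of the import+verify chain right away: the cleanup
- # commands below (rm, wait) would otherwise overwrite $? before the exit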
- - rm -f spin.gc &>/dev/null - wait || true # work around possible trigger of a bash bug on overloaded circleci - exit $result - ) -} - -run_online_imp_exp_tests() { - - reset_blockstore 0 - reset_blockstore 1 - - cat > basic_import_stats_expected < basic_import_expected - - # Explainer: - # naked_root_import_json_expected output is produced by dag import of combined_naked_roots_genesis_and_128.car - # executed when roots are already present in the repo - thus the BlockCount=0 - # (if blocks were not present in the repo, ipld: could not find would be returned) - cat >naked_root_import_json_expected < basic_import_actual - ' - - test_expect_success "basic import output as expected" ' - test_cmp_sorted basic_import_expected basic_import_actual - ' - - test_expect_success "basic import with --stats" ' - do_import 0 --stats \ - ../t0054-dag-car-import-export-data/combined_naked_roots_genesis_and_128.car \ - ../t0054-dag-car-import-export-data/lotus_testnet_export_128_shuffled_nulroot.car \ - ../t0054-dag-car-import-export-data/lotus_devnet_genesis_shuffled_nulroot.car \ - > basic_import_actual - ' - - test_expect_success "basic import output with --stats as expected" ' - test_cmp_sorted basic_import_stats_expected basic_import_actual - ' - - test_expect_success "basic fetch+export 1" ' - ipfsi 1 dag export bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy > reexported_testnet_128.car - ' - test_expect_success "export of shuffled testnet export identical to canonical original" ' - test_cmp reexported_testnet_128.car ../t0054-dag-car-import-export-data/lotus_testnet_export_128.car - ' - - test_expect_success "basic fetch+export 2" ' - ipfsi 1 dag export bafy2bzaceaxm23epjsmh75yvzcecsrbavlmkcxnva66bkdebdcnyw3bjrc74u > reexported_devnet_genesis.car - ' - test_expect_success "export of shuffled devnet export identical to canonical original" ' - test_cmp reexported_devnet_genesis.car ../t0054-dag-car-import-export-data/lotus_devnet_genesis.car - ' - - test_expect_success "pinlist on node1 still empty" ' - [[ -z "$( ipfsi 1 pin ls )" ]] - ' - - test_expect_success "import/pin naked roots only, relying on local blockstore having all the data" ' - ipfsi 1 dag import --stats --enc=json ../t0054-dag-car-import-export-data/combined_naked_roots_genesis_and_128.car \ - > naked_import_result_json_actual - ' - - test_expect_success "naked import output as expected" ' - test_cmp_sorted naked_root_import_json_expected naked_import_result_json_actual - ' - - reset_blockstore 0 - reset_blockstore 1 - - mkfifo pipe_testnet - mkfifo pipe_devnet - - test_expect_success "fifo import" ' - ( - cat ../t0054-dag-car-import-export-data/lotus_testnet_export_128_shuffled_nulroot.car > pipe_testnet & - cat ../t0054-dag-car-import-export-data/lotus_devnet_genesis_shuffled_nulroot.car > pipe_devnet & - - do_import 0 --stats \ - pipe_testnet \ - pipe_devnet \ - ../t0054-dag-car-import-export-data/combined_naked_roots_genesis_and_128.car \ - > basic_fifo_import_actual - result=$? 
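- # as in do_import above: save the exit status before wait clobbers $?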
- - wait || true # work around possible trigger of a bash bug on overloaded circleci - exit "$result" - ) - ' - - test_expect_success "remove fifos" ' - rm pipe_testnet pipe_devnet - ' - - test_expect_success "fifo-import output as expected" ' - test_cmp_sorted basic_import_stats_expected basic_fifo_import_actual - ' -} - - -test_expect_success "set up testbed" ' - iptb testbed create -type localipfs -count 2 -force -init -' -startup_cluster 2 - -run_online_imp_exp_tests - -test_expect_success "shut down nodes" ' - iptb stop && iptb_wait_stop -' - - -# We want to just init the repo, without using a daemon for stuff below -test_init_ipfs --empty-repo=false - - -test_expect_success "basic offline export of 'getting started' dag works" ' - ipfs dag export "$HASH_WELCOME_DOCS" >/dev/null -' - -test_expect_success "basic offline export of nonexistent cid" ' - ! ipfs dag export QmYwAPJXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX 2> offline_fetch_error_actual >/dev/null -' -test_expect_success "correct error" ' - test_should_contain "Error: block was not found locally (offline): ipld: could not find QmYwAPJXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" offline_fetch_error_actual -' - -cat >multiroot_import_json_stats_expected < multiroot_import_json_expected - -test_expect_success "multiroot import works (--enc=json)" ' - ipfs dag import --enc=json ../t0054-dag-car-import-export-data/lotus_testnet_export_256_multiroot.car > multiroot_import_json_actual -' -test_expect_success "multiroot import expected output" ' - test_cmp_sorted multiroot_import_json_expected multiroot_import_json_actual -' - -test_expect_success "multiroot import works with --stats" ' - ipfs dag import --stats --enc=json ../t0054-dag-car-import-export-data/lotus_testnet_export_256_multiroot.car > multiroot_import_json_actual -' -test_expect_success "multiroot import expected output" ' - test_cmp_sorted multiroot_import_json_stats_expected multiroot_import_json_actual -' - - -cat >pin_import_expected << EOE -{"Stats":{"BlockCount":1198,"BlockBytesCount":468513}} -EOE -test_expect_success "pin-less import works" ' - ipfs dag import --stats --enc=json --pin-roots=false \ - ../t0054-dag-car-import-export-data/lotus_devnet_genesis.car \ - ../t0054-dag-car-import-export-data/lotus_testnet_export_128.car \ - > no-pin_import_actual -' -test_expect_success "expected no pins on --pin-roots=false" ' - test_cmp pin_import_expected no-pin_import_actual -' - - -test_expect_success "naked root import works" ' - ipfs dag import --stats --enc=json ../t0054-dag-car-import-export-data/combined_naked_roots_genesis_and_128.car \ - > naked_root_import_json_actual -' -test_expect_success "naked root import expected output" ' - test_cmp_sorted naked_root_import_json_expected naked_root_import_json_actual -' - -test_expect_success "'ipfs dag import' check block size" ' - BIG_CID=$(dd if=/dev/zero bs=2MB count=1 | ipfs dag put --input-codec=raw --store-codec=raw --allow-big-block) && - ipfs dag export $BIG_CID > 2-MB-block.car && - test_expect_code 1 ipfs dag import 2-MB-block.car >dag_import_out 2>&1 -' -test_expect_success "ipfs dag import output has the correct error" ' - grep "block is over 1MiB" dag_import_out -' - -test_expect_success "ipfs dag import --allow-big-block works" ' - test_expect_code 0 ipfs dag import --allow-big-block 2-MB-block.car -' - -cat > version_2_import_expected << EOE -{"Root":{"Cid":{"/":"bafy2bzaceaxm23epjsmh75yvzcecsrbavlmkcxnva66bkdebdcnyw3bjrc74u"},"PinErrorMsg":""}} 
-{"Root":{"Cid":{"/":"bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy"},"PinErrorMsg":""}} -{"Stats":{"BlockCount":1198,"BlockBytesCount":468513}} -EOE - -test_expect_success "version 2 import" ' - ipfs dag import --stats --enc=json \ - ../t0054-dag-car-import-export-data/lotus_testnet_export_128_v2.car \ - ../t0054-dag-car-import-export-data/lotus_devnet_genesis_v2.car \ - > version_2_import_actual -' - -test_expect_success "version 2 import output as expected" ' - test_cmp_sorted version_2_import_expected version_2_import_actual -' - -test_expect_success "'ipfs dag import' decode IPLD 'dag-json' codec works" ' - NEW_HASH=$(echo "{ \"test\": \"dag-json\" }" | ipfs dag put --store-codec dag-json) && - ipfs dag export $NEW_HASH > dag-json.car && - ipfs dag import dag-json.car && - rm dag-json.car -' - -test_expect_success "'ipfs dag import' decode IPLD 'dag-cbor' codec works" ' - NEW_HASH=$(echo "{ \"test\": \"dag-cbor\" }" | ipfs dag put --store-codec dag-cbor) && - ipfs dag export $NEW_HASH > dag-cbor.car && - ipfs dag import dag-cbor.car && - rm dag-cbor.car -' - -test_expect_success "'ipfs dag import' decode IPLD 'json' codec works" ' - NEW_HASH=$(echo "{ \"test\": \"json\" }" | ipfs dag put --store-codec json) && - ipfs dag export $NEW_HASH > json.car && - ipfs dag import json.car && - rm json.car -' - -test_expect_success "'ipfs dag import' decode IPLD 'cbor' codec works" ' - NEW_HASH=$(echo "{ \"test\": \"cbor\" }" | ipfs dag put --store-codec cbor) && - ipfs dag export $NEW_HASH > cbor.car && - ipfs dag import cbor.car && - rm cbor.car -' - -# IPIP-402 -cat > partial_nopin_import_expected << EOE -{"Stats":{"BlockCount":1,"BlockBytesCount":1618}} -EOE -test_expect_success "'ipfs dag import' without pinning works fine with incomplete DAG (unixfs dir exported as dag-scope=entity from IPIP-402)" ' - ipfs dag import --stats --enc=json --pin-roots=false ../t0054-dag-car-import-export-data/partial-dag-scope-entity.car >partial_nopin_import_out 2>&1 && - test_cmp partial_nopin_import_expected partial_nopin_import_out -' - -test_expect_success "'ipfs dag import' with pinning errors due to incomplete DAG (unixfs dir exported as dag-scope=entity from IPIP-402)" ' - ipfs dag import --stats --enc=json --pin-roots=true ../t0054-dag-car-import-export-data/partial-dag-scope-entity.car >partial_pin_import_out 2>&1 && - test_should_contain "\"PinErrorMsg\":\"block was not found locally" partial_pin_import_out -' - -test_expect_success "'ipfs dag import' pin error in default CLI mode produces exit code 1 (unixfs dir exported as dag-scope=entity from IPIP-402)" ' - test_expect_code 1 ipfs dag import ../t0054-dag-car-import-export-data/partial-dag-scope-entity.car >partial_pin_import_out 2>&1 && - test_should_contain "Error: pinning root \"QmPDC11yLAbVw3dX5jMeEuSdk4BiVjSd9X87zaYRdVjzW3\" FAILED: block was not found locally" partial_pin_import_out -' - -test_done diff --git a/test/sharness/t0055-dag-put-json-new-line.sh b/test/sharness/t0055-dag-put-json-new-line.sh deleted file mode 100755 index 1fde33ab61d..00000000000 --- a/test/sharness/t0055-dag-put-json-new-line.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash - -test_description='Test retrieval of JSON put as CBOR does not end with new-line' - -. 
lib/test-lib.sh - -test_init_ipfs - -test_expect_success 'create test JSON files' ' - WANT_JSON="{\"data\":1234}" - WANT_HASH="bafyreidbm2zncsc3j25zn7lofgd4woeh6eygdy73thfosuni2rwr3bhcvu" - printf "${WANT_JSON}\n" > with_newline.json && - printf "${WANT_JSON}" > without_newline.json -' - -test_expect_success 'puts as CBOR work' ' - GOT_HASH_WITHOUT_NEWLINE="$(cat without_newline.json | ipfs dag put --store-codec dag-cbor)" - GOT_HASH_WITH_NEWLINE="$(cat with_newline.json | ipfs dag put --store-codec dag-cbor)" -' - -test_expect_success 'put hashes with or without newline are equal' ' - test "${GOT_HASH_WITH_NEWLINE}" = "${GOT_HASH_WITHOUT_NEWLINE}" -' - -test_expect_success 'hashes are of expected value' ' - test "${WANT_HASH}" = "${GOT_HASH_WITH_NEWLINE}" - test "${WANT_HASH}" = "${GOT_HASH_WITHOUT_NEWLINE}" -' - -test_expect_success "retrieval by hash does not have new line" ' - ipfs dag get "${WANT_HASH}" > got.json - test_cmp without_newline.json got.json -' - -test_done diff --git a/test/sharness/t0062-daemon-api.sh b/test/sharness/t0062-daemon-api.sh deleted file mode 100755 index 3eb31b1c731..00000000000 --- a/test/sharness/t0062-daemon-api.sh +++ /dev/null @@ -1,90 +0,0 @@ -#!/usr/bin/env bash -# -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test daemon command" - -. lib/test-lib.sh - -test_init_ipfs - -differentport=$((API_PORT + 1)) -api_other="/ip4/127.0.0.1/tcp/$differentport" -api_unreachable="/ip4/127.0.0.1/tcp/1" - -test_expect_success "config setup" ' - peerid=$(ipfs config Identity.PeerID) && - test_check_peerid "$peerid" -' - -test_client() { - opts="$@" - echo "OPTS = " $opts - test_expect_success "client must work properly $state" ' - printf "$peerid" >expected && - ipfs id -f="" $opts >actual && - test_cmp expected actual - ' -} - -test_client_must_fail() { - opts="$@" - echo "OPTS = " $opts - test_expect_success "client should fail $state" ' - echo "Error: cannot connect to the api. Is the daemon running? 
To run as a standalone CLI command remove the api file in \`\$IPFS_PATH/api\`" >expected_err && - test_must_fail ipfs id -f="" $opts >actual 2>actual_err && - test_cmp expected_err actual_err - ' -} - -test_client_suite() { - state="$1" - cfg_success="$2" - diff_success="$3" - api_fromcfg="$4" - api_different="$5" - - # must always work - test_client - - # must always err - test_client_must_fail --api "$api_unreachable" - - if [ "$cfg_success" = true ]; then - test_client --api "$api_fromcfg" - else - test_client_must_fail --api "$api_fromcfg" - fi - - if [ "$diff_success" = true ]; then - test_client --api "$api_different" - else - test_client_must_fail --api "$api_different" - fi -} - -# first, test things without daemon, without /api file -# with no daemon, everything should fail -# (using unreachable because API_MADDR doesn't get set until daemon start) -test_client_suite "(daemon off, no --api, no /api file)" false false "$api_unreachable" "$api_other" - - -# then, test things with daemon, with /api file - -test_launch_ipfs_daemon - -test_expect_success "'ipfs daemon' creates api file" ' - test -f ".ipfs/api" -' - -test_client_suite "(daemon on, no --api, /api file from cfg)" true false "$API_MADDR" "$api_other" - -# then, test things without daemon, with /api file - -test_kill_ipfs_daemon - -# again, both should fail -test_client_suite "(daemon off, no --api, /api file from cfg)" false false "$API_MADDR" "$api_other" - -test_done diff --git a/test/sharness/t0063-daemon-init.sh b/test/sharness/t0063-daemon-init.sh deleted file mode 100755 index 8826ad24686..00000000000 --- a/test/sharness/t0063-daemon-init.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2014 Juan Batiz-Benet -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test daemon --init command" - -. lib/test-lib.sh - -# We don't want the normal test_init_ipfs but we need to make sure the -# IPFS_PATH is set correctly. -export IPFS_PATH="$(pwd)/.ipfs" - -# safety check since we will be removing the directory -if [ -e "$IPFS_PATH" ]; then - echo "$IPFS_PATH exists" - exit 1 -fi - -test_ipfs_daemon_init() { - # Doing it manually since we want to launch the daemon with an - # empty or non-existent repo; the normal - # test_launch_ipfs_daemon does not work since it assumes the - # repo was created a particular way with regard to the API - # server. - - test_expect_success "'ipfs daemon --init' succeeds" ' - ipfs daemon --init --init-profile=test >actual_daemon 2>daemon_err & - IPFS_PID=$! - sleep 2 && - if ! kill -0 $IPFS_PID; then cat daemon_err; return 1; fi - ' - - test_expect_success "'ipfs daemon' can be killed" ' - test_kill_repeat_10_sec $IPFS_PID - ' -} - -test_expect_success "remove \$IPFS_PATH dir" ' - rm -rf "$IPFS_PATH" -' -test_ipfs_daemon_init - -test_expect_success "create empty \$IPFS_PATH dir" ' - rm -rf "$IPFS_PATH" && - mkdir "$IPFS_PATH" -' - -test_ipfs_daemon_init - -test_done diff --git a/test/sharness/t0063-external.sh b/test/sharness/t0063-external.sh deleted file mode 100755 index 6a849438a17..00000000000 --- a/test/sharness/t0063-external.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2015 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="test external command functionality" - -. 
lib/test-lib.sh
-
-
-# set here so daemon launches with it
-PATH=`pwd`/bin:$PATH
-
-test_init_ipfs
-
-test_expect_success "create fake ipfs-update bin" '
-  mkdir bin &&
-  echo "#!/bin/sh" > bin/ipfs-update &&
-  echo "pwd" >> bin/ipfs-update &&
-  echo "test -e \"$IPFS_PATH/repo.lock\" || echo \"repo not locked\" " >> bin/ipfs-update &&
-  chmod +x bin/ipfs-update &&
-  mkdir just_for_test
-'
-
-test_expect_success "external command runs from current user directory and doesn't lock repo" '
-  (cd just_for_test && ipfs update) > actual
-'
-
-test_expect_success "output looks good" '
-  echo `pwd`/just_for_test > exp &&
-  echo "repo not locked" >> exp &&
-  test_cmp exp actual
-'
-
-test_launch_ipfs_daemon
-
-test_expect_success "external command runs from current user directory when daemon is running" '
-  (cd just_for_test && ipfs update) > actual
-'
-
-test_expect_success "output looks good" '
-  echo `pwd`/just_for_test > exp &&
-  test_cmp exp actual
-'
-
-test_kill_ipfs_daemon
-
-test_done
diff --git a/test/sharness/t0064-api-file.sh b/test/sharness/t0064-api-file.sh
deleted file mode 100755
index 67879fe82f8..00000000000
--- a/test/sharness/t0064-api-file.sh
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/usr/bin/env bash
-#
-# MIT Licensed; see the LICENSE file in this repository.
-#
-
-test_description="Test api file"
-
-. lib/test-lib.sh
-
-test_init_ipfs
-
-
-test_launch_ipfs_daemon
-test_kill_ipfs_daemon
-
-test_expect_success "version always works" '
-  ipfs version >/dev/null
-'
-
-test_expect_success "swarm peers fails when offline" '
-  test_must_fail ipfs swarm peers >/dev/null
-'
-
-test_expect_success "swarm peers fails when offline and API specified" '
-  test_must_fail ipfs swarm peers --api="$API_MADDR" >/dev/null
-'
-
-test_expect_success "pin ls succeeds when offline" '
-  ipfs pin ls >/dev/null
-'
-
-test_expect_success "pin ls fails when offline and API specified" '
-  test_must_fail ipfs pin ls --api="$API_MADDR" >/dev/null
-'
-
-test_expect_success "id succeeds when offline" '
-  ipfs id >/dev/null
-'
-
-test_expect_success "id fails when offline and API specified" '
-  test_must_fail ipfs id --api="$API_MADDR" >/dev/null
-'
-
-test_expect_success "create API file" '
-  echo "$API_MADDR" > "$IPFS_PATH/api"
-'
-
-test_expect_success "version always works" '
-  ipfs version >/dev/null
-'
-
-test_expect_success "id succeeds when offline and API file exists" '
-  ipfs id >/dev/null
-'
-
-test_expect_success "pin ls succeeds when offline and API file exists" '
-  ipfs pin ls >/dev/null
-'
-
-test_launch_ipfs_daemon
-
-test_expect_success "version always works" '
-  ipfs version >/dev/null
-'
-
-test_expect_success "id succeeds when online" '
-  ipfs id >/dev/null
-'
-
-test_expect_success "swarm peers succeeds when online" '
-  ipfs swarm peers >/dev/null
-'
-
-test_expect_success "pin ls succeeds when online" '
-  ipfs pin ls >/dev/null
-'
-
-test_expect_success "remove API file when daemon is running" '
-  rm "$IPFS_PATH/api"
-'
-
-test_expect_success "version always works" '
-  ipfs version >/dev/null
-'
-
-test_expect_success "swarm peers fails when the API file is missing" '
-  test_must_fail ipfs swarm peers >/dev/null
-'
-
-test_expect_success "id fails when daemon is running but API file is missing (locks repo)" '
-  test_must_fail ipfs id >/dev/null
-'
-
-test_expect_success "pin ls fails when daemon is running but API file is missing (locks repo)" '
-  test_must_fail ipfs pin ls >/dev/null
-'
-
-test_kill_ipfs_daemon
-
-APIPORT=32563
-
-test_expect_success "Verify api file diallable while on 
unspecified" ' - ipfs config Addresses.API /ip4/0.0.0.0/tcp/$APIPORT && - test_launch_ipfs_daemon && - cat "$IPFS_PATH/api" > api_file_actual && - echo -n "/ip4/127.0.0.1/tcp/$APIPORT" > api_file_expected && - test_cmp api_file_expected api_file_actual -' - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0065-active-requests.sh b/test/sharness/t0065-active-requests.sh deleted file mode 100755 index e73e1198994..00000000000 --- a/test/sharness/t0065-active-requests.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2016 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test active request commands" - -. lib/test-lib.sh - -test_init_ipfs -test_launch_ipfs_daemon - -test_expect_success "command works" ' - ipfs diag cmds > cmd_out -' - -test_expect_success "invoc shows up in output" ' - grep "diag/cmds" cmd_out > /dev/null -' - -test_expect_success "start longer running command" ' - ipfs log tail & - LOGPID=$! - go-sleep 100ms -' - -test_expect_success "long running command shows up" ' - ipfs diag cmds > cmd_out2 -' - -test_expect_success "output looks good" ' - grep "log/tail" cmd_out2 | grep "true" > /dev/null -' - -test_expect_success "kill log cmd" ' - kill $LOGPID - go-sleep 0.5s - kill $LOGPID - - wait $LOGPID || true -' - -test_expect_success "long running command inactive" ' - ipfs diag cmds > cmd_out3 -' - -test_expect_success "command shows up as inactive" ' - grep "log/tail" cmd_out3 | grep "false" -' - -test_kill_ipfs_daemon -test_done diff --git a/test/sharness/t0066-migration.sh b/test/sharness/t0066-migration.sh deleted file mode 100755 index fa6a10e02fe..00000000000 --- a/test/sharness/t0066-migration.sh +++ /dev/null @@ -1,125 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2016 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test migrations auto update prompt" - -. lib/test-lib.sh - -test_init_ipfs - -MIGRATION_START=7 -IPFS_REPO_VER=$(<.ipfs/version) - -# Generate mock migration binaries -gen_mock_migrations() { - mkdir bin - i=$((MIGRATION_START)) - until [ $i -ge $IPFS_REPO_VER ] - do - j=$((i+1)) - echo "#!/bin/bash" > bin/fs-repo-${i}-to-${j} - echo "echo fake applying ${i}-to-${j} repo migration" >> bin/fs-repo-${i}-to-${j} - chmod +x bin/fs-repo-${i}-to-${j} - ((i++)) - done -} - -# Check for expected output from each migration -check_migration_output() { - out_file="$1" - i=$((MIGRATION_START)) - until [ $i -ge $IPFS_REPO_VER ] - do - j=$((i+1)) - grep "applying ${i}-to-${j} repo migration" "$out_file" > /dev/null - ((i++)) - done -} - -# Create fake migration binaries instead of letting ipfs download from network -# To test downloading and running actual binaries, comment out this test. 
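
For reference, each stub that gen_mock_migrations writes is a two-line shell
script; a sketch of the generated bin/fs-repo-7-to-8 (the 7-to-8 pair is
illustrative; the loop emits one such file per version step between
MIGRATION_START and IPFS_REPO_VER):

    #!/bin/bash
    echo fake applying 7-to-8 repo migration

The stub exits 0 but never bumps the repo version file, so the daemon walks
its migration path to completion while the final version check still fails,
which is exactly what the tests below rely on.
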
-test_expect_success "setup mock migrations" ' - gen_mock_migrations && - find bin -name "fs-repo-*-to-*" | wc -l > mock_count && - echo $((IPFS_REPO_VER-MIGRATION_START)) > expect_mock_count && - export PATH="$(pwd)/bin":$PATH && - test_cmp mock_count expect_mock_count -' - -test_expect_success "manually reset repo version to $MIGRATION_START" ' - echo "$MIGRATION_START" > "$IPFS_PATH"/version -' - -test_expect_success "ipfs daemon --migrate=false fails" ' - test_expect_code 1 ipfs daemon --migrate=false > false_out -' - -test_expect_success "output looks good" ' - grep "Please get fs-repo-migrations from https://dist.ipfs.tech" false_out -' - -# The migrations will succeed, but the daemon will still exit with 1 because -# the fake migrations do not update the repo version number. -# -# If run with real migrations, the daemon continues running and must be killed. -test_expect_success "ipfs daemon --migrate=true runs migration" ' - test_expect_code 1 ipfs daemon --migrate=true > true_out -' - -test_expect_success "output looks good" ' - check_migration_output true_out && - grep "Success: fs-repo migrated to version $IPFS_REPO_VER" true_out > /dev/null -' - -test_expect_success "'ipfs daemon' prompts to auto migrate" ' - test_expect_code 1 ipfs daemon > daemon_out 2> daemon_err -' - -test_expect_success "output looks good" ' - grep "Found outdated fs-repo" daemon_out > /dev/null && - grep "Run migrations now?" daemon_out > /dev/null && - grep "Please get fs-repo-migrations from https://dist.ipfs.tech" daemon_out > /dev/null -' - -test_expect_success "ipfs repo migrate succeed" ' - test_expect_code 0 ipfs repo migrate > migrate_out -' - -test_expect_success "output looks good" ' - grep "Found outdated fs-repo, starting migration." migrate_out > /dev/null && - grep "Success: fs-repo migrated to version $IPFS_REPO_VER" true_out > /dev/null -' - -test_expect_success "manually reset repo version to latest" ' - echo "$IPFS_REPO_VER" > "$IPFS_PATH"/version -' - -test_expect_success "detect repo does not need migration" ' - test_expect_code 0 ipfs repo migrate > migrate_out -' - -test_expect_success "output looks good" ' - grep "Repo does not require migration" migrate_out > /dev/null -' - -# ensure that we get a lock error if we need to migrate and the daemon is running -test_launch_ipfs_daemon - -test_expect_success "manually reset repo version to $MIGRATION_START" ' - echo "$MIGRATION_START" > "$IPFS_PATH"/version -' - -test_expect_success "ipfs repo migrate fails" ' - test_expect_code 1 ipfs repo migrate 2> migrate_out -' - -test_expect_success "output looks good" ' - grep "repo.lock" migrate_out > /dev/null -' - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0067-unix-api.sh b/test/sharness/t0067-unix-api.sh deleted file mode 100755 index 4f1e34ca4af..00000000000 --- a/test/sharness/t0067-unix-api.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash -# -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test unix API transport" - -. lib/test-lib.sh - -test_init_ipfs - -# We can't use the trash dir as the full name must be longer less than 108 bytes -# long (because that's the max unix domain socket path length). 
-SOCKDIR="$(mktemp -d "${TMPDIR:-/tmp}/unix-api-sharness.XXXXXX")" - -test_expect_success "configure" ' - peerid=$(ipfs config Identity.PeerID) && - ipfs config Addresses.API "/unix/$SOCKDIR/sock" -' - -test_launch_ipfs_daemon - -test_expect_success "client works" ' - printf "$peerid" >expected && - ipfs --api="/unix/$SOCKDIR/sock" id -f="" >actual && - test_cmp expected actual -' - -test_kill_ipfs_daemon -test_done diff --git a/test/sharness/t0070-user-config.sh b/test/sharness/t0070-user-config.sh deleted file mode 100755 index 63c26ea3afb..00000000000 --- a/test/sharness/t0070-user-config.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2015 Brian Holder-Chow Lin On -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test user-provided config values" - -. lib/test-lib.sh - -test_init_ipfs - -test_expect_success "bootstrap doesn't overwrite user-provided config keys (top-level)" ' - ipfs config Foo.Bar baz && - ipfs bootstrap rm --all && - echo "baz" >expected && - ipfs config Foo.Bar >actual && - test_cmp expected actual -' - -test_done diff --git a/test/sharness/t0080-repo.sh b/test/sharness/t0080-repo.sh deleted file mode 100755 index 3f33a5f440b..00000000000 --- a/test/sharness/t0080-repo.sh +++ /dev/null @@ -1,318 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2014 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test ipfs repo operations" - -. lib/test-lib.sh - -test_init_ipfs --empty-repo=false -test_launch_ipfs_daemon_without_network - -test_expect_success "'ipfs repo gc' succeeds" ' - ipfs repo gc >gc_out_actual -' - -test_expect_success "'ipfs add afile' succeeds" ' - echo "some text" >afile && - HASH=`ipfs add -q afile` -' - -test_expect_success "added file was pinned" ' - ipfs pin ls --type=recursive >actual && - grep "$HASH" actual -' - -test_expect_success "'ipfs repo gc' succeeds" ' - ipfs repo gc >gc_out_actual -' - -test_expect_success "'ipfs repo gc' looks good (patch root)" ' - grep -v "removed $HASH" gc_out_actual -' - -test_expect_success "'ipfs repo gc' doesn't remove file" ' - ipfs cat "$HASH" >out && - test_cmp out afile -' - -test_expect_success "'ipfs pin rm' succeeds" ' - ipfs pin rm -r "$HASH" >actual1 -' - -test_expect_success "'ipfs pin rm' output looks good" ' - echo "unpinned $HASH" >expected1 && - test_cmp expected1 actual1 -' - -test_expect_success "ipfs repo gc fully reverse ipfs add (part 1)" ' - ipfs repo gc && - random 100000 41 >gcfile && - find "$IPFS_PATH/blocks" -type f -name "*.data" | sort -u > expected_blocks && - hash=$(ipfs add -q gcfile) && - ipfs pin rm -r $hash && - ipfs repo gc -' -test_expect_success "'ipfs repo gc --silent' succeeds (no output)" ' - echo "should be empty" >bfile && - HASH2=`ipfs add -q bfile` && - ipfs cat "$HASH2" >expected11 && - test_cmp expected11 bfile && - ipfs pin rm -r "$HASH2" && - ipfs repo gc --silent >gc_out_empty && - test_cmp /dev/null gc_out_empty && - test_must_fail ipfs cat "$HASH2" 2>err_expected1 && - grep "Error: block was not found locally (offline): ipld: could not find $HASH2" err_expected1 -' - -test_kill_ipfs_daemon - -test_expect_success "ipfs repo gc fully reverse ipfs add (part 2)" ' - find "$IPFS_PATH/blocks" -type f -name "*.data" | sort -u > actual_blocks && - test_cmp expected_blocks actual_blocks -' - -test_launch_ipfs_daemon_without_network - -test_expect_success "file no longer pinned" ' - ipfs pin ls --type=recursive --quiet >actual2 && - test_expect_code 1 grep $HASH actual2 -' 
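
The pin assertions that follow distinguish the three pin types reported by
"ipfs pin ls --type=<t>". A minimal session showing the difference, using the
same commands as the tests below (shown here only as a summary):

    HASH=$(echo "some text" | ipfs add -q)       # add pins recursively by default
    ipfs pin ls --type=recursive | grep "$HASH"  # root is listed as recursive
    ipfs pin rm -r "$HASH"                       # drop the recursive pin
    ipfs pin add -r=false "$HASH"                # re-pin this block only
    ipfs pin ls --type=direct | grep "$HASH"     # now listed as direct
    # children of a recursively pinned root are reported as --type=indirect
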
- -test_expect_success "recursively pin afile(default action)" ' - HASH=`ipfs add -q afile` && - ipfs pin add "$HASH" -' - -test_expect_success "recursively pin rm afile (default action)" ' - ipfs pin rm "$HASH" -' - -test_expect_success "recursively pin afile" ' - ipfs pin add -r "$HASH" -' - -test_expect_success "pinning directly should fail now" ' - echo "Error: pin: $HASH already pinned recursively" >expected3 && - test_must_fail ipfs pin add -r=false "$HASH" 2>actual3 && - test_cmp expected3 actual3 -' - -test_expect_success "'ipfs pin rm -r=false ' should fail" ' - echo "Error: $HASH is pinned recursively" >expected4 - test_must_fail ipfs pin rm -r=false "$HASH" 2>actual4 && - test_cmp expected4 actual4 -' - -test_expect_success "remove recursive pin, add direct" ' - echo "unpinned $HASH" >expected5 && - ipfs pin rm -r "$HASH" >actual5 && - test_cmp expected5 actual5 && - ipfs pin add -r=false "$HASH" -' - -test_expect_success "remove direct pin" ' - echo "unpinned $HASH" >expected6 && - ipfs pin rm "$HASH" >actual6 && - test_cmp expected6 actual6 -' - -test_expect_success "'ipfs repo gc' removes file" ' - ipfs block stat $HASH && - ipfs repo gc && - test_must_fail ipfs block stat $HASH -' - -# Convert all to a base32-multihash as refs local outputs cidv1 raw -# Technically converting refs local output would suffice, but this is more -# future proof if we ever switch to adding the files with cid-version 1. -test_expect_success "'ipfs refs local' no longer shows file" ' - EMPTY_DIR=QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn && - HASH_MH=`cid-fmt -b base32 "%M" "$HASH"` && - HARDCODED_HASH_MH=`cid-fmt -b base32 "%M" "QmYCvbfNbCwFR45HiNP45rwJgvatpiW38D961L5qAhUM5Y"` && - EMPTY_DIR_MH=`cid-fmt -b base32 "%M" "$EMPTY_DIR"` && - HASH_WELCOME_DOCS_MH=`cid-fmt -b base32 "%M" "$HASH_WELCOME_DOCS"` && - ipfs refs local | cid-fmt -b base32 --filter "%M" >actual8 && - grep "$HARDCODED_HASH_MH" actual8 && - grep "$EMPTY_DIR_MH" actual8 && - grep "$HASH_WELCOME_DOCS_MH" actual8 && - test_must_fail grep "$HASH_MH" actual8 -' - -test_expect_success "adding multiblock random file succeeds" ' - random 1000000 >multiblock && - MBLOCKHASH=`ipfs add -q multiblock` -' - -test_expect_success "'ipfs pin ls --type=indirect' is correct" ' - ipfs refs "$MBLOCKHASH" >refsout && - ipfs refs -r "$HASH_WELCOME_DOCS" >>refsout && - sed -i"~" "s/\(.*\)/\1 indirect/g" refsout && - ipfs pin ls --type=indirect >indirectpins && - test_sort_cmp refsout indirectpins -' - -test_expect_success "pin something directly" ' - echo "ipfs is so awesome" >awesome && - DIRECTPIN=`ipfs add -q awesome` && - echo "unpinned $DIRECTPIN" >expected9 && - ipfs pin rm -r "$DIRECTPIN" >actual9 && - test_cmp expected9 actual9 && - - echo "pinned $DIRECTPIN directly" >expected10 && - ipfs pin add -r=false "$DIRECTPIN" >actual10 && - test_cmp expected10 actual10 -' - -test_expect_success "'ipfs pin ls --type=direct' is correct" ' - echo "$DIRECTPIN direct" >directpinexpected && - ipfs pin ls --type=direct >directpinout && - test_sort_cmp directpinexpected directpinout -' - -test_expect_success "'ipfs pin ls --type=recursive' is correct" ' - echo "$MBLOCKHASH" >rp_expected && - echo "$HASH_WELCOME_DOCS" >>rp_expected && - echo "$EMPTY_DIR" >>rp_expected && - sed -i"~" "s/\(.*\)/\1 recursive/g" rp_expected && - ipfs pin ls --type=recursive >rp_actual && - test_sort_cmp rp_expected rp_actual -' - -test_expect_success "'ipfs pin ls --type=all --quiet' is correct" ' - cat directpinout >allpins && - cat rp_actual >>allpins && - cat indirectpins 
>>allpins && - cut -f1 -d " " allpins | sort | uniq >> allpins_uniq_hashes && - ipfs pin ls --type=all --quiet >actual_allpins && - test_sort_cmp allpins_uniq_hashes actual_allpins -' - -test_expect_success "'ipfs refs --unique' is correct" ' - mkdir -p uniques && - echo "content1" > uniques/file1 && - echo "content1" > uniques/file2 && - ROOT=$(ipfs add -r -Q uniques) && - ipfs refs --unique $ROOT >expected && - ipfs add -q uniques/file1 >unique_hash && - test_cmp expected unique_hash -' - -test_expect_success "'ipfs refs --unique --recursive' is correct" ' - mkdir -p a/b/c && - echo "c1" > a/f1 && - echo "c1" > a/b/f1 && - echo "c1" > a/b/c/f1 && - echo "c2" > a/b/c/f2 && - ROOT=$(ipfs add -r -Q a) && - ipfs refs --unique --recursive $ROOT >refs_output && - wc -l refs_output | sed "s/^ *//g" >line_count && - echo "4 refs_output" >expected && - test_cmp expected line_count || test_fsh cat refs_output -' - -test_expect_success "'ipfs refs --recursive (bigger)'" ' - mkdir -p b/c/d/e && - echo "content1" >b/f && - echo "content1" >b/c/f1 && - echo "content1" >b/c/d/f2 && - echo "content2" >b/c/f2 && - echo "content2" >b/c/d/f1 && - echo "content2" >b/c/d/e/f && - cp -r b b2 && mv b2 b/b2 && - cp -r b b3 && mv b3 b/b3 && - cp -r b b4 && mv b4 b/b4 && - hash=$(ipfs add -r -Q b) && - ipfs refs -r "$hash" >refs_output && - wc -l refs_output | sed "s/^ *//g" >actual && - echo "79 refs_output" >expected && - test_cmp expected actual || test_fsh cat refs_output -' - -test_expect_success "'ipfs refs --unique --recursive (bigger)'" ' - ipfs refs -r "$hash" >refs_output && - sort refs_output | uniq >expected && - ipfs refs -r -u "$hash" >actual && - test_sort_cmp expected actual || test_fsh cat refs_output -' - -get_field_num() { - field=$1 - file=$2 - num=$(grep "$field" "$file" | awk '{ print $2 }') - echo $num -} - -test_expect_success "'ipfs repo stat' succeeds" ' - ipfs repo stat > repo-stats -' - -test_expect_success "repo stats came out correct" ' - grep "RepoPath" repo-stats && - grep "RepoSize" repo-stats && - grep "NumObjects" repo-stats && - grep "Version" repo-stats && - grep "StorageMax" repo-stats -' - -test_expect_success "'ipfs repo stat --human' succeeds" ' - ipfs repo stat --human > repo-stats-human -' - -test_expect_success "repo stats --human came out correct" ' - grep "RepoPath" repo-stats-human && - grep -E "RepoSize:\s*([0-9]*[.])?[0-9]+\s+?(B|kB|MB|GB|TB|PB|EB)" repo-stats-human && - grep "NumObjects" repo-stats-human && - grep "Version" repo-stats-human && - grep -E "StorageMax:\s*([0-9]*[.])?[0-9]+\s+?(B|kB|MB|GB|TB|PB|EB)" repo-stats-human || - test_fsh cat repo-stats-human -' - -test_expect_success "'ipfs repo stat' after adding a file" ' - ipfs add repo-stats && - ipfs repo stat > repo-stats-2 -' - -test_expect_success "repo stats are updated correctly" ' - test $(get_field_num "RepoSize" repo-stats-2) -ge $(get_field_num "RepoSize" repo-stats) -' - -test_expect_success "'ipfs repo stat --size-only' succeeds" ' - ipfs repo stat --size-only > repo-stats-size-only -' - -test_expect_success "repo stats came out correct for --size-only" ' - grep "RepoSize" repo-stats-size-only && - grep "StorageMax" repo-stats-size-only && - grep -v "RepoPath" repo-stats-size-only && - grep -v "NumObjects" repo-stats-size-only && - grep -v "Version" repo-stats-size-only -' - -test_expect_success "'ipfs repo version' succeeds" ' - ipfs repo version > repo-version -' - -test_expect_success "repo version came out correct" ' - egrep "^ipfs repo version fs-repo@[0-9]+" repo-version >/dev/null -' - 
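
The get_field_num helper defined above is what drives the RepoSize
comparison; a usage sketch (the numeric output is illustrative):

    ipfs repo stat > repo-stats              # contains lines such as "RepoSize: 38621"
    get_field_num "RepoSize" repo-stats      # prints just the number: 38621
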
-test_expect_success "'ipfs repo version -q' succeeds" ' - ipfs repo version -q > repo-version-q -' -test_expect_success "repo version came out correct" ' - egrep "^fs-repo@[0-9]+" repo-version-q >/dev/null -' - -test_kill_ipfs_daemon - -test_expect_success "remove Datastore.StorageMax from config" ' - ipfs config Datastore.StorageMax "" -' -test_expect_success "'ipfs repo stat' still succeeds" ' - ipfs repo stat > repo-stats -' - -test_done diff --git a/test/sharness/t0081-repo-pinning.sh b/test/sharness/t0081-repo-pinning.sh deleted file mode 100755 index 92cb71c3858..00000000000 --- a/test/sharness/t0081-repo-pinning.sh +++ /dev/null @@ -1,292 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2014 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test ipfs repo pinning" - -. lib/test-lib.sh - -test_pin_flag() { - object=$1 - ptype=$2 - expect=$3 - - echo "test_pin_flag" "$@" - - if ipfs pin ls --type="$ptype" "$object" >actual - then - test "$expect" = "true" && return - test_fsh cat actual - return - else - test "$expect" = "false" && return - test_fsh cat actual - return - fi -} - -test_pin() { - object=$1 - shift - - test_str_contains "recursive" $@ - [ "$?" = "0" ] && r="true" || r="false" - - test_str_contains "indirect" $@ - [ "$?" = "0" ] && i="true" || i="false" - - test_str_contains "direct" $@ - [ "$?" = "0" ] && d="true" || d="false" - - test_pin_flag "$object" "recursive" $r || return 1 - test_pin_flag "$object" "indirect" $i || return 1 - test_pin_flag "$object" "direct" $d || return 1 - return 0 -} - - -test_init_ipfs - -# test runs much faster without daemon. -# TODO: turn this back on after: -# https://github.com/ipfs/go-ipfs/issues/1075 -# test_launch_ipfs_daemon - -HASH_FILE6="QmRsBC3Y2G6VRPYGAVpZczx1W7Xw54MtM1NcLKTkn6rx3U" -HASH_FILE5="QmaN3PtyP8DcVGHi3Q2Fcp7CfAFVcVXKddWbHoNvaA41zf" -HASH_FILE4="QmV1aiVgpDknKQugrK59uBUbMrPnsQM1F9FXbFcfgEvUvH" -HASH_FILE3="QmZrr4Pzqp3NnMzMfbMhNe7LghfoUFHVx7c9Po9GZrhKZ7" -HASH_FILE2="QmSkjTornLY72QhmK9NvAz26815pTaoAL42rF8Qi3w2WBP" -HASH_FILE1="QmbgX4aXhSSY88GHmPQ4roizD8wFwPX8jzTLjc8VAp89x4" -HASH_DIR4="QmW98gV71Ns4bX7QbgWAqLiGF3SDC1JpveZSgBh4ExaSAd" -HASH_DIR3="QmRsCaNBMkweZ9vHT5PJRd2TT9rtNKEKyuognCEVxZxF1H" -HASH_DIR2="QmTUTQAgeVfughDSFukMZLbfGvetDJY7Ef5cDXkKK4abKC" -HASH_DIR1="QmNyZVFbgvmzguS2jVMRb8PQMNcCMJrn9E3doDhBbcPNTY" -HASH_NOPINDIR="QmWHjrRJYSfYKz5V9dWWSKu47GdY7NewyRhyTiroXgWcDU" -HASH_NOPIN_FILE1="QmUJT3GQi1dxQyTZbkaWeer9GkCn1d3W3HHRLSDr6PTcpx" -HASH_NOPIN_FILE2="QmarR7m9JT7qHEGhuFNZUEMAnoZ8E9QAfsthHCQ9Y2GfoT" - -DIR1="dir1" -DIR2="dir1/dir2" -DIR4="dir1/dir2/dir4" -DIR3="dir1/dir3" -FILE1="dir1/file1" -FILE2="dir1/file2" -FILE3="dir1/file3" -FILE4="dir1/dir2/file4" -FILE6="dir1/dir2/dir4/file6" -FILE5="dir1/dir3/file5" - -test_expect_success "'ipfs add dir' succeeds" ' - mkdir dir1 && - mkdir dir1/dir2 && - mkdir dir1/dir2/dir4 && - mkdir dir1/dir3 && - echo "some text 1" >dir1/file1 && - echo "some text 2" >dir1/file2 && - echo "some text 3" >dir1/file3 && - echo "some text 1" >dir1/dir2/file1 && - echo "some text 4" >dir1/dir2/file4 && - echo "some text 1" >dir1/dir2/dir4/file1 && - echo "some text 2" >dir1/dir2/dir4/file2 && - echo "some text 6" >dir1/dir2/dir4/file6 && - echo "some text 2" >dir1/dir3/file2 && - echo "some text 5" >dir1/dir3/file5 && - ipfs add -Q -r dir1 >actual && - echo "$HASH_DIR1" >expected && - ipfs repo gc && # remove the patch chaff - test_cmp expected actual -' - -test_expect_success "objects are there" ' - ipfs cat "$HASH_FILE6" >FILE6_a && - ipfs cat 
"$HASH_FILE5" >FILE5_a && - ipfs cat "$HASH_FILE4" >FILE4_a && - ipfs cat "$HASH_FILE3" >FILE3_a && - ipfs cat "$HASH_FILE2" >FILE2_a && - ipfs cat "$HASH_FILE1" >FILE1_a && - ipfs ls "$HASH_DIR3" >DIR3_a && - ipfs ls "$HASH_DIR4" >DIR4_a && - ipfs ls "$HASH_DIR2" >DIR2_a && - ipfs ls "$HASH_DIR1" >DIR1_a -' - -# saving this output for later -test_expect_success "ipfs dag get $HASH_DIR1 works" ' - ipfs dag get $HASH_DIR1 | jq -r ".Links[] | .Hash | .[\"/\"]" > DIR1_objlink -' - - -test_expect_success "added dir was pinned recursively" ' - test_pin_flag $HASH_DIR1 recursive true -' - -test_expect_success "rest were pinned indirectly" ' - test_pin_flag "$HASH_FILE6" indirect true - test_pin_flag "$HASH_FILE5" indirect true - test_pin_flag "$HASH_FILE4" indirect true - test_pin_flag "$HASH_FILE3" indirect true - test_pin_flag "$HASH_FILE2" indirect true - test_pin_flag "$HASH_FILE1" indirect true - test_pin_flag "$HASH_DIR3" indirect true - test_pin_flag "$HASH_DIR4" indirect true - test_pin_flag "$HASH_DIR2" indirect true -' - -test_expect_success "added dir was NOT pinned indirectly" ' - test_pin_flag "$HASH_DIR1" indirect false -' - -test_expect_success "nothing is pinned directly" ' - ipfs pin ls --type=direct >actual4 && - test_must_be_empty actual4 -' - -test_expect_success "'ipfs repo gc' succeeds" ' - ipfs repo gc >gc_out_actual -' - -test_expect_success "objects are still there" ' - cat FILE6_a FILE5_a FILE4_a FILE3_a FILE2_a FILE1_a >expected45 && - cat DIR3_a DIR4_a DIR2_a DIR1_a >>expected45 && - ipfs cat "$HASH_FILE6" >actual45 && - ipfs cat "$HASH_FILE5" >>actual45 && - ipfs cat "$HASH_FILE4" >>actual45 && - ipfs cat "$HASH_FILE3" >>actual45 && - ipfs cat "$HASH_FILE2" >>actual45 && - ipfs cat "$HASH_FILE1" >>actual45 && - ipfs ls "$HASH_DIR3" >>actual45 && - ipfs ls "$HASH_DIR4" >>actual45 && - ipfs ls "$HASH_DIR2" >>actual45 && - ipfs ls "$HASH_DIR1" >>actual45 && - test_cmp expected45 actual45 -' - -test_expect_success "remove dir recursive pin succeeds" ' - echo "unpinned $HASH_DIR1" >expected5 && - ipfs pin rm -r "$HASH_DIR1" >actual5 && - test_cmp expected5 actual5 -' - -test_expect_success "none are pinned any more" ' - test_pin "$HASH_FILE6" && - test_pin "$HASH_FILE5" && - test_pin "$HASH_FILE4" && - test_pin "$HASH_FILE3" && - test_pin "$HASH_FILE2" && - test_pin "$HASH_FILE1" && - test_pin "$HASH_DIR3" && - test_pin "$HASH_DIR4" && - test_pin "$HASH_DIR2" && - test_pin "$HASH_DIR1" -' - -test_expect_success "pin some directly and indirectly" ' - ipfs pin add -r=false "$HASH_DIR1" >actual7 && - ipfs pin add -r=true "$HASH_DIR2" >>actual7 && - ipfs pin add -r=false "$HASH_FILE1" >>actual7 && - echo "pinned $HASH_DIR1 directly" >expected7 && - echo "pinned $HASH_DIR2 recursively" >>expected7 && - echo "pinned $HASH_FILE1 directly" >>expected7 && - test_cmp expected7 actual7 -' - -test_expect_success "pin lists look good" ' - test_pin $HASH_DIR1 direct && - test_pin $HASH_DIR2 recursive && - test_pin $HASH_DIR3 && - test_pin $HASH_DIR4 indirect && - test_pin $HASH_FILE1 indirect direct && - test_pin $HASH_FILE2 indirect && - test_pin $HASH_FILE3 && - test_pin $HASH_FILE4 indirect && - test_pin $HASH_FILE5 && - test_pin $HASH_FILE6 indirect -' - -test_expect_success "'ipfs repo gc' succeeds" ' - ipfs repo gc && - test_must_fail ipfs block stat $HASH_FILE3 && - test_must_fail ipfs block stat $HASH_FILE5 && - test_must_fail ipfs block stat $HASH_DIR3 -' - -# use object links for HASH_DIR1 here because its children -# no longer exist -test_expect_success "some objects are 
still there" ' - cat FILE6_a FILE4_a FILE2_a FILE1_a >expected8 && - cat DIR4_a DIR2_a DIR1_objlink >>expected8 && - ipfs cat "$HASH_FILE6" >actual8 && - ipfs cat "$HASH_FILE4" >>actual8 && - ipfs cat "$HASH_FILE2" >>actual8 && - ipfs cat "$HASH_FILE1" >>actual8 && - ipfs ls "$HASH_DIR4" >>actual8 && - ipfs ls "$HASH_DIR2" >>actual8 && - ipfs dag get "$HASH_DIR1" | jq -r ".Links[] | .Hash | .[\"/\"]" >>actual8 && - test_cmp expected8 actual8 -' - -# todo: make this faster somehow. -test_expect_success "some are no longer there" ' - test_must_fail ipfs cat "$HASH_FILE5" && - test_must_fail ipfs cat "$HASH_FILE3" && - test_must_fail ipfs ls "$HASH_DIR3" -' - -test_launch_ipfs_daemon_without_network -test_expect_success "recursive pin fails without objects" ' - test_must_fail ipfs pin add -r "$HASH_DIR1" 2>err_expected8 && - grep "ipld: could not find" err_expected8 || - test_fsh cat err_expected8 -' - -# Regression test for https://github.com/ipfs/go-ipfs/issues/4650 -# This test requires the daemon. Otherwise, the pin changes are reverted when -# the pin fails in the previous test. -test_expect_success "failed recursive pin does not remove direct pin" ' - test_pin_flag "$HASH_DIR1" direct true -' -test_kill_ipfs_daemon - -test_expect_success "test add nopin file" ' - echo "test nopin data" > test_nopin_data && - NOPINHASH=$(ipfs add -q --pin=false test_nopin_data) && - test_pin_flag "$NOPINHASH" direct false && - test_pin_flag "$NOPINHASH" indirect false && - test_pin_flag "$NOPINHASH" recursive false -' - - -test_expect_success "test add nopin dir" ' - mkdir nopin_dir1 && - echo "some nopin text 1" >nopin_dir1/file1 && - echo "some nopin text 2" >nopin_dir1/file2 && - ipfs add -Q -r --pin=false nopin_dir1 >actual && - echo "$HASH_NOPINDIR" >expected && - test_cmp actual expected && - test_pin_flag "$HASH_NOPINDIR" direct false && - test_pin_flag "$HASH_NOPINDIR" indirect false && - test_pin_flag "$HASH_NOPINDIR" recursive false && - test_pin_flag "$HASH_NOPIN_FILE1" direct false && - test_pin_flag "$HASH_NOPIN_FILE1" indirect false && - test_pin_flag "$HASH_NOPIN_FILE1" recursive false && - test_pin_flag "$HASH_NOPIN_FILE2" direct false && - test_pin_flag "$HASH_NOPIN_FILE2" indirect false && - test_pin_flag "$HASH_NOPIN_FILE2" recursive false - -' - -FICTIONAL_HASH="QmXV4f9v8a56MxWKBhP3ETsz4EaafudU1cKfPaaJnenc48" -test_launch_ipfs_daemon -test_expect_success "test unpinning a hash that's not pinned" " - test_expect_code 1 ipfs pin rm $FICTIONAL_HASH --timeout=2s - test_expect_code 1 ipfs pin rm $FICTIONAL_HASH/a --timeout=2s - test_expect_code 1 ipfs pin rm $FICTIONAL_HASH/a/b --timeout=2s -" -test_kill_ipfs_daemon - -# test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0082-repo-gc-auto.sh b/test/sharness/t0082-repo-gc-auto.sh deleted file mode 100755 index 50a4e6fae7f..00000000000 --- a/test/sharness/t0082-repo-gc-auto.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env bash -# -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test ipfs repo auto gc" - -. 
lib/test-lib.sh - -skip_all="skipping auto repo gc tests until they can be fixed" - -test_done - -check_ipfs_storage() { - ipfs config Datastore.StorageMax -} - -test_init_ipfs - -test_expect_success "generate 2 600 kB files and 2 MB file using go-random" ' - random 600k 41 >600k1 && - random 600k 42 >600k2 && - random 2M 43 >2M -' - -test_expect_success "set ipfs gc watermark, storage max, and gc timeout" ' - test_config_set Datastore.StorageMax "2MB" && - test_config_set --json Datastore.StorageGCWatermark 60 && - test_config_set Datastore.GCPeriod "20ms" -' - -test_launch_ipfs_daemon --enable-gc - -test_gc() { - test_expect_success "adding data below watermark doesn't trigger auto gc" ' - ipfs add 600k1 >/dev/null && - disk_usage "$IPFS_PATH/blocks" >expected && - go-sleep 40ms && - disk_usage "$IPFS_PATH/blocks" >actual && - test_cmp expected actual - ' - - test_expect_success "adding data beyond watermark triggers auto gc" ' - HASH=`ipfs add -q 600k2` && - ipfs pin rm -r $HASH && - go-sleep 40ms && - DU=$(disk_usage "$IPFS_PATH/blocks") && - if test $(uname -s) = "Darwin"; then - test "$DU" -lt 1400 # 60% of 2MB - else - test "$DU" -lt 1000000 - fi - ' -} - -#TODO: conditional GC test is disabled due to files size bug in ipfs add -#test_expect_success "adding data beyond storageMax fails" ' -# test_must_fail ipfs add 2M 2>add_fail_out -#' -#test_expect_success "ipfs add not enough space message looks good" ' -# echo "Error: file size exceeds slack space allowed by storageMax. Maybe unpin some files?" >add_fail_exp && -# test_cmp add_fail_exp add_fail_out -#' - -test_expect_success "periodic auto gc stress test" ' - for i in $(test_seq 1 20) - do - test_gc || return 1 - done -' - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0084-repo-read-rehash.sh b/test/sharness/t0084-repo-read-rehash.sh deleted file mode 100755 index 5528f1bf6e0..00000000000 --- a/test/sharness/t0084-repo-read-rehash.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) Jakub Sztandera -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test ipfs blockstore repo read check." - -. 
lib/test-lib.sh
-
-rm -rf "$IPFS_PATH/*"
-
-test_init_ipfs
-
-
-H_BLOCK1=$(echo "Block 1" | ipfs add -q)
-H_BLOCK2=$(echo "Block 2" | ipfs add -q)
-
-BS_BLOCK1="XZ/CIQPDDQH5PDJTF4QSNMPFC45FQZH5MBSWCX2W254P7L7HGNHW5MQXZA.data"
-BS_BLOCK2="CK/CIQNYWBOKHY7TCY7FUOBXKVJ66YRMARDT3KC7PPY6UWWPZR4YA67CKQ.data"
-
-
-test_expect_success 'blocks are swapped' '
-  ipfs cat $H_BLOCK2 > noswap &&
-  cp -f "$IPFS_PATH/blocks/$BS_BLOCK1" "$IPFS_PATH/blocks/$BS_BLOCK2" &&
-  ipfs cat $H_BLOCK2 > swap &&
-  test_must_fail test_cmp noswap swap
-'
-
-ipfs config --bool Datastore.HashOnRead true
-
-test_check_bad_blocks() {
-  test_expect_success 'getting modified block fails' '
-    (test_must_fail ipfs cat $H_BLOCK2 2> err_msg) &&
-    grep "block in storage has different hash than requested" err_msg
-  '
-
-  test_expect_success "block shows up in repo verify" '
-    test_expect_code 1 ipfs repo verify | cid-fmt --filter -b base32 "%M" > verify_out &&
-    H_BLOCK2_MH=`cid-fmt -b base32 "%M" $H_BLOCK2` &&
-    grep "$H_BLOCK2_MH" verify_out
-  '
-}
-
-test_check_bad_blocks
-
-test_expect_success "can add and cat a raw-leaf file" '
-  HASH=$(echo "stuff" | ipfs add -q --raw-leaves) &&
-  ipfs cat $HASH > /dev/null
-'
-
-test_launch_ipfs_daemon
-test_check_bad_blocks
-test_kill_ipfs_daemon
-
-test_done
diff --git a/test/sharness/t0086-repo-verify.sh b/test/sharness/t0086-repo-verify.sh
deleted file mode 100755
index 0f12fef8f82..00000000000
--- a/test/sharness/t0086-repo-verify.sh
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/env bash
-#
-# Copyright (c) 2016 Jeromy Johnson
-# MIT Licensed; see the LICENSE file in this repository.
-#
-
-test_description="Test ipfs repo fsck"
-
-. lib/test-lib.sh
-
-test_init_ipfs
-
-sort_rand() {
-  case `uname` in
-  Linux|FreeBSD)
-    sort -R
-    ;;
-  Darwin)
-    ruby -e 'puts STDIN.readlines.shuffle'
-    ;;
-  *)
-    echo "unsupported system: $(uname)"
-  esac
-}
-
-check_random_corruption() {
-  to_break=$(find "$IPFS_PATH/blocks" -type f -name '*.data' | sort_rand | head -n 1)
-
-  test_expect_success "back up file and overwrite it" '
-    cp "$to_break" backup_file &&
-    echo "this is super broken" > "$to_break"
-  '
-
-  test_expect_success "repo verify detects failure" '
-    test_expect_code 1 ipfs repo verify
-  '
-
-  test_expect_success "replace the object" '
-    cp backup_file "$to_break"
-  '
-
-  test_expect_success "ipfs repo verify passes just fine now" '
-    ipfs repo verify
-  '
-}
-
-test_expect_success "create some files" '
-  random-files -depth=3 -dirs=4 -files=10 foobar > /dev/null
-'
-
-test_expect_success "add them all" '
-  ipfs add -r -q foobar > /dev/null
-'
-
-for i in `seq 20`
-do
-  check_random_corruption
-done
-
-test_done
diff --git a/test/sharness/t0087-repo-robust-gc.sh b/test/sharness/t0087-repo-robust-gc.sh
deleted file mode 100755
index 884de5774e0..00000000000
--- a/test/sharness/t0087-repo-robust-gc.sh
+++ /dev/null
@@ -1,175 +0,0 @@
-#!/usr/bin/env bash
-#
-# Copyright (c) 2016 Jeromy Johnson
-# MIT Licensed; see the LICENSE file in this repository.
-#
-
-test_description="Test robustness of garbage collector"
-
-. 
lib/test-lib.sh -set -e - -to_raw_cid() { - ipfs cid format -b b --mc raw -v 1 "$1" -} - -test_gc_robust_part1() { - - test_expect_success "add a 1MB file with --raw-leaves" ' - random 1048576 56 > afile && - HASH1=`ipfs add --raw-leaves -q --cid-version 1 afile` && - REFS=`ipfs refs -r $HASH1` && - read LEAF1 LEAF2 LEAF3 LEAF4 < <(echo $REFS) - ' - - test_expect_success "find data blocks for added file" ' - HASH1MH=`cid-fmt -b base32 "%M" $HASH1` && - LEAF1MH=`cid-fmt -b base32 "%M" $LEAF1` && - LEAF2MH=`cid-fmt -b base32 "%M" $LEAF2` && - HASH1FILE=`find .ipfs/blocks -type f | grep -i $HASH1MH` && - LEAF1FILE=`find .ipfs/blocks -type f | grep -i $LEAF1MH` && - LEAF2FILE=`find .ipfs/blocks -type f | grep -i $LEAF2MH` - ' - - test_expect_success "remove a leaf node from the repo manually" ' - rm "$LEAF1FILE" - ' - - test_expect_success "check that the node is removed" ' - test_must_fail ipfs cat $HASH1 - ' - - test_expect_success "'ipfs repo gc' should still be fine" ' - ipfs repo gc - ' - - test_expect_success "corrupt the root node of 1MB file" ' - test -e "$HASH1FILE" && - dd if=/dev/zero of="$HASH1FILE" count=1 bs=100 conv=notrunc - ' - - test_expect_success "'ipfs repo gc' should abort without removing anything" ' - test_must_fail ipfs repo gc 2>&1 | tee gc_err && - grep -q "could not retrieve links for $HASH1" gc_err && - grep -q "aborted" gc_err - ' - - test_expect_success "leaf nodes were not removed after gc" ' - ipfs cat $LEAF3 > /dev/null && - ipfs cat $LEAF4 > /dev/null - ' - - test_expect_success "unpin the 1MB file" ' - ipfs pin rm $HASH1 - ' - - # make sure the permission problem is fixed on exit, otherwise cleanup - # will fail - trap "chmod 700 `dirname "$LEAF2FILE"` 2> /dev/null || true" 0 - - test_expect_success "create a permission problem" ' - chmod 500 `dirname "$LEAF2FILE"` && - test_must_fail ipfs block rm $LEAF2 2>&1 | tee block_rm_err && - grep -q "permission denied" block_rm_err - ' - - # repo gc outputs raw multihashes. 
We check HASH1 with block stat rather than - # grepping the output since it's not a raw multihash - test_expect_success "'ipfs repo gc' should still run and remove as much as possible" ' - test_must_fail ipfs repo gc 2>&1 | tee repo_gc_out && - grep -q "could not remove $LEAF2" repo_gc_out && - grep -q "removed $(to_raw_cid $LEAF3)" repo_gc_out && - grep -q "removed $(to_raw_cid $LEAF4)" repo_gc_out && - test_must_fail ipfs block stat $HASH1 - ' - - test_expect_success "fix the permission problem" ' - chmod 700 `dirname "$LEAF2FILE"` - ' - - test_expect_success "'ipfs repo gc' should be ok now" ' - ipfs repo gc | tee repo_gc_out - grep -q "removed $(to_raw_cid $LEAF2)" repo_gc_out - ' -} - -test_gc_robust_part2() { - - test_expect_success "add 1MB file normally (i.e., without raw leaves)" ' - random 1048576 56 > afile && - HASH2=`ipfs add -q afile` - ' - - LEAF1=QmSijovevteoY63Uj1uC5b8pkpDU5Jgyk2dYBqz3sMJUPc - LEAF1FILE=.ipfs/blocks/ME/CIQECF2K344QITW5S6E6H6T4DOXDDB2XA2V7BBOCIMN2VVF4Q77SMEY.data - - LEAF2=QmTbPEyrA1JyGUHFvmtx1FNZVzdBreMv8Hc8jV9sBRWhNA - LEAF2FILE=.ipfs/blocks/WM/CIQE4EFIJN2SUTQYSKMKNG7VM75W3SXT6LWJCHJJ73UAWN73WCX3WMY.data - - - test_expect_success "add some additional unpinned content" ' - random 1000 3 > junk1 && - random 1000 4 > junk2 && - JUNK1=`ipfs add --pin=false -q junk1` && - JUNK2=`ipfs add --pin=false -q junk2` - ' - - test_expect_success "remove a leaf node from the repo manually" ' - rm "$LEAF1FILE" - ' - - test_expect_success "'ipfs repo gc' should abort" ' - test_must_fail ipfs repo gc 2>&1 | tee repo_gc_out && - grep -q "could not retrieve links for $LEAF1" repo_gc_out && - grep -q "aborted" repo_gc_out - ' - - test_expect_success "test that garbage collector really aborted" ' - ipfs cat $JUNK1 > /dev/null && - ipfs cat $JUNK2 > /dev/null - ' - - test_expect_success "corrupt a key" ' - test -e "$LEAF2FILE" && - dd if=/dev/zero of="$LEAF2FILE" count=1 bs=100 conv=notrunc - ' - - test_expect_success "'ipfs repo gc' should abort with two errors" ' - test_must_fail ipfs repo gc 2>&1 | tee repo_gc_out && - grep -q "could not retrieve links for $LEAF1" repo_gc_out && - grep -q "could not retrieve links for $LEAF2" repo_gc_out && - grep -q "aborted" repo_gc_out - ' - - test_expect_success "'ipfs repo gc --stream-errors' should abort and report each error separately" ' - test_must_fail ipfs repo gc --stream-errors 2>&1 | tee repo_gc_out && - grep -q "Error: could not retrieve links for $LEAF1" repo_gc_out && - grep -q "Error: could not retrieve links for $LEAF2" repo_gc_out && - grep -q "Error: garbage collection aborted" repo_gc_out - ' - - test_expect_success "unpin 1MB file" ' - ipfs pin rm $HASH2 - ' - - test_expect_success "'ipfs repo gc' should be fine now" ' - ipfs repo gc | tee repo_gc_out && - grep -q "removed $(to_raw_cid $HASH2)" repo_gc_out && - grep -q "removed $(to_raw_cid $LEAF2)" repo_gc_out - ' -} - -test_init_ipfs - -test_gc_robust_part1 -test_gc_robust_part2 - -test_launch_ipfs_daemon_without_network - -test_gc_robust_part1 -test_gc_robust_part2 - -test_kill_ipfs_daemon - -test_done - diff --git a/test/sharness/t0088-repo-stat-symlink.sh b/test/sharness/t0088-repo-stat-symlink.sh deleted file mode 100755 index 6e6aedbd4cb..00000000000 --- a/test/sharness/t0088-repo-stat-symlink.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2017 John Reed -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test 'ipfs repo stat' where IPFS_PATH is a symbolic link" - -. 
lib/test-lib.sh - -test_expect_success "create symbolic link for IPFS_PATH" ' - mkdir sym_link_target && - ln -s sym_link_target .ipfs -' - -test_init_ipfs - -# ensure that the RepoSize is reasonable when checked via a symlink. -test_expect_success "'ipfs repo stat' RepoSize is correct with sym link" ' - reposize_symlink=$(ipfs repo stat | grep RepoSize | awk '\''{ print $2 }'\'') && - symlink_size=$(file_size .ipfs) && - test "${reposize_symlink}" -gt "${symlink_size}" -' - -test_done diff --git a/test/sharness/t0090-get.sh b/test/sharness/t0090-get.sh deleted file mode 100755 index 6a803080e85..00000000000 --- a/test/sharness/t0090-get.sh +++ /dev/null @@ -1,191 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2015 Matt Bell -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test get command" - -. lib/test-lib.sh - -test_init_ipfs - -test_ipfs_get_flag() { - ext="$1"; shift - tar_flag="$1"; shift - flag="$@" - - test_expect_success "ipfs get $flag succeeds" ' - ipfs get "$HASH" '"$flag"' >actual - ' - - test_expect_success "ipfs get $flag output looks good" ' - printf "%s\n" "Saving archive to $HASH$ext" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs get $flag archive output is valid" ' - tar "$tar_flag" "$HASH$ext" && - test_cmp "$HASH" data && - rm "$HASH$ext" && - rm "$HASH" - ' -} - -# we use a function so that we can run it both offline + online -test_get_cmd() { - - test_expect_success "'ipfs get --help' succeeds" ' - ipfs get --help >actual - ' - - test_expect_success "'ipfs get --help' output looks good" ' - egrep "ipfs get.*" actual >/dev/null || - test_fsh cat actual - ' - - test_expect_success "ipfs get succeeds" ' - echo "Hello Worlds!" >data && - HASH=`ipfs add -q data` && - ipfs get "$HASH" >actual - ' - - test_expect_success "ipfs get output looks good" ' - printf "%s\n" "Saving file(s) to $HASH" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs get file output looks good" ' - test_cmp "$HASH" data - ' - - test_expect_success "ipfs get DOES NOT error when trying to overwrite a file" ' - ipfs get "$HASH" >actual && - rm "$HASH" - ' - - test_expect_success "ipfs get works with raw leaves" ' - HASH2=$(ipfs add --raw-leaves -q data) && - ipfs get "$HASH2" >actual2 - ' - - test_expect_success "ipfs get output looks good" ' - printf "%s\n" "Saving file(s) to $HASH2" >expected2 && - test_cmp expected2 actual2 - ' - - test_expect_success "ipfs get file output looks good" ' - test_cmp "$HASH2" data - ' - - test_ipfs_get_flag ".tar" "-xf" -a - - test_ipfs_get_flag ".tar.gz" "-zxf" -a -C - - test_ipfs_get_flag ".tar.gz" "-zxf" -a -C -l 9 - - test_expect_success "ipfs get succeeds (directory)" ' - mkdir -p dir && - touch dir/a && - mkdir -p dir/b && - echo "Hello, Worlds!" >dir/b/c && - HASH2=`ipfs add -r -Q dir` && - ipfs get "$HASH2" >actual - ' - - test_expect_success "ipfs get output looks good (directory)" ' - printf "%s\n" "Saving file(s) to $HASH2" >expected && - test_cmp expected actual - ' - - test_expect_success "ipfs get output is valid (directory)" ' - test_cmp dir/a "$HASH2"/a && - test_cmp dir/b/c "$HASH2"/b/c && - rm -r "$HASH2" - ' - - # Test issue #4720: problems when path contains a trailing slash. 
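
The regression guarded against here is that a trailing slash must not change
the result: the next test checks that

    ipfs get "$HASH2/"

unpacks the same tree as the plain "ipfs get $HASH2" call did above.
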
- test_expect_success "ipfs get with slash (directory)" ' - ipfs get "$HASH2/" && - test_cmp dir/a "$HASH2"/a && - test_cmp dir/b/c "$HASH2"/b/c && - rm -r "$HASH2" - ' - - test_expect_success "ipfs get -a -C succeeds (directory)" ' - ipfs get "$HASH2" -a -C >actual - ' - - test_expect_success "ipfs get -a -C output looks good (directory)" ' - printf "%s\n" "Saving archive to $HASH2.tar.gz" >expected && - test_cmp expected actual - ' - - test_expect_success "gzipped tar archive output is valid (directory)" ' - tar -zxf "$HASH2".tar.gz && - test_cmp dir/a "$HASH2"/a && - test_cmp dir/b/c "$HASH2"/b/c && - rm -r "$HASH2" - ' - - test_expect_success "ipfs get ../.. should fail" ' - test_must_fail ipfs get ../.. 2>actual && - test_should_contain "Error: invalid path \"../..\"" actual - ' - - test_expect_success "create small file" ' - echo "foo" > small && - ipfs add -q small > hash_small - ' - - test_expect_success "get small file" ' - ipfs get -o out_small $(cat hash_small) && - test_cmp small out_small - ' - - test_expect_success "create medium file" ' - head -c 16000 > medium && - ipfs add -q medium > hash_medium - ' - - test_expect_success "get medium file" ' - ipfs get -o out_medium $(cat hash_medium) && - test_cmp medium out_medium - ' -} - -test_get_fail() { - test_expect_success "create an object that has unresolvable links" ' - cat <<-\EOF >bad_object && -{"Data":{"/":{"bytes":"CAE"}},"Links":[{"Hash":{"/":"Qmd4mG6pDFDmDTn6p3hX1srP8qTbkyXKj5yjpEsiHDX3u8"},"Name":"bar","Tsize":56},{"Hash":{"/":"QmUTjwRnG28dSrFFVTYgbr6LiDLsBmRr2SaUSTGheK2YqG"},"Name":"baz","Tsize":24266},{"Hash":{"/":"QmZzaC6ydNXiR65W8VjGA73ET9MZ6VFAqUT1ngYMXcpihn"},"Name":"foo","Tsize":1897}]} -EOF - cat bad_object | ipfs dag put --store-codec dag-pb > put_out - ' - - test_expect_success "output looks good" ' - echo "bafybeifrjjol3gixedca6etdwccnvwfvhurc4wb3i5mnk2rvwvyfcgwxd4" > put_exp && - test_cmp put_exp put_out - ' - - test_expect_success "ipfs get fails" ' - test_expect_code 1 ipfs get QmaGidyrnX8FMbWJoxp8HVwZ1uRKwCyxBJzABnR1S2FVUr - ' -} - -# should work offline -test_get_cmd - -# only really works offline, will try and search network when online -test_get_fail - -# should work online -test_launch_ipfs_daemon -test_get_cmd - -test_expect_success "empty request to get doesn't panic and returns error" ' - curl -X POST "http://$API_ADDR/api/v0/get" > curl_out || true && - grep "argument \"ipfs-path\" is required" curl_out -' -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0095-refs.sh b/test/sharness/t0095-refs.sh deleted file mode 100755 index ca84bb6ad07..00000000000 --- a/test/sharness/t0095-refs.sh +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2018 Protocol Labs, Inc -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test 'ipfs refs' command" - -. lib/test-lib.sh - -test_init_ipfs -test_launch_ipfs_daemon_without_network - -# This file performs tests with the following directory -# structure. 
-#
-# L0-         _______ A_________
-#            /        |    \    \
-# L1-       B         C     D   1.txt
-#          / \        |     |
-# L2-     D   1.txt   B    2.txt
-#         |          / \
-# L3-   2.txt       D   1.txt
-#                   |
-# L4-             2.txt
-#
-# 'ipfs add -r A' output:
-#
-# added QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v A/1.txt
-# added QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v A/B/1.txt
-# added QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61 A/B/D/2.txt
-# added QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v A/C/B/1.txt
-# added QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61 A/C/B/D/2.txt
-# added QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61 A/D/2.txt
-# added QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS A/B/D
-# added QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa A/B
-# added QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS A/C/B/D
-# added QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa A/C/B
-# added QmXXazTjeNCKFnpW1D65vTKsTs8fbgkCWTv8Em4pdK2coH A/C
-# added QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS A/D
-# added QmU6xujRsYzcrkocuR3fhfnkZBB8eyUFFq4WKRGw2aS15h A
-#
-# 'ipfs refs -r QmU6xujRsYzcrkocuR3fhfnkZBB8eyUFFq4WKRGw2aS15h' sample output
-# that shows visit order in a stable go-ipfs version:
-#
-# QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v - 1.txt
-# QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa - B (A/B)
-# QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v - 1.txt (A/B/1.txt)
-# QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS - D (A/B/D)
-# QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61 - 2.txt (A/B/D/2.txt)
-# QmXXazTjeNCKFnpW1D65vTKsTs8fbgkCWTv8Em4pdK2coH - C (A/C)
-# QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa - B (A/C/B)
-# QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v - 1.txt (A/C/B/1.txt)
-# QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS - D (A/C/B/D)
-# QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61 - 2.txt (A/C/B/D/2.txt)
-# QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS - D (A/D)
-# QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61 - 2.txt (A/D/2.txt)
-
-
-refsroot=QmU6xujRsYzcrkocuR3fhfnkZBB8eyUFFq4WKRGw2aS15h
-
-test_expect_success "create and add folders for refs" '
-  mkdir -p A/B/D A/C/B/D A/D
-  echo "1" > A/1.txt
-  echo "1" > A/B/1.txt
-  echo "1" > A/C/B/1.txt
-  echo "2" > A/B/D/2.txt
-  echo "2" > A/C/B/D/2.txt
-  echo "2" > A/D/2.txt
-  root=$(ipfs add -r -Q A)
-  [[ "$root" == "$refsroot" ]]
-'
-
-test_refs_output() {
-  ARGS=$1
-  FILTER=$2
-
-  test_expect_success "ipfs refs $ARGS -r" '
-    cat <<EOF > expected.txt
-QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v
-QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa
-QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v
-QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS
-QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61
-QmXXazTjeNCKFnpW1D65vTKsTs8fbgkCWTv8Em4pdK2coH
-QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa
-QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v
-QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS
-QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61
-QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS
-QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61
-EOF
-
-    ipfs refs $ARGS -r $refsroot > refsr.txt
-    test_cmp expected.txt refsr.txt
-  '
-
-  # Unique is like above but removing duplicates
-  test_expect_success "ipfs refs $ARGS -r --unique" '
-    cat <<EOF > expected.txt
-QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v
-QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa
-QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS
-QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61
-QmXXazTjeNCKFnpW1D65vTKsTs8fbgkCWTv8Em4pdK2coH
-EOF
-
-    ipfs refs $ARGS -r --unique $refsroot > refsr.txt
-    test_cmp expected.txt refsr.txt
-  '
-
-  # First level is 1.txt, B, C, D
-  test_expect_success "ipfs refs $ARGS" '
-    cat <<EOF > expected.txt
-QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v
-QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa
-QmXXazTjeNCKFnpW1D65vTKsTs8fbgkCWTv8Em4pdK2coH
-QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS
-EOF
-    ipfs refs $ARGS $refsroot > refs.txt
-    test_cmp expected.txt refs.txt
-  '
-
-  # max-depth=0 should return an empty list
-  test_expect_success "ipfs refs $ARGS -r --max-depth=0" '
-    cat <<EOF > expected.txt
-EOF
-    ipfs refs $ARGS -r --max-depth=0 $refsroot > refs.txt
-    test_cmp expected.txt refs.txt
-  '
-
-  # max-depth=1 should be equivalent to running without -r
-  test_expect_success "ipfs refs $ARGS -r --max-depth=1" '
-    ipfs refs $ARGS -r --max-depth=1 $refsroot > refsr.txt
-    ipfs refs $ARGS $refsroot > refs.txt
-    test_cmp refsr.txt refs.txt
-  '
-
-  # We should see the depth limit engage at level 2
-  test_expect_success "ipfs refs $ARGS -r --max-depth=2" '
-    cat <<EOF > expected.txt
-QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v
-QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa
-QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v
-QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS
-QmXXazTjeNCKFnpW1D65vTKsTs8fbgkCWTv8Em4pdK2coH
-QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa
-QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS
-QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61
-EOF
-    ipfs refs $ARGS -r --max-depth=2 $refsroot > refsr.txt
-    test_cmp refsr.txt expected.txt
-  '
-
-  # Here branch pruning and re-exploration come into play.
-  # At first the traversal sees D at level 2 and does not go deeper.
-  # But after visiting C it sees D again at level 1 and goes deeper,
-  # so it outputs the hash for 2.txt (-q61).
-  # We also see that C/B is pruned, as it has been shown before.
-  #
-  # Excerpt from diagram above:
-  #
-  # L0-         _______ A_________
-  #            /        |    \    \
-  # L1-       B         C     D   1.txt
-  #          / \        |     |
-  # L2-     D   1.txt   B    2.txt
-  test_expect_success "ipfs refs $ARGS -r --unique --max-depth=2" '
-    cat <<EOF > expected.txt
-QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v
-QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa
-QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS
-QmXXazTjeNCKFnpW1D65vTKsTs8fbgkCWTv8Em4pdK2coH
-QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61
-EOF
-    ipfs refs $ARGS -r --unique --max-depth=2 $refsroot > refsr.txt
-    test_cmp refsr.txt expected.txt
-  '
-}
-
-test_refs_output '' 'cat'
-
-test_refs_output '--cid-base=base32' 'ipfs cid base32'
-
-test_kill_ipfs_daemon
-
-test_done
diff --git a/test/sharness/t0101-iptb-name.sh b/test/sharness/t0101-iptb-name.sh
deleted file mode 100755
index e5cdadcb08f..00000000000
--- a/test/sharness/t0101-iptb-name.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env bash
-#
-# Copyright (c) 2014 Jeromy Johnson
-# MIT Licensed; see the LICENSE file in this repository.
-#
-
-test_description="Test ipns over an iptb cluster"
-
-. 
-
-num_nodes=4
-
-test_expect_success "set up an iptb cluster" '
-  iptb testbed create -type localipfs -count $num_nodes -force -init
-'
-
-startup_cluster $num_nodes
-
-test_expect_success "add an object on one node" '
-  echo "ipns is super fun" > file &&
-  HASH_FILE=$(ipfsi 1 add -q file)
-'
-
-test_expect_success "publish that object as an ipns entry" '
-  ipfsi 1 name publish $HASH_FILE
-'
-
-test_expect_success "add an entry on another node pointing to that one" '
-  NODE1_ID=$(iptb attr get 1 id) &&
-  ipfsi 2 name publish /ipns/$NODE1_ID
-'
-
-test_expect_success "cat that entry on a third node" '
-  NODE2_ID=$(iptb attr get 2 id) &&
-  ipfsi 3 cat /ipns/$NODE2_ID > output
-'
-
-test_expect_success "ensure output was the same" '
-  test_cmp file output
-'
-
-test_expect_success "shut down iptb" '
-  iptb stop
-'
-
-test_done
diff --git a/test/sharness/t0109-gateway-web-_redirects.sh b/test/sharness/t0109-gateway-web-_redirects.sh
deleted file mode 100755
index 0bc2a23b600..00000000000
--- a/test/sharness/t0109-gateway-web-_redirects.sh
+++ /dev/null
@@ -1,239 +0,0 @@
-#!/usr/bin/env bash
-
-test_description="Test HTTP Gateway _redirects support"
-
-. lib/test-lib.sh
-
-test_init_ipfs
-test_launch_ipfs_daemon
-
-## ============================================================================
-## Test _redirects file support
-## ============================================================================
-
-# Import test case
-# Run `ipfs cat /ipfs/$REDIRECTS_DIR_CID/_redirects` to see sample _redirects file
-test_expect_success "Add the _redirects file test directory" '
-  ipfs dag import --pin-roots ../t0109-gateway-web-_redirects-data/redirects.car
-'
-CAR_ROOT_CID=QmQyqMY5vUBSbSxyitJqthgwZunCQjDVtNd8ggVCxzuPQ4
-
-REDIRECTS_DIR_CID=$(ipfs resolve -r /ipfs/$CAR_ROOT_CID/examples | cut -d "/" -f3)
-REDIRECTS_DIR_HOSTNAME="${REDIRECTS_DIR_CID}.ipfs.localhost:$GWAY_PORT"
-
-test_expect_success "request for $REDIRECTS_DIR_HOSTNAME/redirect-one redirects with default of 301, per _redirects file" '
-  curl -sD - --resolve $REDIRECTS_DIR_HOSTNAME:127.0.0.1 "http://$REDIRECTS_DIR_HOSTNAME/redirect-one" > response &&
-  test_should_contain "301 Moved Permanently" response &&
-  test_should_contain "Location: /one.html" response
-'
-
-test_expect_success "request for $REDIRECTS_DIR_HOSTNAME/301-redirect-one redirects with 301, per _redirects file" '
-  curl -sD - --resolve $REDIRECTS_DIR_HOSTNAME:127.0.0.1 "http://$REDIRECTS_DIR_HOSTNAME/301-redirect-one" > response &&
-  test_should_contain "301 Moved Permanently" response &&
-  test_should_contain "Location: /one.html" response
-'
-
-test_expect_success "request for $REDIRECTS_DIR_HOSTNAME/302-redirect-two redirects with 302, per _redirects file" '
-  curl -sD - --resolve $REDIRECTS_DIR_HOSTNAME:127.0.0.1 "http://$REDIRECTS_DIR_HOSTNAME/302-redirect-two" > response &&
-  test_should_contain "302 Found" response &&
-  test_should_contain "Location: /two.html" response
-'
-
-test_expect_success "request for $REDIRECTS_DIR_HOSTNAME/200-index returns 200, per _redirects file" '
-  curl -sD - --resolve $REDIRECTS_DIR_HOSTNAME:127.0.0.1 "http://$REDIRECTS_DIR_HOSTNAME/200-index" > response &&
-  test_should_contain "my index" response &&
-  test_should_contain "200 OK" response
-'
-
-test_expect_success "request for $REDIRECTS_DIR_HOSTNAME/posts/:year/:month/:day/:title redirects with 301 and placeholders, per _redirects file" '
-  curl -sD - --resolve $REDIRECTS_DIR_HOSTNAME:127.0.0.1 "http://$REDIRECTS_DIR_HOSTNAME/posts/2022/01/01/hello-world" > response &&
-  test_should_contain "301 Moved Permanently" response &&
-  test_should_contain "Location: /articles/2022/01/01/hello-world" response
-'
-
-test_expect_success "request for $REDIRECTS_DIR_HOSTNAME/splat/one.html redirects with 301 and splat placeholder, per _redirects file" '
-  curl -sD - --resolve $REDIRECTS_DIR_HOSTNAME:127.0.0.1 "http://$REDIRECTS_DIR_HOSTNAME/splat/one.html" > response &&
-  test_should_contain "301 Moved Permanently" response &&
-  test_should_contain "Location: /redirected-splat/one.html" response
-'
-
-# ensure custom 4xx works and has the same cache headers as regular /ipfs/ path
-CUSTOM_4XX_CID=$(ipfs resolve -r /ipfs/$CAR_ROOT_CID/examples/404.html | cut -d "/" -f3)
-test_expect_success "request for $REDIRECTS_DIR_HOSTNAME/not-found/has-no-redirects-entry returns custom 404, per _redirects file" '
-  curl -sD - --resolve $REDIRECTS_DIR_HOSTNAME:127.0.0.1 "http://$REDIRECTS_DIR_HOSTNAME/not-found/has-no-redirects-entry" > response &&
-  test_should_contain "404 Not Found" response &&
-  test_should_contain "Cache-Control: public, max-age=29030400, immutable" response &&
-  test_should_contain "Etag: \"$CUSTOM_4XX_CID\"" response &&
-  test_should_contain "my 404" response
-'
-
-CUSTOM_4XX_CID=$(ipfs resolve -r /ipfs/$CAR_ROOT_CID/examples/410.html | cut -d "/" -f3)
-test_expect_success "request for $REDIRECTS_DIR_HOSTNAME/gone/has-no-redirects-entry returns custom 410, per _redirects file" '
-  curl -sD - --resolve $REDIRECTS_DIR_HOSTNAME:127.0.0.1 "http://$REDIRECTS_DIR_HOSTNAME/gone/has-no-redirects-entry" > response &&
-  test_should_contain "410 Gone" response &&
-  test_should_contain "Cache-Control: public, max-age=29030400, immutable" response &&
-  test_should_contain "Etag: \"$CUSTOM_4XX_CID\"" response &&
-  test_should_contain "my 410" response
-'
-
-CUSTOM_4XX_CID=$(ipfs resolve -r /ipfs/$CAR_ROOT_CID/examples/451.html | cut -d "/" -f3)
-test_expect_success "request for $REDIRECTS_DIR_HOSTNAME/unavail/has-no-redirects-entry returns custom 451, per _redirects file" '
-  curl -sD - --resolve $REDIRECTS_DIR_HOSTNAME:127.0.0.1 "http://$REDIRECTS_DIR_HOSTNAME/unavail/has-no-redirects-entry" > response &&
-  test_should_contain "451 Unavailable For Legal Reasons" response &&
-  test_should_contain "Cache-Control: public, max-age=29030400, immutable" response &&
-  test_should_contain "Etag: \"$CUSTOM_4XX_CID\"" response &&
-  test_should_contain "my 451" response
-'
-
-test_expect_success "request for $REDIRECTS_DIR_HOSTNAME/catch-all returns 200, per _redirects file" '
-  curl -sD - --resolve $REDIRECTS_DIR_HOSTNAME:127.0.0.1 "http://$REDIRECTS_DIR_HOSTNAME/catch-all" > response &&
-  test_should_contain "200 OK" response &&
-  test_should_contain "my index" response
-'
-
-# This test ensures _redirects is supported only on Web Gateways that use Host header (DNSLink, Subdomain)
-test_expect_success "request for http://127.0.0.1:$GWAY_PORT/ipfs/$REDIRECTS_DIR_CID/301-redirect-one returns generic 404 (no custom 404 from _redirects since no origin isolation)" '
-  curl -sD - "http://127.0.0.1:$GWAY_PORT/ipfs/$REDIRECTS_DIR_CID/301-redirect-one" > response &&
-  test_should_contain "404 Not Found" response &&
-  test_should_not_contain "my 404" response
-'
-
-# With CRLF line terminator
-NEWLINE_REDIRECTS_DIR_CID=$(ipfs resolve -r /ipfs/$CAR_ROOT_CID/newlines | cut -d "/" -f3)
-NEWLINE_REDIRECTS_DIR_HOSTNAME="${NEWLINE_REDIRECTS_DIR_CID}.ipfs.localhost:$GWAY_PORT"
-
-test_expect_success "newline: _redirects has CRLF line terminators" '
-  ipfs cat /ipfs/$NEWLINE_REDIRECTS_DIR_CID/_redirects | file - > response &&
-  test_should_contain "with CRLF line terminators" response
-'
-
-test_expect_success "newline: request for $NEWLINE_REDIRECTS_DIR_HOSTNAME/redirect-one redirects with default of 301, per _redirects file" '
-  curl -sD - --resolve $NEWLINE_REDIRECTS_DIR_HOSTNAME:127.0.0.1 "http://$NEWLINE_REDIRECTS_DIR_HOSTNAME/redirect-one" > response &&
-  test_should_contain "301 Moved Permanently" response &&
-  test_should_contain "Location: /one.html" response
-'
-
-# Good codes
-GOOD_REDIRECTS_DIR_CID=$(ipfs resolve -r /ipfs/$CAR_ROOT_CID/good-codes | cut -d "/" -f3)
-GOOD_REDIRECTS_DIR_HOSTNAME="${GOOD_REDIRECTS_DIR_CID}.ipfs.localhost:$GWAY_PORT"
-
-test_expect_success "good codes: request for $GOOD_REDIRECTS_DIR_HOSTNAME/a301 redirects with 301, per _redirects file" '
-  curl -sD - --resolve $GOOD_REDIRECTS_DIR_HOSTNAME:127.0.0.1 "http://$GOOD_REDIRECTS_DIR_HOSTNAME/a301" > response &&
-  test_should_contain "301 Moved Permanently" response &&
-  test_should_contain "Location: /b301" response
-'
-
-# Bad codes
-BAD_REDIRECTS_DIR_CID=$(ipfs resolve -r /ipfs/$CAR_ROOT_CID/bad-codes | cut -d "/" -f3)
-BAD_REDIRECTS_DIR_HOSTNAME="${BAD_REDIRECTS_DIR_CID}.ipfs.localhost:$GWAY_PORT"
-
-# if accessing a path that doesn't exist, read _redirects and fail parsing, and return error
-test_expect_success "bad codes: request for $BAD_REDIRECTS_DIR_HOSTNAME/not-found returns error about bad code" '
-  curl -sD - --resolve $BAD_REDIRECTS_DIR_HOSTNAME:127.0.0.1 "http://$BAD_REDIRECTS_DIR_HOSTNAME/not-found" > response &&
-  test_should_contain "500" response &&
-  test_should_contain "status code 999 is not supported" response
-'
-
-# if accessing a path that does exist, don't read _redirects and therefore don't fail parsing
-test_expect_success "bad codes: request for $BAD_REDIRECTS_DIR_HOSTNAME/found.html doesn't return error about bad code" '
-  curl -sD - --resolve $BAD_REDIRECTS_DIR_HOSTNAME:127.0.0.1 "http://$BAD_REDIRECTS_DIR_HOSTNAME/found.html" > response &&
-  test_should_contain "200" response &&
-  test_should_contain "my found" response &&
-  test_should_not_contain "unsupported redirect status" response
-'
-
-# Invalid file, containing "hello"
-INVALID_REDIRECTS_DIR_CID=$(ipfs resolve -r /ipfs/$CAR_ROOT_CID/invalid | cut -d "/" -f3)
-INVALID_REDIRECTS_DIR_HOSTNAME="${INVALID_REDIRECTS_DIR_CID}.ipfs.localhost:$GWAY_PORT"
-
-# if accessing a path that doesn't exist, read _redirects and fail parsing, and return error
-test_expect_success "invalid file: request for $INVALID_REDIRECTS_DIR_HOSTNAME/not-found returns error about invalid redirects file" '
-  curl -sD - --resolve $INVALID_REDIRECTS_DIR_HOSTNAME:127.0.0.1 "http://$INVALID_REDIRECTS_DIR_HOSTNAME/not-found" > response &&
-  test_should_contain "500" response &&
-  test_should_contain "could not parse _redirects:" response
-'
-
-# Invalid file, containing forced redirect
-INVALID_REDIRECTS_DIR_CID=$(ipfs resolve -r /ipfs/$CAR_ROOT_CID/forced | cut -d "/" -f3)
-INVALID_REDIRECTS_DIR_HOSTNAME="${INVALID_REDIRECTS_DIR_CID}.ipfs.localhost:$GWAY_PORT"
-
-# if accessing a path that doesn't exist, read _redirects and fail parsing, and return error
-test_expect_success "invalid file: request for $INVALID_REDIRECTS_DIR_HOSTNAME/not-found returns error about forced redirect in redirects file" '
-  curl -sD - --resolve $INVALID_REDIRECTS_DIR_HOSTNAME:127.0.0.1 "http://$INVALID_REDIRECTS_DIR_HOSTNAME/not-found" > response &&
-  test_should_contain "500" response &&
-  test_should_contain "could not parse _redirects:" response &&
-  test_should_contain "forced redirects (or \"shadowing\") are not supported" response
-'
-
-# if accessing a path that doesn't exist and _redirects file is too large, return error
-TOO_LARGE_REDIRECTS_DIR_CID=$(ipfs resolve -r /ipfs/$CAR_ROOT_CID/too-large | cut -d "/" -f3)
-TOO_LARGE_REDIRECTS_DIR_HOSTNAME="${TOO_LARGE_REDIRECTS_DIR_CID}.ipfs.localhost:$GWAY_PORT"
-test_expect_success "invalid file: request for $TOO_LARGE_REDIRECTS_DIR_HOSTNAME/not-found returns error about too large redirects file" '
-  curl -sD - --resolve $TOO_LARGE_REDIRECTS_DIR_HOSTNAME:127.0.0.1 "http://$TOO_LARGE_REDIRECTS_DIR_HOSTNAME/not-found" > response &&
-  test_should_contain "500" response &&
-  test_should_contain "could not parse _redirects:" response &&
-  test_should_contain "redirects file size cannot exceed" response
-'
-
-test_kill_ipfs_daemon
-
-# disable wildcard DNSLink gateway
-# and enable it on specific DNSLink hostname
-ipfs config --json Gateway.NoDNSLink true && \
-ipfs config --json Gateway.PublicGateways '{
-  "dnslink-enabled-on-fqdn.example.org": {
-    "NoDNSLink": false,
-    "UseSubdomains": false,
-    "Paths": ["/ipfs"]
-  },
-  "dnslink-disabled-on-fqdn.example.com": {
-    "NoDNSLink": true,
-    "UseSubdomains": false,
-    "Paths": []
-  }
-}' || exit 1
-
-# DNSLink test requires a daemon in online mode with precached /ipns/ mapping
-# REDIRECTS_DIR_CID=$(ipfs resolve -r /ipfs/$CAR_ROOT_CID/examples | cut -d "/" -f3)
-DNSLINK_FQDN="dnslink-enabled-on-fqdn.example.org"
-NO_DNSLINK_FQDN="dnslink-disabled-on-fqdn.example.com"
-export IPFS_NS_MAP="$DNSLINK_FQDN:/ipfs/$REDIRECTS_DIR_CID"
-
-# restart daemon to apply config changes
-test_launch_ipfs_daemon
-
-# make sure test setup is valid (fail if CoreAPI is unable to resolve)
-test_expect_success "spoofed DNSLink record resolves in cli" "
-  ipfs resolve /ipns/$DNSLINK_FQDN > result &&
-  test_should_contain \"$REDIRECTS_DIR_CID\" result &&
-  ipfs cat /ipns/$DNSLINK_FQDN/_redirects > result &&
-  test_should_contain \"index.html\" result
-"
-
-test_expect_success "request for $DNSLINK_FQDN/redirect-one redirects with default of 301, per _redirects file" '
-  curl -sD - --resolve $DNSLINK_FQDN:$GWAY_PORT:127.0.0.1 "http://$DNSLINK_FQDN:$GWAY_PORT/redirect-one" > response &&
-  test_should_contain "301 Moved Permanently" response &&
-  test_should_contain "Location: /one.html" response
-'
-
-# ensure custom 404 works and has the same cache headers as regular /ipns/ paths
-test_expect_success "request for $DNSLINK_FQDN/not-found/has-no-redirects-entry returns custom 404, per _redirects file" '
-  curl -sD - --resolve $DNSLINK_FQDN:$GWAY_PORT:127.0.0.1 "http://$DNSLINK_FQDN:$GWAY_PORT/not-found/has-no-redirects-entry" > response &&
-  test_should_contain "404 Not Found" response &&
-  test_should_contain "Etag: \"Qmd9GD7Bauh6N2ZLfNnYS3b7QVAijbud83b8GE8LPMNBBP\"" response &&
-  test_should_not_contain "Cache-Control: public, max-age=29030400, immutable" response &&
-  test_should_not_contain "immutable" response &&
-  test_should_contain "Date: " response &&
-  test_should_contain "my 404" response
-'
-
-test_expect_success "request for $NO_DNSLINK_FQDN/redirect-one does not redirect, since DNSLink is disabled" '
-  curl -sD - --resolve $NO_DNSLINK_FQDN:$GWAY_PORT:127.0.0.1 "http://$NO_DNSLINK_FQDN:$GWAY_PORT/redirect-one" > response &&
-  test_should_not_contain "one.html" response &&
-  test_should_not_contain "301 Moved Permanently" response &&
-  test_should_not_contain "Location:" response
-'
-
-test_kill_ipfs_daemon
-
-test_done
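For context: the _redirects format exercised above is the Netlify-style "from to [status]" list; a minimal illustrative sample mirroring the cases tested (not the literal fixture from redirects.car):

    /redirect-one                    /one.html
    /301-redirect-one                /one.html   301
    /302-redirect-two                /two.html   302
    /200-index                       /index.html 200
    /posts/:year/:month/:day/:title  /articles/:year/:month/:day/:title  301
    /splat/*                         /redirected-splat/:splat            301
    /*                               /index.html 200

Rules are only evaluated when the requested path does not exist in the DAG, which is why the found.html case above succeeds without touching the broken rules, and why forced redirects ("shadowing") are rejected.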
diff --git a/test/sharness/t0112-gateway-cors.sh b/test/sharness/t0112-gateway-cors.sh
deleted file mode 100755
index 90813ad6a21..00000000000
--- a/test/sharness/t0112-gateway-cors.sh
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env bash
-
-test_description="Test CORS behavior on Gateway port"
-
-. lib/test-lib.sh
-
-test_init_ipfs
-
-# Default config
-test_expect_success "Default Gateway.HTTPHeaders is empty (implicit CORS values from boxo/gateway)" '
-  cat <<EOF > expected
-{}
-EOF
-  ipfs config --json Gateway.HTTPHeaders > actual &&
-  test_cmp expected actual
-'
-
-test_launch_ipfs_daemon
-
-thash='bafkqabtimvwgy3yk' # hello
-
-# Gateway
-
-# HTTP GET Request
-test_expect_success "GET to Gateway succeeds" '
-  curl -svX GET -H "Origin: https://example.com" "http://127.0.0.1:$GWAY_PORT/ipfs/$thash" >/dev/null 2>curl_output &&
-  cat curl_output
-'
-
-# GET Response from Gateway should contain CORS headers
-test_expect_success "GET response for Gateway resource looks good" '
-  test_should_contain "< Access-Control-Allow-Origin: \*" curl_output &&
-  test_should_contain "< Access-Control-Allow-Methods: GET" curl_output &&
-  test_should_contain "< Access-Control-Allow-Methods: HEAD" curl_output &&
-  test_should_contain "< Access-Control-Allow-Methods: OPTIONS" curl_output &&
-  test_should_contain "< Access-Control-Allow-Headers: Content-Type" curl_output &&
-  test_should_contain "< Access-Control-Allow-Headers: Range" curl_output &&
-  test_should_contain "< Access-Control-Allow-Headers: User-Agent" curl_output &&
-  test_should_contain "< Access-Control-Allow-Headers: X-Requested-With" curl_output &&
-  test_should_contain "< Access-Control-Expose-Headers: Content-Range" curl_output &&
-  test_should_contain "< Access-Control-Expose-Headers: Content-Length" curl_output &&
-  test_should_contain "< Access-Control-Expose-Headers: X-Chunked-Output" curl_output &&
-  test_should_contain "< Access-Control-Expose-Headers: X-Stream-Output" curl_output &&
-  test_should_contain "< Access-Control-Expose-Headers: X-Ipfs-Path" curl_output &&
-  test_should_contain "< Access-Control-Expose-Headers: X-Ipfs-Roots" curl_output
-'
-# HTTP OPTIONS Request
-test_expect_success "OPTIONS to Gateway succeeds" '
-  curl -svX OPTIONS -H "Origin: https://example.com" "http://127.0.0.1:$GWAY_PORT/ipfs/$thash" 2>curl_output &&
-  cat curl_output
-'
-
-# OPTIONS response from Gateway should contain CORS headers
-test_expect_success "OPTIONS response for Gateway resource looks good" '
-  test_should_contain "< Access-Control-Allow-Origin: \*" curl_output &&
-  test_should_contain "< Access-Control-Allow-Methods: GET" curl_output &&
-  test_should_contain "< Access-Control-Allow-Methods: HEAD" curl_output &&
-  test_should_contain "< Access-Control-Allow-Methods: OPTIONS" curl_output &&
-  test_should_contain "< Access-Control-Allow-Headers: Content-Type" curl_output &&
-  test_should_contain "< Access-Control-Allow-Headers: Range" curl_output &&
-  test_should_contain "< Access-Control-Allow-Headers: User-Agent" curl_output &&
-  test_should_contain "< Access-Control-Allow-Headers: X-Requested-With" curl_output &&
-  test_should_contain "< Access-Control-Expose-Headers: Content-Range" curl_output &&
-  test_should_contain "< Access-Control-Expose-Headers: Content-Length" curl_output &&
-  test_should_contain "< Access-Control-Expose-Headers: X-Chunked-Output" curl_output &&
-  test_should_contain "< Access-Control-Expose-Headers: X-Stream-Output" curl_output &&
-  test_should_contain "< Access-Control-Expose-Headers: X-Ipfs-Path" curl_output &&
-  test_should_contain "< Access-Control-Expose-Headers: X-Ipfs-Roots" curl_output
-'
test_should_contain "< Access-Control-Expose-Headers: X-Ipfs-Roots" curl_output -' - -# HTTP OPTIONS Request on path β†’ subdomain HTTP 301 redirect -# (regression test for https://github.com/ipfs/kubo/issues/9983#issuecomment-1599673976) -test_expect_success "OPTIONS to Gateway succeeds" ' - curl -svX OPTIONS -H "Origin: https://example.com" "http://localhost:$GWAY_PORT/ipfs/$thash" 2>curl_output && - cat curl_output -' -# OPTION Response from Gateway should contain CORS headers -test_expect_success "OPTIONS response for subdomain redirect looks good" ' - test_should_contain "HTTP/1.1 301 Moved Permanently" curl_output && - test_should_contain "Location" curl_output && - test_should_contain "< Access-Control-Allow-Origin: \*" curl_output && - test_should_contain "< Access-Control-Allow-Methods: GET" curl_output -' - -test_kill_ipfs_daemon - -# Test CORS safelisting of custom headers -test_expect_success "Can configure gateway headers" ' - ipfs config --json Gateway.HTTPHeaders.Access-Control-Allow-Headers "[\"X-Custom1\"]" && - ipfs config --json Gateway.HTTPHeaders.Access-Control-Expose-Headers "[\"X-Custom2\"]" && - ipfs config --json Gateway.HTTPHeaders.Access-Control-Allow-Origin "[\"localhost\"]" -' - -test_launch_ipfs_daemon - -test_expect_success "OPTIONS to Gateway without custom headers succeeds" ' - curl -svX OPTIONS -H "Origin: https://example.com" "http://127.0.0.1:$GWAY_PORT/ipfs/$thash" 2>curl_output && - cat curl_output -' -# Range and Content-Range are safelisted by default, and keeping them makes better devexp -# because it does not cause regressions in range requests made by JS -test_expect_success "Access-Control-Allow-Headers extends the implicit list" ' - test_should_contain "< Access-Control-Allow-Headers: Range" curl_output && - test_should_contain "< Access-Control-Allow-Headers: X-Custom1" curl_output && - test_should_contain "< Access-Control-Expose-Headers: Content-Range" curl_output && - test_should_contain "< Access-Control-Expose-Headers: Content-Length" curl_output && - test_should_contain "< Access-Control-Expose-Headers: X-Ipfs-Path" curl_output && - test_should_contain "< Access-Control-Expose-Headers: X-Ipfs-Roots" curl_output && - test_should_contain "< Access-Control-Expose-Headers: X-Custom2" curl_output -' - -test_expect_success "OPTIONS to Gateway with a custom header succeeds" ' - curl -svX OPTIONS -H "Origin: https://example.com" -H "Access-Control-Request-Headers: X-Unexpected-Custom" "http://127.0.0.1:$GWAY_PORT/ipfs/$thash" 2>curl_output && - cat curl_output -' -test_expect_success "Access-Control-Allow-Headers extends the implicit list" ' - test_should_not_contain "< Access-Control-Allow-Headers: X-Unexpected-Custom" curl_output && - test_should_contain "< Access-Control-Allow-Headers: Range" curl_output && - test_should_contain "< Access-Control-Allow-Headers: X-Custom1" curl_output && - test_should_contain "< Access-Control-Expose-Headers: Content-Range" curl_output && - test_should_contain "< Access-Control-Expose-Headers: X-Custom2" curl_output -' - -# Origin is sensitive security perimeter, and we assume override should remove -# any implicit records -test_expect_success "Access-Control-Allow-Origin replaces the implicit list" ' - test_should_contain "< Access-Control-Allow-Origin: localhost" curl_output -' - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0114-gateway-subdomains.sh b/test/sharness/t0114-gateway-subdomains.sh deleted file mode 100755 index 5d9927d8e46..00000000000 --- 
diff --git a/test/sharness/t0114-gateway-subdomains.sh b/test/sharness/t0114-gateway-subdomains.sh
deleted file mode 100755
index 5d9927d8e46..00000000000
--- a/test/sharness/t0114-gateway-subdomains.sh
+++ /dev/null
@@ -1,919 +0,0 @@
-#!/usr/bin/env bash
-#
-# Copyright (c) Protocol Labs
-
-test_description="Test subdomain support on the HTTP gateway"
-
-
-. lib/test-lib.sh
-
-## ============================================================================
-## Helpers specific to subdomain tests
-## ============================================================================
-
-# Helper that tests gateway response over direct HTTP
-# and in all supported HTTP proxy modes
-test_localhost_gateway_response_should_contain() {
-  local label="$1"
-  local expected="$3"
-
-  # explicit "Host: $hostname" header to match browser behavior
-  # and also make tests independent from DNS
-  local host=$(echo $2 | cut -d'/' -f3 | cut -d':' -f1)
-  local hostname=$(echo $2 | cut -d'/' -f3 | cut -d':' -f1,2)
-
-  # Proxy is the same as HTTP Gateway, we use raw IP and port to be sure
-  local proxy="http://127.0.0.1:$GWAY_PORT"
-
-  # Create a raw URL version with IP to ensure hostname from Host header is used
-  # (removes false-positives, Host header is used for passing hostname already)
-  local url="$2"
-  local rawurl=$(echo "$url" | sed "s/$hostname/127.0.0.1:$GWAY_PORT/")
-
-  #echo "hostname: $hostname"
-  #echo "url before: $url"
-  #echo "url after: $rawurl"
-
-  # regular HTTP request
-  # (hostname in Host header, raw IP in URL)
-  test_expect_success "$label (direct HTTP)" "
-    curl -H \"Host: $hostname\" -sD - \"$rawurl\" > response &&
-    test_should_contain \"$expected\" response
-  "
-
-  # HTTP proxy
-  # (hostname is passed via URL)
-  # Note: proxy client should not care, but curl does DNS lookup
-  # for some reason anyway, so we pass static DNS mapping
-  test_expect_success "$label (HTTP proxy)" "
-    curl -x $proxy --resolve $hostname:127.0.0.1 -sD - \"$url\" > response &&
-    test_should_contain \"$expected\" response
-  "
-
-  # HTTP proxy 1.0
-  # (repeating proxy test with older spec, just to be sure)
-  test_expect_success "$label (HTTP proxy 1.0)" "
-    curl --proxy1.0 $proxy --resolve $hostname:127.0.0.1 -sD - \"$url\" > response &&
-    test_should_contain \"$expected\" response
-  "
-
-  # HTTP proxy tunneling (CONNECT)
-  # https://tools.ietf.org/html/rfc7231#section-4.3.6
-  # In HTTP/1.x, the pseudo-method CONNECT
-  # can be used to convert an HTTP connection into a tunnel to a remote host
-  test_expect_success "$label (HTTP proxy tunneling)" "
-    curl --proxytunnel -x $proxy -H \"Host: $hostname\" -sD - \"$rawurl\" > response &&
-    test_should_contain \"$expected\" response
-  "
-}
-
-# Helper that checks gateway response for specific hostname in Host header
-test_hostname_gateway_response_should_contain() {
-  local label="$1"
-  local hostname="$2"
-  local url="$3"
-  local rawurl=$(echo "$url" | sed "s/$hostname/127.0.0.1:$GWAY_PORT/")
-  local expected="$4"
-  test_expect_success "$label" "
-    curl -H \"Host: $hostname\" -sD - \"$rawurl\" > response &&
-    test_should_contain \"$expected\" response
-  "
-}
-
-## ============================================================================
-## Start IPFS Node and prepare test CIDs
-## ============================================================================
-
-test_expect_success "ipfs init" '
-  export IPFS_PATH="$(pwd)/.ipfs" &&
-  ipfs init --profile=test > /dev/null
-'
-
-test_launch_ipfs_daemon_without_network
-
-# Import test case
-# See the static fixtures in ./t0114-gateway-subdomains/
-CID_VAL=hello
-CIDv1=bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am
-CIDv0=QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN
-CIDv0to1=bafybeiffndsajwhk3lwjewwdxqntmjm4b5wxaaanokonsggenkbw6slwk4
-CIDv1_TOO_LONG=bafkrgqhhyivzstcz3hhswshfjgy6ertgmnqeleynhwt4dlfsthi4hn7zgh4uvlsb5xncykzapi3ocd4lzogukir6ksdy6wzrnz6ohnv4aglcs
-DIR_CID=bafybeiht6dtwk3les7vqm6ibpvz6qpohidvlshsfyr7l5mpysdw2vmbbhe
-
-RSA_KEY=QmVujd5Vb7moysJj8itnGufN7MEtPRCNHkKpNuA4onsRa3
-RSA_IPNS_IDv0=QmVujd5Vb7moysJj8itnGufN7MEtPRCNHkKpNuA4onsRa3
-RSA_IPNS_IDv1=k2k4r8m7xvggw5pxxk3abrkwyer625hg01hfyggrai7lk1m63fuihi7w
-RSA_IPNS_IDv1_DAGPB=k2jmtxu61bnhrtj301lw7zizknztocdbeqhxgv76l2q9t36fn9jbzipo
-
-ED25519_KEY=12D3KooWLQzUv2FHWGVPXTXSZpdHs7oHbXub2G5WC8Tx4NQhyd2d
-ED25519_IPNS_IDv0=12D3KooWLQzUv2FHWGVPXTXSZpdHs7oHbXub2G5WC8Tx4NQhyd2d
-ED25519_IPNS_IDv1=k51qzi5uqu5dk3v4rmjber23h16xnr23bsggmqqil9z2gduiis5se8dht36dam
-ED25519_IPNS_IDv1_DAGPB=k50rm9yjlt0jey4fqg6wafvqprktgbkpgkqdg27tpqje6iimzxewnhvtin9hhq
-IPNS_ED25519_B58MH=12D3KooWLQzUv2FHWGVPXTXSZpdHs7oHbXub2G5WC8Tx4NQhyd2d
-IPNS_ED25519_B36CID=k51qzi5uqu5dk3v4rmjber23h16xnr23bsggmqqil9z2gduiis5se8dht36dam
-
-test_expect_success "Add the test fixtures" '
-  ipfs dag import --pin-roots ../t0114-gateway-subdomains/fixtures.car &&
-  ipfs routing put --allow-offline /ipns/${RSA_KEY} ../t0114-gateway-subdomains/${RSA_KEY}.ipns-record &&
-  ipfs routing put --allow-offline /ipns/${ED25519_KEY} ../t0114-gateway-subdomains/${ED25519_KEY}.ipns-record
-'
-
-# ensure we start with empty Gateway.PublicGateways
-test_expect_success 'start daemon with empty config for Gateway.PublicGateways' '
-  test_kill_ipfs_daemon &&
-  ipfs config --json Gateway.PublicGateways "{}" &&
-  test_launch_ipfs_daemon_without_network
-'
-
-## ============================================================================
-## Test path-based requests to a local gateway with default config
-## (forced redirects to http://*.localhost)
-## ============================================================================
-
-# /ipfs/
-
-# IP remains old school path-based gateway
-
-test_localhost_gateway_response_should_contain \
-  "request for 127.0.0.1/ipfs/{CID} stays on path" \
-  "http://127.0.0.1:$GWAY_PORT/ipfs/$CIDv1" \
-  "$CID_VAL"
-
-# 'localhost' hostname is used for subdomains, and should not return
-# payload directly, but redirect to URL with proper origin isolation
-
-test_localhost_gateway_response_should_contain \
-  "request for localhost/ipfs/{CIDv1} returns HTTP 301 Moved Permanently" \
-  "http://localhost:$GWAY_PORT/ipfs/$CIDv1" \
-  "301 Moved Permanently"
-
-test_localhost_gateway_response_should_contain \
-  "request for localhost/ipfs/{CIDv1} returns Location HTTP header for subdomain redirect in browsers" \
-  "http://localhost:$GWAY_PORT/ipfs/$CIDv1" \
-  "Location: http://$CIDv1.ipfs.localhost:$GWAY_PORT/"
-
-test_localhost_gateway_response_should_contain \
-  "request for localhost/ipfs/{DIR_CID} returns HTTP 301 Moved Permanently" \
-  "http://localhost:$GWAY_PORT/ipfs/$DIR_CID" \
-  "301 Moved Permanently"
-
-test_localhost_gateway_response_should_contain \
-  "request for localhost/ipfs/{DIR_CID} returns Location HTTP header for subdomain redirect in browsers" \
-  "http://localhost:$GWAY_PORT/ipfs/$DIR_CID/" \
-  "Location: http://$DIR_CID.ipfs.localhost:$GWAY_PORT/"
-
-# Kubo specific end-to-end test
-# (independent of gateway-conformance)
-
-# We return human-readable body with HTTP 301 so existing cli scripts that use path-based
-# gateway are informed to enable following HTTP redirects
-test_localhost_gateway_response_should_contain \
-  "request for localhost/ipfs/{CIDv1} includes human-readable link and redirect info in HTTP 301 body" \
-  "http://localhost:$GWAY_PORT/ipfs/$CIDv1" \
-  ">Moved Permanently"
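For context: a script consuming the path gateway gets the payload by simply following that redirect; an illustrative one-liner reusing the variables above:

    curl -L "http://localhost:$GWAY_PORT/ipfs/$CIDv1"

The -L flag makes curl follow the Location header to the origin-isolated *.ipfs.localhost URL asserted in these tests.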
body" \ - "http://localhost:$GWAY_PORT/ipfs/$CIDv1" \ - ">Moved Permanently" - -# end Kubo specific end-to-end test - -test_localhost_gateway_response_should_contain \ - "request for localhost/ipfs/{CIDv0} redirects to CIDv1 representation in subdomain" \ - "http://localhost:$GWAY_PORT/ipfs/$CIDv0" \ - "Location: http://${CIDv0to1}.ipfs.localhost:$GWAY_PORT/" - -# /ipns/ - -test_localhost_gateway_response_should_contain \ - "request for localhost/ipns/{CIDv0} redirects to CIDv1 with libp2p-key multicodec in subdomain" \ - "http://localhost:$GWAY_PORT/ipns/$RSA_IPNS_IDv0" \ - "Location: http://${RSA_IPNS_IDv1}.ipns.localhost:$GWAY_PORT/" - -test_localhost_gateway_response_should_contain \ - "request for localhost/ipns/{CIDv0} redirects to CIDv1 with libp2p-key multicodec in subdomain" \ - "http://localhost:$GWAY_PORT/ipns/$ED25519_IPNS_IDv0" \ - "Location: http://${ED25519_IPNS_IDv1}.ipns.localhost:$GWAY_PORT/" - -# /ipns/ - -# Kubo specific end-to-end test -# (independend of gateway-conformance) - -test_localhost_gateway_response_should_contain \ - "request for localhost/ipns/{fqdn} redirects to DNSLink in subdomain" \ - "http://localhost:$GWAY_PORT/ipns/en.wikipedia-on-ipfs.org/wiki" \ - "Location: http://en.wikipedia-on-ipfs.org.ipns.localhost:$GWAY_PORT/wiki" - -# end Kubo specific end-to-end test - -## ============================================================================ -## Test subdomain-based requests to a local gateway with default config -## (origin per content root at http://*.localhost) -## ============================================================================ - -# {CID}.ipfs.localhost - -test_localhost_gateway_response_should_contain \ - "request for {CID}.ipfs.localhost should return expected payload" \ - "http://${CIDv1}.ipfs.localhost:$GWAY_PORT" \ - "$CID_VAL" - -# ensure /ipfs/ namespace is not mounted on subdomain -test_localhost_gateway_response_should_contain \ - "request for {CID}.ipfs.localhost/ipfs/{CID} should return HTTP 404" \ - "http://${CIDv1}.ipfs.localhost:$GWAY_PORT/ipfs/$CIDv1" \ - "404 Not Found" - -# ensure requests to /ipfs/* are not blocked, if content root has such subdirectory -test_localhost_gateway_response_should_contain \ - "request for {CID}.ipfs.localhost/ipfs/file.txt should return data from a file in CID content root" \ - "http://${DIR_CID}.ipfs.localhost:$GWAY_PORT/ipfs/file.txt" \ - "I am a txt file" - -# Kubo specific end-to-end test -# (independend of gateway-conformance) -# This tests link to parent specific to boxo + relative pathing end-to-end tests specific to Kubo. - -# {CID}.ipfs.localhost/sub/dir (Directory Listing) -DIR_HOSTNAME="${DIR_CID}.ipfs.localhost:$GWAY_PORT" - -test_expect_success "valid file and subdirectory paths in directory listing at {cid}.ipfs.localhost" ' - curl -s --resolve $DIR_HOSTNAME:127.0.0.1 "http://$DIR_HOSTNAME" > list_response && - test_should_contain "hello" list_response && - test_should_contain "ipfs" list_response -' - -test_expect_success "valid parent directory path in directory listing at {cid}.ipfs.localhost/sub/dir" ' - curl -s --resolve $DIR_HOSTNAME:127.0.0.1 "http://$DIR_HOSTNAME/ipfs/ipns/" > list_response && - test_should_contain ".." 
-  test_should_contain "bar" list_response
-'
-
-test_expect_success "request for deep path resource at {cid}.ipfs.localhost/sub/dir/file" '
-  curl -s --resolve $DIR_HOSTNAME:127.0.0.1 "http://$DIR_HOSTNAME/ipfs/ipns/bar" > list_response &&
-  test_should_contain "text-file-content" list_response
-'
-# end Kubo specific end-to-end test
-
-# *.ipns.localhost
-
-# <libp2p-key>.ipns.localhost
-
-test_localhost_gateway_response_should_contain \
-  "request for {CIDv1-libp2p-key}.ipns.localhost returns expected payload" \
-  "http://${RSA_IPNS_IDv1}.ipns.localhost:$GWAY_PORT" \
-  "$CID_VAL"
-
-test_localhost_gateway_response_should_contain \
-  "request for {CIDv1-libp2p-key}.ipns.localhost returns expected payload" \
-  "http://${ED25519_IPNS_IDv1}.ipns.localhost:$GWAY_PORT" \
-  "$CID_VAL"
-
-test_localhost_gateway_response_should_contain \
-  "localhost request for {CIDv1-dag-pb}.ipns.localhost redirects to CID with libp2p-key multicodec" \
-  "http://${RSA_IPNS_IDv1_DAGPB}.ipns.localhost:$GWAY_PORT" \
-  "Location: http://${RSA_IPNS_IDv1}.ipns.localhost:$GWAY_PORT/"
-
-test_localhost_gateway_response_should_contain \
-  "localhost request for {CIDv1-dag-pb}.ipns.localhost redirects to CID with libp2p-key multicodec" \
-  "http://${ED25519_IPNS_IDv1_DAGPB}.ipns.localhost:$GWAY_PORT" \
-  "Location: http://${ED25519_IPNS_IDv1}.ipns.localhost:$GWAY_PORT/"
-
-# <dnslink-fqdn>.ipns.localhost
-
-# DNSLink test requires a daemon in online mode with precached /ipns/ mapping
-test_kill_ipfs_daemon
-DNSLINK_FQDN="dnslink-test.example.com"
-export IPFS_NS_MAP="$DNSLINK_FQDN:/ipfs/$CIDv1"
-test_launch_ipfs_daemon
-
-test_localhost_gateway_response_should_contain \
-  "request for {dnslink}.ipns.localhost returns expected payload" \
-  "http://$DNSLINK_FQDN.ipns.localhost:$GWAY_PORT" \
-  "$CID_VAL"
-
-## ============================================================================
-## Test DNSLink inlining on HTTP gateways
-## ============================================================================
-
-# set explicit subdomain gateway config for the hostname
-ipfs config --json Gateway.PublicGateways '{
-  "localhost": {
-    "UseSubdomains": true,
-    "InlineDNSLink": true,
-    "Paths": ["/ipfs", "/ipns", "/api"]
-  },
-  "example.com": {
-    "UseSubdomains": true,
-    "InlineDNSLink": true,
-    "Paths": ["/ipfs", "/ipns", "/api"]
-  }
-}' || exit 1
-# restart daemon to apply config changes
-test_kill_ipfs_daemon
-test_launch_ipfs_daemon_without_network
-
-test_localhost_gateway_response_should_contain \
-  "request for localhost/ipns/{fqdn} redirects to DNSLink in subdomain with DNS inlining" \
-  "http://localhost:$GWAY_PORT/ipns/en.wikipedia-on-ipfs.org/wiki" \
-  "Location: http://en-wikipedia--on--ipfs-org.ipns.localhost:$GWAY_PORT/wiki"
-
-test_hostname_gateway_response_should_contain \
-  "request for example.com/ipns/{fqdn} redirects to DNSLink in subdomain with DNS inlining" \
-  "example.com" \
-  "http://127.0.0.1:$GWAY_PORT/ipns/en.wikipedia-on-ipfs.org/wiki" \
-  "Location: http://en-wikipedia--on--ipfs-org.ipns.example.com/wiki"
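For context: DNSLink label inlining maps a FQDN onto a single DNS label by first escaping `-` as `--` and then replacing `.` with `-`; a rough shell sketch of the transformation (illustration only, not the gateway's actual implementation):

    inline_dnslink_label() {
      echo "$1" | sed -e 's/-/--/g' -e 's/\./-/g'
    }
    inline_dnslink_label "en.wikipedia-on-ipfs.org"   # prints en-wikipedia--on--ipfs-org

This keeps every DNSLink site on its own origin even behind a single-level wildcard TLS certificate.

-
-## ============================================================================
-## Test subdomain-based requests with a custom hostname config
-## (origin per content root at http://*.example.com)
-## ============================================================================
-
-# set explicit subdomain gateway config for the hostname
-ipfs config --json Gateway.PublicGateways '{
-  "example.com": {
-    "UseSubdomains": true,
-    "Paths": ["/ipfs", "/ipns", "/api"]
-  }
-}' || exit 1
-# restart daemon to apply config changes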
-test_kill_ipfs_daemon
-test_launch_ipfs_daemon_without_network
-
-
-# example.com/ip(f|n)s/*
-# =============================================================================
-
-# path requests to the root hostname should redirect
-# to a subdomain URL with proper origin isolation
-
-test_hostname_gateway_response_should_contain \
-  "request for example.com/ipfs/{CIDv1} produces redirect to {CIDv1}.ipfs.example.com" \
-  "example.com" \
-  "http://127.0.0.1:$GWAY_PORT/ipfs/$CIDv1" \
-  "Location: http://$CIDv1.ipfs.example.com/"
-
-# error message should include original CID
-# (and it should be case-sensitive, as we can't assume everyone uses base32)
-test_hostname_gateway_response_should_contain \
-  "request for example.com/ipfs/{InvalidCID} produces useful error before redirect" \
-  "example.com" \
-  "http://127.0.0.1:$GWAY_PORT/ipfs/QmInvalidCID" \
-  'invalid path \"/ipfs/QmInvalidCID\"'
-
-test_hostname_gateway_response_should_contain \
-  "request for example.com/ipfs/{CIDv0} produces redirect to {CIDv1}.ipfs.example.com" \
-  "example.com" \
-  "http://127.0.0.1:$GWAY_PORT/ipfs/$CIDv0" \
-  "Location: http://${CIDv0to1}.ipfs.example.com/"
-
-# Support X-Forwarded-Proto
-test_expect_success "request for http://example.com/ipfs/{CID} with X-Forwarded-Proto: https produces redirect to HTTPS URL" "
-  curl -H \"X-Forwarded-Proto: https\" -H \"Host: example.com\" -sD - \"http://127.0.0.1:$GWAY_PORT/ipfs/$CIDv1\" > response &&
-  test_should_contain \"Location: https://$CIDv1.ipfs.example.com/\" response
-"
-
-# Support ipfs:// in https://developer.mozilla.org/en-US/docs/Web/API/Navigator/registerProtocolHandler
-test_hostname_gateway_response_should_contain \
-  "request for example.com/ipfs/?uri=ipfs%3A%2F%2F.. produces redirect to /ipfs/.. content path" \
content path" \ - "example.com" \ - "http://127.0.0.1:$GWAY_PORT/ipfs/?uri=ipfs%3A%2F%2FQmXoypizjW3WknFiJnKLwHCnL72vedxjQkDDP1mXWo6uco%2Fwiki%2FDiego_Maradona.html" \ - "Location: /ipfs/QmXoypizjW3WknFiJnKLwHCnL72vedxjQkDDP1mXWo6uco/wiki/Diego_Maradona.html" - -# example.com/ipns/ - -test_hostname_gateway_response_should_contain \ - "request for example.com/ipns/{CIDv0} redirects to CIDv1 with libp2p-key multicodec in subdomain" \ - "example.com" \ - "http://127.0.0.1:$GWAY_PORT/ipns/$RSA_IPNS_IDv0" \ - "Location: http://${RSA_IPNS_IDv1}.ipns.example.com/" - -test_hostname_gateway_response_should_contain \ - "request for example.com/ipns/{CIDv0} redirects to CIDv1 with libp2p-key multicodec in subdomain" \ - "example.com" \ - "http://127.0.0.1:$GWAY_PORT/ipns/$ED25519_IPNS_IDv0" \ - "Location: http://${ED25519_IPNS_IDv1}.ipns.example.com/" - -# example.com/ipns/ - -test_hostname_gateway_response_should_contain \ - "request for example.com/ipns/{fqdn} redirects to DNSLink in subdomain" \ - "example.com" \ - "http://127.0.0.1:$GWAY_PORT/ipns/en.wikipedia-on-ipfs.org/wiki" \ - "Location: http://en.wikipedia-on-ipfs.org.ipns.example.com/wiki" - -# DNSLink on Public gateway with a single-level wildcard TLS cert -# "Option C" from https://github.com/ipfs/in-web-browsers/issues/169 -test_expect_success \ - "request for example.com/ipns/{fqdn} with X-Forwarded-Proto redirects to TLS-safe label in subdomain" " - curl -H \"Host: example.com\" -H \"X-Forwarded-Proto: https\" -sD - \"http://127.0.0.1:$GWAY_PORT/ipns/en.wikipedia-on-ipfs.org/wiki\" > response && - test_should_contain \"Location: https://en-wikipedia--on--ipfs-org.ipns.example.com/wiki\" response - " - -# Support ipns:// in https://developer.mozilla.org/en-US/docs/Web/API/Navigator/registerProtocolHandler -test_hostname_gateway_response_should_contain \ - "request for example.com/ipns/?uri=ipns%3A%2F%2F.. produces redirect to /ipns/.. content path" \ - "example.com" \ - "http://127.0.0.1:$GWAY_PORT/ipns/?uri=ipns%3A%2F%2Fen.wikipedia-on-ipfs.org" \ - "Location: /ipns/en.wikipedia-on-ipfs.org" - -# *.ipfs.example.com: subdomain requests made with custom FQDN in Host header - -test_hostname_gateway_response_should_contain \ - "request for {CID}.ipfs.example.com should return expected payload" \ - "${CIDv1}.ipfs.example.com" \ - "http://127.0.0.1:$GWAY_PORT/" \ - "$CID_VAL" - -test_hostname_gateway_response_should_contain \ - "request for {CID}.ipfs.example.com/ipfs/{CID} should return HTTP 404" \ - "${CIDv1}.ipfs.example.com" \ - "http://127.0.0.1:$GWAY_PORT/ipfs/$CIDv1" \ - "404 Not Found" - -# Kubo specific end-to-end test -# (independend of gateway-conformance) -# HTML specific to Boxo/Kubo, and relative pathing specific to code in Kubo - -# {CID}.ipfs.example.com/sub/dir (Directory Listing) -DIR_FQDN="${DIR_CID}.ipfs.example.com" - -test_expect_success "valid file and directory paths in directory listing at {cid}.ipfs.example.com" ' - curl -s -H "Host: $DIR_FQDN" http://127.0.0.1:$GWAY_PORT > list_response && - test_should_contain "hello" list_response && - test_should_contain "ipfs" list_response -' - -test_expect_success "valid parent directory path in directory listing at {cid}.ipfs.example.com/sub/dir" ' - curl -s -H "Host: $DIR_FQDN" http://127.0.0.1:$GWAY_PORT/ipfs/ipns/ > list_response && - test_should_contain ".." list_response && - test_should_contain "bar" list_response -' - -# Note 1: we test for sneaky subdir names {cid}.ipfs.example.com/ipfs/ipns/ :^) -# Note 2: example.com/ipfs/.. 
-test_expect_success "valid breadcrumb links in the header of directory listing at {cid}.ipfs.example.com/sub/dir" '
-  curl -s -H "Host: $DIR_FQDN" http://127.0.0.1:$GWAY_PORT/ipfs/ipns/ > list_response &&
-  test_should_contain "Index of" list_response &&
-  test_should_contain "/ipfs/${DIR_CID}/ipfs/ipns" list_response
-'
-
-# end Kubo specific end-to-end test
-
-test_expect_success "request for deep path resource {cid}.ipfs.example.com/sub/dir/file" '
-  curl -s -H "Host: $DIR_FQDN" http://127.0.0.1:$GWAY_PORT/ipfs/ipns/bar > list_response &&
-  test_should_contain "text-file-content" list_response
-'
-
-# *.ipns.example.com
-# ============================================================================
-
-# <libp2p-key>.ipns.example.com
-
-test_hostname_gateway_response_should_contain \
-  "request for {CIDv1-libp2p-key}.ipns.example.com returns expected payload" \
-  "${RSA_IPNS_IDv1}.ipns.example.com" \
-  "http://127.0.0.1:$GWAY_PORT" \
-  "$CID_VAL"
-
-test_hostname_gateway_response_should_contain \
-  "request for {CIDv1-libp2p-key}.ipns.example.com returns expected payload" \
-  "${ED25519_IPNS_IDv1}.ipns.example.com" \
-  "http://127.0.0.1:$GWAY_PORT" \
-  "$CID_VAL"
-
-test_hostname_gateway_response_should_contain \
-  "hostname request for {CIDv1-dag-pb}.ipns.example.com redirects to CID with libp2p-key multicodec" \
-  "${RSA_IPNS_IDv1_DAGPB}.ipns.example.com" \
-  "http://127.0.0.1:$GWAY_PORT" \
-  "Location: http://${RSA_IPNS_IDv1}.ipns.example.com/"
-
-test_hostname_gateway_response_should_contain \
-  "hostname request for {CIDv1-dag-pb}.ipns.example.com redirects to CID with libp2p-key multicodec" \
-  "${ED25519_IPNS_IDv1_DAGPB}.ipns.example.com" \
-  "http://127.0.0.1:$GWAY_PORT" \
-  "Location: http://${ED25519_IPNS_IDv1}.ipns.example.com/"
-
-# DNSLink: <dnslink-fqdn>.ipns.example.com
-# (not really useful outside of localhost, as setting TLS for more than one
-# level of wildcard is a pain, but we support it if someone really wants it)
-# ============================================================================
-
-# DNSLink test requires a daemon in online mode with precached /ipns/ mapping
-test_kill_ipfs_daemon
-DNSLINK_FQDN="dnslink-subdomain-gw-test.example.org"
-export IPFS_NS_MAP="$DNSLINK_FQDN:/ipfs/$CIDv1"
-test_launch_ipfs_daemon
-
-test_hostname_gateway_response_should_contain \
-  "request for {dnslink}.ipns.example.com returns expected payload" \
-  "$DNSLINK_FQDN.ipns.example.com" \
-  "http://127.0.0.1:$GWAY_PORT" \
-  "$CID_VAL"
-
-# DNSLink on Public gateway with a single-level wildcard TLS cert
-# "Option C" from https://github.com/ipfs/in-web-browsers/issues/169
-test_expect_success \
-  "request for {single-label-dnslink}.ipns.example.com with X-Forwarded-Proto returns expected payload" "
-  curl -H \"Host: dnslink--subdomain--gw--test-example-org.ipns.example.com\" -H \"X-Forwarded-Proto: https\" -sD - \"http://127.0.0.1:$GWAY_PORT\" > response &&
-  test_should_contain \"$CID_VAL\" response
-  "
-
-## Test subdomain handling of CIDs that do not fit in a single DNS Label (>63chars)
-## https://github.com/ipfs/go-ipfs/issues/7318
-## ============================================================================
-
-# local: *.localhost
-test_localhost_gateway_response_should_contain \
-  "request for a ED25519 libp2p-key at localhost/ipns/{b58mh} returns Location HTTP header for DNS-safe subdomain redirect in browsers" \
-  "http://localhost:$GWAY_PORT/ipns/$IPNS_ED25519_B58MH" \
-  "Location: http://${IPNS_ED25519_B36CID}.ipns.localhost:$GWAY_PORT/"
-
-# router should not redirect to hostnames that could fail due to DNS limits
-test_localhost_gateway_response_should_contain \
-  "request for a too long CID at localhost/ipfs/{CIDv1} returns human readable error" \
-  "http://localhost:$GWAY_PORT/ipfs/$CIDv1_TOO_LONG" \
-  "CID incompatible with DNS label length limit of 63"
-
-test_localhost_gateway_response_should_contain \
-  "request for a too long CID at localhost/ipfs/{CIDv1} returns HTTP Error 400 Bad Request" \
-  "http://localhost:$GWAY_PORT/ipfs/$CIDv1_TOO_LONG" \
-  "400 Bad Request"
-
-# direct request should also fail (provides the same UX as router and avoids confusion)
-test_localhost_gateway_response_should_contain \
-  "request for a too long CID at {CIDv1}.ipfs.localhost returns HTTP Error 400 Bad Request" \
-  "http://$CIDv1_TOO_LONG.ipfs.localhost:$GWAY_PORT" \
-  "400 Bad Request"
-
-# public subdomain gateway: *.example.com
-
-test_hostname_gateway_response_should_contain \
-  "request for a ED25519 libp2p-key at example.com/ipns/{b58mh} returns Location HTTP header for DNS-safe subdomain redirect in browsers" \
-  "example.com" \
-  "http://127.0.0.1:$GWAY_PORT/ipns/$IPNS_ED25519_B58MH" \
-  "Location: http://${IPNS_ED25519_B36CID}.ipns.example.com"
-
-test_hostname_gateway_response_should_contain \
-  "request for a too long CID at example.com/ipfs/{CIDv1} returns human readable error" \
-  "example.com" \
-  "http://127.0.0.1:$GWAY_PORT/ipfs/$CIDv1_TOO_LONG" \
-  "CID incompatible with DNS label length limit of 63"
-
-test_hostname_gateway_response_should_contain \
-  "request for a too long CID at example.com/ipfs/{CIDv1} returns HTTP Error 400 Bad Request" \
-  "example.com" \
-  "http://127.0.0.1:$GWAY_PORT/ipfs/$CIDv1_TOO_LONG" \
-  "400 Bad Request"
-
-test_hostname_gateway_response_should_contain \
-  "request for a too long CID at {CIDv1}.ipfs.example.com returns HTTP Error 400 Bad Request" \
-  "$CIDv1_TOO_LONG.ipfs.example.com" \
-  "http://127.0.0.1:$GWAY_PORT/" \
-  "400 Bad Request"
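For context: the 63-byte ceiling comes from the DNS label limit (RFC 1035); a quick illustrative check that the oversized CIDv1 above cannot fit in one label:

    echo -n "$CIDv1_TOO_LONG" | wc -c   # prints a value well over 63

The base58 ED25519 peer ID, by contrast, is re-encoded to a base36 libp2p-key CID that does fit in a single label, which is why it gets a redirect rather than an error.

-
-# Disable selected Paths for the subdomain gateway hostname
-# =============================================================================
-
-# disable /ipns for the hostname by not whitelisting it
-ipfs config --json Gateway.PublicGateways '{
-  "example.com": {
-    "UseSubdomains": true,
-    "Paths": ["/ipfs"]
-  }
-}' || exit 1
-# restart daemon to apply config changes
-test_kill_ipfs_daemon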
-test_launch_ipfs_daemon_without_network
-
-# refuse requests to Paths that were not explicitly whitelisted for the hostname
-test_hostname_gateway_response_should_contain \
-  "request for *.ipns.example.com returns HTTP 404 Not Found when /ipns is not on Paths whitelist" \
-  "${RSA_IPNS_IDv1}.ipns.example.com" \
-  "http://127.0.0.1:$GWAY_PORT" \
-  "404 Not Found"
-
-test_hostname_gateway_response_should_contain \
-  "request for *.ipns.example.com returns HTTP 404 Not Found when /ipns is not on Paths whitelist" \
-  "${ED25519_IPNS_IDv1}.ipns.example.com" \
-  "http://127.0.0.1:$GWAY_PORT" \
-  "404 Not Found"
-
-## ============================================================================
-## Test path-based requests with a custom hostname config
-## ============================================================================
-
-# set explicit no-subdomain gateway config for the hostname
-ipfs config --json Gateway.PublicGateways '{
-  "example.com": {
-    "UseSubdomains": false,
-    "Paths": ["/ipfs"]
-  }
-}' || exit 1
-
-# restart daemon to apply config changes
-test_kill_ipfs_daemon
-test_launch_ipfs_daemon_without_network
-
-# example.com/ip(f|n)s/* smoke-tests
-# =============================================================================
-
-# confirm path gateway works for /ipfs
-test_hostname_gateway_response_should_contain \
-  "request for example.com/ipfs/{CIDv1} returns expected payload" \
-  "example.com" \
-  "http://127.0.0.1:$GWAY_PORT/ipfs/$CIDv1" \
-  "$CID_VAL"
-
-# refuse subdomain requests on path gateway
-# (we don't want a false sense of security)
-test_hostname_gateway_response_should_contain \
-  "request for {CID}.ipfs.example.com/ipfs/{CID} should return HTTP 404 Not Found" \
-  "${CIDv1}.ipfs.example.com" \
-  "http://127.0.0.1:$GWAY_PORT/ipfs/$CIDv1" \
-  "404 Not Found"
-
-# refuse requests to Paths that were not explicitly whitelisted for the hostname
-test_hostname_gateway_response_should_contain \
-  "request for example.com/ipns/ returns HTTP 404 Not Found when /ipns is not on Paths whitelist" \
-  "example.com" \
-  "http://127.0.0.1:$GWAY_PORT/ipns/$RSA_IPNS_IDv1" \
-  "404 Not Found"
-
-test_hostname_gateway_response_should_contain \
-  "request for example.com/ipns/ returns HTTP 404 Not Found when /ipns is not on Paths whitelist" \
-  "example.com" \
-  "http://127.0.0.1:$GWAY_PORT/ipns/$ED25519_IPNS_IDv1" \
-  "404 Not Found"
-
-## ============================================================================
-## Test DNSLink requests with a custom PublicGateway (hostname config)
-## (DNSLink site at http://dnslink-enabled-on-fqdn.example.org)
-## ============================================================================
-
-test_kill_ipfs_daemon
-
-# disable wildcard DNSLink gateway
-# and enable it on specific DNSLink hostname
-ipfs config --json Gateway.NoDNSLink true && \
-ipfs config --json Gateway.PublicGateways '{
-  "dnslink-enabled-on-fqdn.example.org": {
-    "NoDNSLink": false,
-    "UseSubdomains": false,
-    "Paths": ["/ipfs"]
-  },
-  "only-dnslink-enabled-on-fqdn.example.org": {
-    "NoDNSLink": false,
-    "UseSubdomains": false,
-    "Paths": []
-  },
-  "dnslink-disabled-on-fqdn.example.com": {
-    "NoDNSLink": true,
-    "UseSubdomains": false,
-    "Paths": []
-  }
-}' || exit 1
-
-# DNSLink test requires a daemon in online mode with precached /ipns/ mapping
-DNSLINK_FQDN="dnslink-enabled-on-fqdn.example.org"
-ONLY_DNSLINK_FQDN="only-dnslink-enabled-on-fqdn.example.org"
-NO_DNSLINK_FQDN="dnslink-disabled-on-fqdn.example.com"
-export IPFS_NS_MAP="$DNSLINK_FQDN:/ipfs/$CIDv1,$ONLY_DNSLINK_FQDN:/ipfs/$DIR_CID"
-
-# restart daemon to apply config changes
-test_launch_ipfs_daemon
-
-# make sure test setup is valid (fail if CoreAPI is unable to resolve)
-test_expect_success "spoofed DNSLink record resolves in cli" "
-  ipfs resolve /ipns/$DNSLINK_FQDN > result &&
-  test_should_contain \"$CIDv1\" result &&
-  ipfs cat /ipns/$DNSLINK_FQDN > result &&
-  test_should_contain \"$CID_VAL\" result
-"
-
-# DNSLink enabled
-
-test_hostname_gateway_response_should_contain \
-  "request for http://{dnslink-fqdn}/ PublicGateway returns expected payload" \
-  "$DNSLINK_FQDN" \
-  "http://127.0.0.1:$GWAY_PORT/" \
-  "$CID_VAL"
-
-test_hostname_gateway_response_should_contain \
-  "request for {dnslink-fqdn}/ipfs/{cid} returns expected payload when /ipfs is on Paths whitelist" \
-  "$DNSLINK_FQDN" \
-  "http://127.0.0.1:$GWAY_PORT/ipfs/$CIDv1" \
-  "$CID_VAL"
-
-# Test for a fun edge case: DNSLink-only gateway without /ipfs/ namespace
-# mounted, and with subdirectory named "ipfs" Β―\_(ツ)_/Β―
-test_hostname_gateway_response_should_contain \
-  "request for {dnslink-fqdn}/ipfs/file.txt returns data from content root when /ipfs is not on Paths whitelist" \
-  "$ONLY_DNSLINK_FQDN" \
-  "http://127.0.0.1:$GWAY_PORT/ipfs/file.txt" \
-  "I am a txt file"
-
-test_hostname_gateway_response_should_contain \
-  "request for {dnslink-fqdn}/ipns/{peerid} returns 404 when path is not whitelisted" \
-  "$DNSLINK_FQDN" \
-  "http://127.0.0.1:$GWAY_PORT/ipns/$RSA_IPNS_IDv0" \
-  "404 Not Found"
-
-test_hostname_gateway_response_should_contain \
-  "request for {dnslink-fqdn}/ipns/{peerid} returns 404 when path is not whitelisted" \
-  "$DNSLINK_FQDN" \
-  "http://127.0.0.1:$GWAY_PORT/ipns/$ED25519_IPNS_IDv0" \
-  "404 Not Found"
-
-# DNSLink disabled
-
-test_hostname_gateway_response_should_contain \
-  "request for http://{dnslink-fqdn}/ returns 404 when NoDNSLink=true" \
-  "$NO_DNSLINK_FQDN" \
-  "http://127.0.0.1:$GWAY_PORT/" \
-  "404 Not Found"
-
-test_hostname_gateway_response_should_contain \
-  "request for {dnslink-fqdn}/ipfs/{cid} returns 404 when path is not whitelisted" \
-  "$NO_DNSLINK_FQDN" \
-  "http://127.0.0.1:$GWAY_PORT/ipfs/$CIDv0" \
-  "404 Not Found"
-
-
-## ============================================================================
-## Test wildcard DNSLink (any hostname, with default config)
-## ============================================================================
-
-test_kill_ipfs_daemon
-
-# enable wildcard DNSLink gateway (any value in Host header)
-# and remove custom PublicGateways
-ipfs config --json Gateway.NoDNSLink false && \
-ipfs config --json Gateway.PublicGateways '{}' || exit 1
-
-# DNSLink test requires a daemon in online mode with precached /ipns/ mapping
-DNSLINK_FQDN="wildcard-dnslink-not-in-config.example.com"
-export IPFS_NS_MAP="$DNSLINK_FQDN:/ipfs/$CIDv1"
-
-# restart daemon to apply config changes
-test_launch_ipfs_daemon
-
-# make sure test setup is valid (fail if CoreAPI is unable to resolve)
-test_expect_success "spoofed DNSLink record resolves in cli" "
-  ipfs resolve /ipns/$DNSLINK_FQDN > result &&
-  test_should_contain \"$CIDv1\" result &&
-  ipfs cat /ipns/$DNSLINK_FQDN > result &&
-  test_should_contain \"$CID_VAL\" result
-"
-
-# gateway test
-test_hostname_gateway_response_should_contain \
-  "request for http://{dnslink-fqdn}/ (wildcard) returns expected payload" \
-  "$DNSLINK_FQDN" \
-  "http://127.0.0.1:$GWAY_PORT/" \
-  "$CID_VAL"
-
-## ============================================================================
-## Test support for X-Forwarded-Host
-## ============================================================================
-
-# set explicit subdomain gateway config for the hostname
-ipfs config --json Gateway.PublicGateways '{
-  "example.com": {
-    "UseSubdomains": true,
-    "Paths": ["/ipfs", "/ipns", "/api"]
-  }
-}' || exit 1
-# restart daemon to apply config changes
-test_kill_ipfs_daemon
-test_launch_ipfs_daemon_without_network
-
-test_expect_success "request for http://fake.domain.com/ipfs/{CID} doesn't match the example.com gateway" "
-  curl -H \"Host: fake.domain.com\" -sD - \"http://127.0.0.1:$GWAY_PORT/ipfs/$CIDv1\" > response &&
-  test_should_contain \"200 OK\" response
-"
-
-test_expect_success "request for http://fake.domain.com/ipfs/{CID} with X-Forwarded-Host: example.com matches the example.com gateway" "
-  curl -H \"Host: fake.domain.com\" -H \"X-Forwarded-Host: example.com\" -sD - \"http://127.0.0.1:$GWAY_PORT/ipfs/$CIDv1\" > response &&
-  test_should_contain \"Location: http://$CIDv1.ipfs.example.com/\" response
-"
-
-test_expect_success "request for http://fake.domain.com/ipfs/{CID} with X-Forwarded-Host: example.com and X-Forwarded-Proto: https matches the example.com gateway and redirects with https" "
-  curl -H \"Host: fake.domain.com\" -H \"X-Forwarded-Host: example.com\" -H \"X-Forwarded-Proto: https\" -sD - \"http://127.0.0.1:$GWAY_PORT/ipfs/$CIDv1\" > response &&
-  test_should_contain \"Location: https://$CIDv1.ipfs.example.com/\" response
-"
-
-# Kubo specific end-to-end test
-# (independent of gateway-conformance)
-# test configuration being wired up correctly end-to-end
-
-## ============================================================================
-## Test support for wildcards in gateway config
-## ============================================================================
-
-# set explicit subdomain gateway config for the hostnames
-ipfs config --json Gateway.PublicGateways '{
-  "*.example1.com": {
-    "UseSubdomains": true,
-    "Paths": ["/ipfs"]
-  },
-  "*.*.example2.com": {
-    "UseSubdomains": true,
-    "Paths": ["/ipfs"]
-  },
-  "foo.*.example3.com": {
-    "UseSubdomains": true,
-    "Paths": ["/ipfs"]
-  },
-  "foo.bar-*-boo.example4.com": {
-    "UseSubdomains": true,
-    "Paths": ["/ipfs"]
-  }
-}' || exit 1
-# restart daemon to apply config changes
-test_kill_ipfs_daemon
-test_launch_ipfs_daemon_without_network
-
-# *.example1.com
-
-test_hostname_gateway_response_should_contain \
-  "request for foo.example1.com/ipfs/{CIDv1} produces redirect to {CIDv1}.ipfs.foo.example1.com" \
-  "foo.example1.com" \
-  "http://127.0.0.1:$GWAY_PORT/ipfs/$CIDv1" \
-  "Location: http://$CIDv1.ipfs.foo.example1.com/"
-
-test_hostname_gateway_response_should_contain \
-  "request for {CID}.ipfs.foo.example1.com should return expected payload" \
-  "${CIDv1}.ipfs.foo.example1.com" \
-  "http://127.0.0.1:$GWAY_PORT/" \
-  "$CID_VAL"
-
-# *.*.example2.com
-
-test_hostname_gateway_response_should_contain \
-  "request for foo.bar.example2.com/ipfs/{CIDv1} produces redirect to {CIDv1}.ipfs.foo.bar.example2.com" \
-  "foo.bar.example2.com" \
-  "http://127.0.0.1:$GWAY_PORT/ipfs/$CIDv1" \
-  "Location: http://$CIDv1.ipfs.foo.bar.example2.com/"
-
-test_hostname_gateway_response_should_contain \
-  "request for {CID}.ipfs.foo.bar.example2.com should return expected payload" \
-  "${CIDv1}.ipfs.foo.bar.example2.com" \
-  "http://127.0.0.1:$GWAY_PORT/" \
-  "$CID_VAL"
-
-# foo.*.example3.com
-
-test_hostname_gateway_response_should_contain \
-  "request for foo.bar.example3.com/ipfs/{CIDv1} produces redirect to {CIDv1}.ipfs.foo.bar.example3.com" \
-  "foo.bar.example3.com" \
-  "http://127.0.0.1:$GWAY_PORT/ipfs/$CIDv1" \
-  "Location: http://$CIDv1.ipfs.foo.bar.example3.com/"
-
-test_hostname_gateway_response_should_contain \
-  "request for {CID}.ipfs.foo.bar.example3.com should return expected payload" \
-  "${CIDv1}.ipfs.foo.bar.example3.com" \
-  "http://127.0.0.1:$GWAY_PORT/" \
-  "$CID_VAL"
-
-# foo.bar-*-boo.example4.com
-
-test_hostname_gateway_response_should_contain \
-  "request for foo.bar-dev-boo.example4.com/ipfs/{CIDv1} produces redirect to {CIDv1}.ipfs.foo.bar-dev-boo.example4.com" \
-  "foo.bar-dev-boo.example4.com" \
-  "http://127.0.0.1:$GWAY_PORT/ipfs/$CIDv1" \
-  "Location: http://$CIDv1.ipfs.foo.bar-dev-boo.example4.com/"
-
-test_hostname_gateway_response_should_contain \
-  "request for {CID}.ipfs.foo.bar-dev-boo.example4.com should return expected payload" \
-  "${CIDv1}.ipfs.foo.bar-dev-boo.example4.com" \
-  "http://127.0.0.1:$GWAY_PORT/" \
-  "$CID_VAL"
-
-## ============================================================================
-## Test support for overriding implicit defaults
-## ============================================================================
============================================================================ - -# disable subdomain gateway at localhost by removing implicit config -ipfs config --json Gateway.PublicGateways '{ - "localhost": null -}' || exit 1 - -# restart daemon to apply config changes -test_kill_ipfs_daemon -test_launch_ipfs_daemon_without_network - -test_localhost_gateway_response_should_contain \ - "request for localhost/ipfs/{CID} stays on path when subdomain gw is explicitly disabled" \ - "http://localhost:$GWAY_PORT/ipfs/$CIDv1" \ - "$CID_VAL" - -# ============================================================================= -# ensure we end with empty Gateway.PublicGateways -ipfs config --json Gateway.PublicGateways '{}' -test_kill_ipfs_daemon - -test_expect_success "clean up ipfs dir" ' - rm -rf "$IPFS_PATH" -' - -test_done - -# end Kubo-specific end-to-end test \ No newline at end of file diff --git a/test/sharness/t0115-gateway-dir-listing.sh b/test/sharness/t0115-gateway-dir-listing.sh deleted file mode 100755 index d4e08e5be2e..00000000000 --- a/test/sharness/t0115-gateway-dir-listing.sh +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) Protocol Labs - -test_description="Test directory listing (dir-index-html) on the HTTP gateway" - - -. lib/test-lib.sh - -## ============================================================================ -## Start IPFS Node and prepare test CIDs -## ============================================================================ - -test_expect_success "ipfs init" ' - export IPFS_PATH="$(pwd)/.ipfs" && - ipfs init --profile=test > /dev/null -' - -test_launch_ipfs_daemon_without_network - -# Import test case -# See the static fixtures in ./t0115-gateway-dir-listing/ -test_expect_success "Add the test directory" ' - ipfs dag import --pin-roots ../t0115-gateway-dir-listing/fixtures.car -' -DIR_CID=bafybeig6ka5mlwkl4subqhaiatalkcleo4jgnr3hqwvpmsqfca27cijp3i # ./rootDir/ -FILE_CID=bafkreialihlqnf5uwo4byh4n3cmwlntwqzxxs2fg5vanqdi3d7tb2l5xkm # ./rootDir/ą/ę/file-źł.txt -FILE_SIZE=34 - -## ============================================================================ -## Test dir listing on path gateway (e.g. 127.0.0.1:8080/ipfs/) -## ============================================================================ - -test_expect_success "path gw: backlink on root CID should be hidden" ' - curl -sD - http://127.0.0.1:$GWAY_PORT/ipfs/${DIR_CID}/ > list_response && - test_should_contain "Index of" list_response && - test_should_not_contain ".." list_response -' - -test_expect_success "path gw: redirect dir listing to URL with trailing slash" ' - curl -sD - http://127.0.0.1:$GWAY_PORT/ipfs/${DIR_CID}/ą/ę > list_response && - test_should_contain "HTTP/1.1 301 Moved Permanently" list_response && - test_should_contain "Location: /ipfs/${DIR_CID}/%C4%85/%C4%99/" list_response -' - -test_expect_success "path gw: Etag should be present" ' - curl -sD - http://127.0.0.1:$GWAY_PORT/ipfs/${DIR_CID}/ą/ę/ > list_response && - test_should_contain "Index of" list_response && - test_should_contain "Etag: \"DirIndex-" list_response -' - -test_expect_success "path gw: breadcrumbs should point at /ipfs namespace mounted at Origin root" ' - test_should_contain "/ipfs/$DIR_CID/ą/ę" list_response -' - -test_expect_success "path gw: backlink on subdirectory should point at parent directory" ' - test_should_contain ".." 
list_response -' - -test_expect_success "path gw: name column should be a link to its content path" ' - test_should_contain "file-źł.txt" list_response -' - -test_expect_success "path gw: hash column should be a CID link with filename param" ' - test_should_contain "" list_response -' - -## ============================================================================ -## Test dir listing on subdomain gateway (e.g. {cid}.ipfs.localhost:8080) -## ============================================================================ - -DIR_HOSTNAME="${DIR_CID}.ipfs.localhost" -# note: we skip DNS lookup by running curl with --resolve $DIR_HOSTNAME:127.0.0.1 - -test_expect_success "subdomain gw: backlink on root CID should be hidden" ' - curl -sD - --resolve $DIR_HOSTNAME:$GWAY_PORT:127.0.0.1 http://$DIR_HOSTNAME:$GWAY_PORT/ > list_response && - test_should_contain "Index of" list_response && - test_should_not_contain ".." list_response -' - -test_expect_success "subdomain gw: redirect dir listing to URL with trailing slash" ' - curl -sD - --resolve $DIR_HOSTNAME:$GWAY_PORT:127.0.0.1 http://$DIR_HOSTNAME:$GWAY_PORT/ą/ę > list_response && - test_should_contain "HTTP/1.1 301 Moved Permanently" list_response && - test_should_contain "Location: /%C4%85/%C4%99/" list_response -' - -test_expect_success "subdomain gw: Etag should be present" ' - curl -sD - --resolve $DIR_HOSTNAME:$GWAY_PORT:127.0.0.1 http://$DIR_HOSTNAME:$GWAY_PORT/ą/ę/ > list_response && - test_should_contain "Index of" list_response && - test_should_contain "Etag: \"DirIndex-" list_response -' - -test_expect_success "subdomain gw: backlink on subdirectory should point at parent directory" ' - test_should_contain ".." list_response -' - -test_expect_success "subdomain gw: breadcrumbs should leverage path-based router mounted on the parent domain" ' - test_should_contain "/ipfs/$DIR_CID/ą/ę" list_response -' - -test_expect_success "subdomain gw: name column should be a link to content root mounted at subdomain origin" ' - test_should_contain "file-źł.txt" list_response -' - -test_expect_success "subdomain gw: hash column should be a CID link to path router with filename param" ' - test_should_contain "" list_response -' - -## ============================================================================ -## Test dir listing on DNSLink gateway (e.g. example.com) -## ============================================================================ - -# DNSLink test requires a daemon in online mode with precached /ipns/ mapping -test_kill_ipfs_daemon -DNSLINK_HOSTNAME="website.example.com" -export IPFS_NS_MAP="$DNSLINK_HOSTNAME:/ipfs/$DIR_CID" -test_launch_ipfs_daemon - -# Note that: -# - this type of gateway is also tested in gateway_test.go#TestIPNSHostnameBacklinks -# (go tests and sharness tests should be kept in sync) -# - we skip DNS lookup by running curl with --resolve $DNSLINK_HOSTNAME:127.0.0.1 - -test_expect_success "dnslink gw: backlink on root CID should be hidden" ' - curl -v -sD - --resolve $DNSLINK_HOSTNAME:$GWAY_PORT:127.0.0.1 http://$DNSLINK_HOSTNAME:$GWAY_PORT/ > list_response && - test_should_contain "Index of" list_response && - test_should_not_contain ".." 
list_response -' - -test_expect_success "dnslink gw: redirect dir listing to URL with trailing slash" ' - curl -sD - --resolve $DNSLINK_HOSTNAME:$GWAY_PORT:127.0.0.1 http://$DNSLINK_HOSTNAME:$GWAY_PORT/ą/ę > list_response && - test_should_contain "HTTP/1.1 301 Moved Permanently" list_response && - test_should_contain "Location: /%C4%85/%C4%99/" list_response -' - -test_expect_success "dnslink gw: Etag should be present" ' - curl -sD - --resolve $DNSLINK_HOSTNAME:$GWAY_PORT:127.0.0.1 http://$DNSLINK_HOSTNAME:$GWAY_PORT/ą/ę/ > list_response && - test_should_contain "Index of" list_response && - test_should_contain "Etag: \"DirIndex-" list_response -' - -test_expect_success "dnslink gw: backlink on subdirectory should point at parent directory" ' - test_should_contain ".." list_response -' - -test_expect_success "dnslink gw: breadcrumbs should point at content root mounted at dnslink origin" ' - test_should_contain "/ipns/website.example.com/ą/ę" list_response -' - -test_expect_success "dnslink gw: name column should be a link to content root mounted at dnslink origin" ' - test_should_contain "file-źł.txt" list_response -' - -# DNSLink websites don't have public gateway mounted by default -# See: https://github.com/ipfs/dir-index-html/issues/42 -test_expect_success "dnslink gw: hash column should be a CID link to cid.ipfs.tech" ' - test_should_contain "" list_response -' - -## ============================================================================ -## End of tests, cleanup -## ============================================================================ - -test_kill_ipfs_daemon -test_expect_success "clean up ipfs dir" ' - rm -rf "$IPFS_PATH" -' -test_done diff --git a/test/sharness/t0116-gateway-cache.sh b/test/sharness/t0116-gateway-cache.sh deleted file mode 100755 index 6dd81657c99..00000000000 --- a/test/sharness/t0116-gateway-cache.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test HTTP Gateway Cache Control Support" - -. lib/test-lib.sh - -test_init_ipfs -test_launch_ipfs_daemon_without_network - -# Cache control support is based on logical roots (each path segment == one logical root). -# To maximize the test surface, we want to test: -# - /ipfs/ content path -# - /ipns/ content path -# - at least 3 levels -# - separate tests for a directory listing and a file -# - have implicit index.html for good measure -# /ipns/root1/root2/root3/ (/ipns/root1/root2/root3/index.html) - -# Note: we cover important UnixFS-focused edge cases here: -# -# ROOT3_CID - dir listing (dir-index-html response) -# ROOT4_CID - index.html returned as a root response (dir/), instead of generated dir-index-html -# FILE_CID - index.html returned directly, as a file -# -# Caching of things like raw blocks, CARs, dag-json and dag-cbor -# is tested in their respective suites. 
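# A hedged aside (editor's sketch, not part of the original suite): the
# logical-root behavior described above can be probed by hand with curl;
# the port, path, and the assumption that the gateway reports its logical
# roots in an X-Ipfs-Roots response header are placeholders to verify:
# curl -sI "http://127.0.0.1:$GWAY_PORT/ipfs/$ROOT1_CID/root2/root3/" | grep -iE "^(etag|cache-control|x-ipfs-roots):"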
- -ROOT1_CID=bafybeib3ffl2teiqdncv3mkz4r23b5ctrwkzrrhctdbne6iboayxuxk5ui # ./ -ROOT2_CID=bafybeih2w7hjocxjg6g2ku25hvmd53zj7og4txpby3vsusfefw5rrg5sii # ./root2 -ROOT3_CID=bafybeiawdvhmjcz65x5egzx4iukxc72hg4woks6v6fvgyupiyt3oczk5ja # ./root2/root3 -ROOT4_CID=bafybeifq2rzpqnqrsdupncmkmhs3ckxxjhuvdcbvydkgvch3ms24k5lo7q # ./root2/root3/root4 -FILE_CID=bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am # ./root2/root3/root4/index.html -TEST_IPNS_ID=k51qzi5uqu5dlxdsdu5fpuu7h69wu4ohp32iwm9pdt9nq3y5rpn3ln9j12zfhe - -# Import test case -# See the static fixtures in ./t0116-gateway-cache/ -test_expect_success "Add the test directory" ' - ipfs dag import --pin-roots ../t0116-gateway-cache/fixtures.car - ipfs routing put --allow-offline /ipns/${TEST_IPNS_ID} ../t0116-gateway-cache/${TEST_IPNS_ID}.ipns-record -' - -# Etag - test_expect_success "GET for /ipfs/ unixfs dir listing succeeds" ' - curl -svX GET "http://127.0.0.1:$GWAY_PORT/ipfs/$ROOT1_CID/root2/root3/" >/dev/null 2>curl_ipfs_dir_listing_output - ' - - test_expect_success "GET for /ipns/ unixfs dir listing succeeds" ' - curl -svX GET "http://127.0.0.1:$GWAY_PORT/ipns/$TEST_IPNS_ID/root2/root3/" >/dev/null 2>curl_ipns_dir_listing_output - ' - - ## generated dir listing - test_expect_success "GET /ipfs/ dir response has special Etag for generated dir listing" ' - test_should_contain "< Etag: \"DirIndex" curl_ipfs_dir_listing_output && - grep -E "< Etag: \"DirIndex-.+_CID-${ROOT3_CID}\"" curl_ipfs_dir_listing_output - ' - test_expect_success "GET /ipns/ dir response has special Etag for generated dir listing" ' - test_should_contain "< Etag: \"DirIndex" curl_ipns_dir_listing_output && - grep -E "< Etag: \"DirIndex-.+_CID-${ROOT3_CID}\"" curl_ipns_dir_listing_output - ' - - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0119-prometheus.sh b/test/sharness/t0119-prometheus.sh deleted file mode 100755 index fef204e2312..00000000000 --- a/test/sharness/t0119-prometheus.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2020 Protocol Labs -# MIT/Apache-2.0 Licensed; see the LICENSE file in this repository. -# - -test_description="Test prometheus metrics are exposed correctly" - -. lib/test-lib.sh - -test_init_ipfs - -test_expect_success "disable ResourceMgr in the config" ' - ipfs config --json Swarm.ResourceMgr.Enabled false -' - -test_launch_ipfs_daemon - -test_expect_success "collect metrics" ' - curl "$API_ADDR/debug/metrics/prometheus" > raw_metrics -' - -test_kill_ipfs_daemon - -test_expect_success "filter metrics" ' - sed -ne "s/^\([a-z0-9_]\+\).*/\1/p" raw_metrics | LC_ALL=C sort | uniq > filtered_metrics -' - -test_expect_success "make sure metrics haven't changed" ' - diff -u ../t0119-prometheus-data/prometheus_metrics filtered_metrics -' - -# Check what was added by enabling ResourceMgr.Enabled -# -# NOTE: we won't see all the dynamic ones, but that is ok: the point of the -# test here is to detect regressions when rcmgr metrics disappear due to -# refactor/human error. 
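# A hedged aside (editor's sketch, not used by this suite): rcmgr metrics can
# also be eyeballed manually; the "libp2p_rcmgr_" name prefix below is an
# assumption about how the resource manager namespaces its Prometheus metrics:
# curl -s "$API_ADDR/debug/metrics/prometheus" | grep "^libp2p_rcmgr_" | head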
- -test_expect_success "enable ResourceMgr in the config" ' - ipfs config --json Swarm.ResourceMgr.Enabled true -' - -test_launch_ipfs_daemon - -test_expect_success "collect metrics" ' - curl "$API_ADDR/debug/metrics/prometheus" > raw_metrics -' - -test_kill_ipfs_daemon - -test_expect_success "filter metrics and find ones added by enabling ResourceMgr" ' - sed -ne "s/^\([a-z0-9_]\+\).*/\1/p" raw_metrics | LC_ALL=C sort > filtered_metrics && - grep -v -x -f ../t0119-prometheus-data/prometheus_metrics filtered_metrics | LC_ALL=C sort | uniq > rcmgr_metrics -' - -test_expect_success "make sure initial metrics added by setting ResourceMgr.Enabled haven't changed" ' - diff -u ../t0119-prometheus-data/prometheus_metrics_added_by_enabling_rcmgr rcmgr_metrics -' - -test_done diff --git a/test/sharness/t0120-bootstrap.sh b/test/sharness/t0120-bootstrap.sh deleted file mode 100755 index 00141da1f06..00000000000 --- a/test/sharness/t0120-bootstrap.sh +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2014 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -# changing the bootstrap peers will require changing it in two places :) -BP1="/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN" -BP2="/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa" -BP3="/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb" -BP4="/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt" -BP5="/dnsaddr/va1.bootstrap.libp2p.io/p2p/12D3KooWKnDdG3iXw9eTFijk3EWSunZcFi54Zka4wmtqtt6rPxc8" -BP6="/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ" -BP7="/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ" - -test_description="Test ipfs repo operations" - -. lib/test-lib.sh - -test_init_ipfs - -# we use a function so that we can run it both offline + online -test_bootstrap_list_cmd() { - printf "" >list_expected - - for BP in "$@" - do - echo "$BP" >>list_expected - done - - test_expect_success "'ipfs bootstrap' succeeds" ' - ipfs bootstrap >list_actual - ' - - test_expect_success "'ipfs bootstrap' output looks good" ' - test_cmp list_expected list_actual - ' - - test_expect_success "'ipfs bootstrap list' succeeds" ' - ipfs bootstrap list >list2_actual - ' - - test_expect_success "'ipfs bootstrap list' output looks good" ' - test_cmp list_expected list2_actual - ' -} - -# we use a function so that we can run it both offline + online -test_bootstrap_cmd() { - - # remove all peers just in case. 
- # if this fails, the first listing may not be empty - ipfs bootstrap rm --all - - test_bootstrap_list_cmd - - test_expect_success "'ipfs bootstrap add' succeeds" ' - ipfs bootstrap add "$BP1" "$BP2" "$BP3" >add_actual - ' - - test_expect_success "'ipfs bootstrap add' output looks good" ' - echo "added $BP1" >add_expected && - echo "added $BP2" >>add_expected && - echo "added $BP3" >>add_expected && - test_cmp add_expected add_actual - ' - - test_bootstrap_list_cmd $BP1 $BP2 $BP3 - - test_expect_success "'ipfs bootstrap rm' succeeds" ' - ipfs bootstrap rm "$BP1" "$BP3" >rm_actual - ' - - test_expect_success "'ipfs bootstrap rm' output looks good" ' - echo "removed $BP1" >rm_expected && - echo "removed $BP3" >>rm_expected && - test_cmp rm_expected rm_actual - ' - - test_expect_success "'ipfs bootstrap rm' fails on bad peers" ' - test_expect_code 1 ipfs bootstrap rm "foo/bar" - ' - - test_bootstrap_list_cmd $BP2 - - test_expect_success "'ipfs bootstrap add --default' succeeds" ' - ipfs bootstrap add --default >add2_actual - ' - - test_expect_success "'ipfs bootstrap add --default' output has default BP" ' - echo "added $BP1" >add2_expected && - echo "added $BP2" >>add2_expected && - echo "added $BP3" >>add2_expected && - echo "added $BP4" >>add2_expected && - echo "added $BP5" >>add2_expected && - echo "added $BP6" >>add2_expected && - echo "added $BP7" >>add2_expected && - test_cmp add2_expected add2_actual - ' - - test_bootstrap_list_cmd $BP1 $BP2 $BP3 $BP4 $BP5 $BP6 $BP7 - - test_expect_success "'ipfs bootstrap rm --all' succeeds" ' - ipfs bootstrap rm --all >rm2_actual - ' - - test_expect_success "'ipfs bootstrap rm' output looks good" ' - echo "removed $BP1" >rm2_expected && - echo "removed $BP2" >>rm2_expected && - echo "removed $BP3" >>rm2_expected && - echo "removed $BP4" >>rm2_expected && - echo "removed $BP5" >>rm2_expected && - echo "removed $BP6" >>rm2_expected && - echo "removed $BP7" >>rm2_expected && - test_cmp rm2_expected rm2_actual - ' - - test_bootstrap_list_cmd - - test_expect_success "'ipfs bootstrap add' accepts args from stdin" ' - echo $BP1 > bpeers && - echo $BP2 >> bpeers && - echo $BP3 >> bpeers && - echo $BP4 >> bpeers && - cat bpeers | ipfs bootstrap add > add_stdin_actual - ' - - test_expect_success "output looks good" ' - echo "added $BP1" > bpeers_add_exp && - echo "added $BP2" >> bpeers_add_exp && - echo "added $BP3" >> bpeers_add_exp && - echo "added $BP4" >> bpeers_add_exp && - test_cmp add_stdin_actual bpeers_add_exp - ' - - test_bootstrap_list_cmd $BP1 $BP2 $BP3 $BP4 - - test_expect_success "'ipfs bootstrap rm' accepts args from stdin" ' - cat bpeers | ipfs bootstrap rm > rm_stdin_actual - ' - - test_expect_success "output looks good" ' - echo "removed $BP1" > bpeers_rm_exp && - echo "removed $BP2" >> bpeers_rm_exp && - echo "removed $BP3" >> bpeers_rm_exp && - echo "removed $BP4" >> bpeers_rm_exp && - test_cmp rm_stdin_actual bpeers_rm_exp - ' - - test_bootstrap_list_cmd -} - -# should work offline -test_bootstrap_cmd - -# should work online -test_launch_ipfs_daemon -test_bootstrap_cmd -test_kill_ipfs_daemon - - -test_done diff --git a/test/sharness/t0121-bootstrap-iptb.sh b/test/sharness/t0121-bootstrap-iptb.sh deleted file mode 100755 index 16dcbdb2f09..00000000000 --- a/test/sharness/t0121-bootstrap-iptb.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2016 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. 
-# - -# changing the bootstrap peers will require changing it in two places :) -test_description="test node bootstrapping" - -. lib/test-lib.sh - -test_init_ipfs - -test_expect_success "disable mdns" ' - ipfs config Discovery.MDNS.Enabled false --json -' - -test_launch_ipfs_daemon - -test_expect_success "setup iptb nodes" ' - iptb testbed create -type localipfs -count 5 -force -init -' - -test_expect_success "start up iptb nodes" ' - iptb start -wait -' - -test_expect_success "check peers works" ' - ipfs swarm peers >peers_out -' - -test_expect_success "correct number of peers" ' - test -z "`cat peers_out`" -' - -betterwait() { - while kill -0 $1; do true; done -} - -test_expect_success "bring down iptb nodes" ' - PID0=$(cat "$IPTB_ROOT/testbeds/default/0/daemon.pid") && - PID1=$(cat "$IPTB_ROOT/testbeds/default/1/daemon.pid") && - PID2=$(cat "$IPTB_ROOT/testbeds/default/2/daemon.pid") && - PID3=$(cat "$IPTB_ROOT/testbeds/default/3/daemon.pid") && - PID4=$(cat "$IPTB_ROOT/testbeds/default/4/daemon.pid") && - iptb stop && # TODO: add --wait flag to iptb stop - betterwait $PID0 - betterwait $PID1 - betterwait $PID2 - betterwait $PID3 - betterwait $PID4 -' - -test_expect_success "reset iptb nodes" ' - # the API doesn't seem to get cleaned up in sharness tests for some reason - iptb testbed create -type localipfs -count 5 -force -init -' - -test_expect_success "set bootstrap addrs" ' - bsn_peer_id=$(ipfs id -f "<id>") && - BADDR="/ip4/127.0.0.1/tcp/$SWARM_PORT/p2p/$bsn_peer_id" && - ipfsi 0 bootstrap add $BADDR && - ipfsi 1 bootstrap add $BADDR && - ipfsi 2 bootstrap add $BADDR && - ipfsi 3 bootstrap add $BADDR && - ipfsi 4 bootstrap add $BADDR -' - -test_expect_success "start up iptb nodes" ' - iptb start -wait -' - -test_expect_success "check peers works" ' - ipfs swarm peers > peers_out -' - -test_expect_success "correct number of peers" ' - test `cat peers_out | wc -l` = 5 -' - -test_kill_ipfs_daemon - -test_expect_success "bring down iptb nodes" ' - iptb stop -' - -test_done diff --git a/test/sharness/t0131-multinode-client-routing.sh b/test/sharness/t0131-multinode-client-routing.sh deleted file mode 100755 index 8949a1bdfd8..00000000000 --- a/test/sharness/t0131-multinode-client-routing.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2015 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test client mode dht" - -. 
lib/test-lib.sh - -check_file_fetch() { - node=$1 - fhash=$2 - fname=$3 - - test_expect_success "can fetch file" ' - ipfsi $node cat $fhash > fetch_out - ' - - test_expect_success "file looks good" ' - test_cmp $fname fetch_out - ' -} - -run_single_file_test() { - test_expect_success "add a file on node1" ' - random 1000000 > filea && - FILEA_HASH=$(ipfsi 1 add -q filea) - ' - - check_file_fetch 9 $FILEA_HASH filea - check_file_fetch 8 $FILEA_HASH filea - check_file_fetch 7 $FILEA_HASH filea - check_file_fetch 6 $FILEA_HASH filea - check_file_fetch 5 $FILEA_HASH filea - check_file_fetch 4 $FILEA_HASH filea - check_file_fetch 3 $FILEA_HASH filea - check_file_fetch 2 $FILEA_HASH filea - check_file_fetch 1 $FILEA_HASH filea - check_file_fetch 0 $FILEA_HASH filea -} - -NNODES=10 - -test_expect_success "set up testbed" ' - iptb testbed create -type localipfs -count $NNODES -force -init && - iptb run -- ipfs config --json "Routing.LoopbackAddressesOnLanDHT" true -' - -test_expect_success "start up nodes" ' - iptb start -wait [0-7] && - iptb start -wait [8-9] -- --routing=dhtclient -' - -test_expect_success "connect up nodes" ' - iptb connect [1-9] 0 -' - -test_expect_success "add a file on a node in client mode" ' - random 1000000 > filea && - FILE_HASH=$(ipfsi 8 add -q filea) -' - -test_expect_success "retrieve that file on a node in client mode" ' - check_file_fetch 9 $FILE_HASH filea -' - -run_single_file_test - -test_expect_success "shut down nodes" ' - iptb stop -' - -test_done diff --git a/test/sharness/t0140-swarm.sh b/test/sharness/t0140-swarm.sh deleted file mode 100755 index d65831d3e22..00000000000 --- a/test/sharness/t0140-swarm.sh +++ /dev/null @@ -1,212 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2014 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test ipfs swarm command" - -. 
lib/test-lib.sh - -test_init_ipfs - -test_launch_ipfs_daemon - -test_expect_success 'disconnected: peers is empty' ' - ipfs swarm peers >actual && - test_must_be_empty actual -' - -test_expect_success 'disconnected: addrs local has localhost' ' - ipfs swarm addrs local >actual && - grep "/ip4/127.0.0.1" actual -' - -test_expect_success 'disconnected: addrs local matches ipfs id' ' - ipfs id -f="<addrs>\\n" | sort >expected && - ipfs swarm addrs local --id | sort >actual && - test_cmp expected actual -' - -test_expect_success "ipfs id self works" ' - myid=$(ipfs id -f="<id>") && - ipfs id --timeout=1s $myid > output -' - -test_expect_success "output looks good" ' - grep $myid output && - grep PublicKey output -' - -addr="/ip4/127.0.0.1/tcp/9898/p2p/QmUWKoHbjsqsSMesRC2Zoscs8edyFz6F77auBB1YBBhgpX" - -test_expect_success "can't trigger a dial backoff with swarm connect" ' - test_expect_code 1 ipfs swarm connect $addr 2> connect_out - test_expect_code 1 ipfs swarm connect $addr 2>> connect_out - test_expect_code 1 ipfs swarm connect $addr 2>> connect_out - test_expect_code 1 grep "backoff" connect_out -' - -test_kill_ipfs_daemon - -announceCfg='["/ip4/127.0.0.1/tcp/4001", "/ip4/1.2.3.4/tcp/1234"]' -test_expect_success "test_config_set succeeds" " - ipfs config --json Addresses.Announce '$announceCfg' -" - -test_launch_ipfs_daemon - -test_expect_success 'Addresses.Announce affects addresses' ' - ipfs swarm addrs local >actual && - grep "/ip4/1.2.3.4/tcp/1234" actual && - ipfs id -f"<addrs>" | xargs -n1 echo >actual && - grep "/ip4/1.2.3.4/tcp/1234" actual -' - -test_kill_ipfs_daemon - - -announceCfg='["/ip4/127.0.0.1/tcp/4001", "/ip4/1.2.3.4/tcp/1234"]' -test_expect_success "test_config_set succeeds" " - ipfs config --json Addresses.Announce '$announceCfg' -" -# Include "/ip4/1.2.3.4/tcp/1234" to ensure we deduplicate addrs already present in Addresses.Announce -appendAnnounceCfg='["/dnsaddr/dynamic.example.com", "/ip4/10.20.30.40/tcp/4321", "/ip4/1.2.3.4/tcp/1234"]' -test_expect_success "test_config_set Announce and AppendAnnounce succeeds" " - ipfs config --json Addresses.Announce '$announceCfg' && - ipfs config --json Addresses.AppendAnnounce '$appendAnnounceCfg' -" - -test_launch_ipfs_daemon - -test_expect_success 'Addresses.AppendAnnounce is applied on top of Announce' ' - ipfs swarm addrs local >actual && - grep "/ip4/1.2.3.4/tcp/1234" actual && - grep "/dnsaddr/dynamic.example.com" actual && - grep "/ip4/10.20.30.40/tcp/4321" actual && - ipfs id -f"<addrs>" | xargs -n1 echo | tee actual && - grep "/ip4/1.2.3.4/tcp/1234/p2p" actual && - grep "/dnsaddr/dynamic.example.com/p2p/" actual && - grep "/ip4/10.20.30.40/tcp/4321/p2p/" actual -' - -test_kill_ipfs_daemon - -noAnnounceCfg='["/ip4/1.2.3.4/tcp/1234"]' -test_expect_success "test_config_set succeeds" " - ipfs config --json Addresses.NoAnnounce '$noAnnounceCfg' -" - -test_launch_ipfs_daemon - -test_expect_success "Addresses.NoAnnounce affects addresses from Announce and AppendAnnounce" ' - ipfs swarm addrs local >actual && - grep -v "/ip4/1.2.3.4/tcp/1234" actual && - grep -v "/ip4/10.20.30.40/tcp/4321" actual && - ipfs id -f"<addrs>" | xargs -n1 echo >actual && - grep -v "/ip4/1.2.3.4/tcp/1234" actual && - grep -v "/ip4/10.20.30.40/tcp/4321" actual -' - -test_kill_ipfs_daemon - -noAnnounceCfg='["/ip4/1.2.3.4/ipcidr/16"]' -test_expect_success "test_config_set succeeds" " - ipfs config --json Addresses.NoAnnounce '$noAnnounceCfg' -" - -test_launch_ipfs_daemon - -test_expect_success "Addresses.NoAnnounce with /ipcidr affects addresses" ' - ipfs swarm addrs local >actual && - 
grep -v "/ip4/1.2.3.4/tcp/1234" actual && - ipfs id -f"" | xargs -n1 echo >actual && - grep -v "/ip4/1.2.3.4/tcp/1234" actual -' - -test_kill_ipfs_daemon - -test_launch_ipfs_daemon - -test_expect_success "'ipfs swarm peering ls' lists peerings" ' - ipfs swarm peering ls -' - -peeringID='QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N' -peeringID2='QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5K' -peeringAddr='/ip4/1.2.3.4/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N' -peeringAddr2='/ip4/1.2.3.4/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5K' -test_expect_success "'ipfs swarm peering add' adds a peering" ' - ipfs swarm peering ls > peeringls && - ! test_should_contain ${peeringID} peeringls && - ! test_should_contain ${peeringID2} peeringls && - ipfs swarm peering add ${peeringAddr} ${peeringAddr2} -' - -test_expect_success 'a peering is added' ' - ipfs swarm peering ls > peeringadd && - test_should_contain ${peeringID} peeringadd && - test_should_contain ${peeringID2} peeringadd -' - -test_expect_success "'swarm peering rm' removes a peering" ' - ipfs swarm peering rm ${peeringID} -' - -test_expect_success 'peering is removed' ' - ipfs swarm peering ls > peeringrm && - ! test_should_contain ${peeringID} peeringrm -' - -test_kill_ipfs_daemon - -test_expect_success "set up tcp testbed" ' - iptb testbed create -type localipfs -count 2 -force -init -' - -startup_cluster 2 - -test_expect_success "disconnect work without specifying a transport address" ' - [ $(ipfsi 0 swarm peers | wc -l) -eq 1 ] && - ipfsi 0 swarm disconnect "/p2p/$(iptb attr get 1 id)" && - [ $(ipfsi 0 swarm peers | wc -l) -eq 0 ] -' - -test_expect_success "connect work without specifying a transport address" ' - [ $(ipfsi 0 swarm peers | wc -l) -eq 0 ] && - ipfsi 0 swarm connect "/p2p/$(iptb attr get 1 id)" && - [ $(ipfsi 0 swarm peers | wc -l) -eq 1 ] -' - -test_expect_success "/p2p addresses work" ' - [ $(ipfsi 0 swarm peers | wc -l) -eq 1 ] && - ipfsi 0 swarm disconnect "/p2p/$(iptb attr get 1 id)" && - [ $(ipfsi 0 swarm peers | wc -l) -eq 0 ] && - ipfsi 0 swarm connect "/p2p/$(iptb attr get 1 id)" && - [ $(ipfsi 0 swarm peers | wc -l) -eq 1 ] -' - -test_expect_success "ipfs id is consistent for node 0" ' - ipfsi 1 id "$(iptb attr get 0 id)" > 1see0 && - ipfsi 0 id > 0see0 && - test_cmp 1see0 0see0 -' - -test_expect_success "ipfs id is consistent for node 1" ' - ipfsi 0 id "$(iptb attr get 1 id)" > 0see1 && - ipfsi 1 id > 1see1 && - test_cmp 0see1 1see1 -' - -test_expect_success "addresses contain /p2p/..." ' - test_should_contain "/p2p/$(iptb attr get 1 id)\"" 0see1 && - test_should_contain "/p2p/$(iptb attr get 1 id)\"" 1see1 && - test_should_contain "/p2p/$(iptb attr get 0 id)\"" 1see0 && - test_should_contain "/p2p/$(iptb attr get 0 id)\"" 0see0 -' - -test_expect_success "stopping cluster" ' - iptb stop -' - -test_done diff --git a/test/sharness/t0141-addfilter.sh b/test/sharness/t0141-addfilter.sh deleted file mode 100755 index 5874f31122b..00000000000 --- a/test/sharness/t0141-addfilter.sh +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2014 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test ipfs swarm command" - -AF1="/ip4/192.168.0.0/ipcidr/16" -AF2="/ip4/127.0.0.0/ipcidr/8" -AF3="/ip6/2008:bcd::/ipcidr/32" -AF4="/ip4/172.16.0.0/ipcidr/12" - -. 
lib/test-lib.sh - -test_init_ipfs - -test_swarm_filter_cmd() { - printf "" > list_expected - for AF in "$@" - do - echo "$AF" >>list_expected - done - - test_expect_success "'ipfs swarm filters' succeeds" ' - ipfs swarm filters > list_actual - ' - - test_expect_success "'ipfs swarm filters' output looks good" ' - test_sort_cmp list_expected list_actual - ' -} - -test_config_swarm_addrfilters_cmd() { - printf "" > list_expected - for AF in "$@" - do - echo "$AF" >>list_expected - done - - test_expect_success "'ipfs config Swarm.AddrFilters' succeeds" ' - ipfs config Swarm.AddrFilters > list_actual - ' - - printf "" > list_actual_cleaned - if [ "$( cat list_actual )" != "[]" -a "$( cat list_actual )" != "null" ]; - then - grep -v "^\]" list_actual | - grep -v "^\[" | - tr -d '" ,' > list_actual_cleaned - fi - - test_expect_success "'ipfs config Swarm.AddrFilters' output looks good" ' - test_sort_cmp list_expected list_actual_cleaned - ' -} - -test_swarm_filters() { - - # expect first address from config - test_swarm_filter_cmd $AF1 $AF4 - - test_config_swarm_addrfilters_cmd $AF1 $AF4 - - ipfs swarm filters rm all - - test_swarm_filter_cmd - - test_config_swarm_addrfilters_cmd - - test_expect_success "'ipfs swarm filter add' succeeds" ' - ipfs swarm filters add $AF1 $AF2 $AF3 - ' - - test_swarm_filter_cmd $AF1 $AF2 $AF3 - - test_config_swarm_addrfilters_cmd $AF1 $AF2 $AF3 - - test_expect_success "'ipfs swarm filter rm' succeeds" ' - ipfs swarm filters rm $AF2 $AF3 - ' - - test_swarm_filter_cmd $AF1 - - test_config_swarm_addrfilters_cmd $AF1 - - test_expect_success "'ipfs swarm filter add' succeeds" ' - ipfs swarm filters add $AF4 $AF2 - ' - - test_swarm_filter_cmd $AF1 $AF2 $AF4 - - test_config_swarm_addrfilters_cmd $AF1 $AF2 $AF4 - - test_expect_success "'ipfs swarm filter rm' succeeds" ' - ipfs swarm filters rm $AF1 $AF2 $AF4 - ' - - test_swarm_filter_cmd - - test_config_swarm_addrfilters_cmd -} - -test_expect_success "init without any filters" ' - echo "null" >expected && - ipfs config Swarm.AddrFilters >actual && - test_cmp expected actual -' - -test_expect_success "adding addresses to the config to filter succeeds" ' - ipfs config --json Swarm.AddrFilters "[\"$AF1\", \"$AF4\"]" -' - -test_launch_ipfs_daemon - -test_swarm_filters - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0142-testfilter.sh b/test/sharness/t0142-testfilter.sh deleted file mode 100755 index bdd7e4f76b1..00000000000 --- a/test/sharness/t0142-testfilter.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2020 Protocol Labs -# MIT/Apache-2.0 Licensed; see the LICENSE file in this repository. -# - -test_description="Test swarm filters are effective" - -AF="/ip4/127.0.0.0/ipcidr/24" - -. 
lib/test-lib.sh - -NUM_NODES=3 - -test_expect_success "set up testbed" ' - iptb testbed create -type localipfs -count $NUM_NODES -force -init && - iptb run -- ipfs config --json "Routing.LoopbackAddressesOnLanDHT" true -' - -test_expect_success 'filter 127.0.0.0/24 on node 1' ' - ipfsi 1 config --json Swarm.AddrFilters "[\"$AF\"]" -' - -for i in $(seq 0 $(( NUM_NODES - 1 ))); do - test_expect_success "change IP for node $i" ' - ipfsi $i config --json "Addresses.Swarm" \ - "[\"/ip4/127.0.$i.1/tcp/0\",\"/ip4/127.0.$i.1/udp/0/quic\",\"/ip4/127.0.$i.1/tcp/0/ws\"]" - ' -done - -test_expect_success 'start cluster' ' - iptb start --wait -' - -test_expect_success 'connecting 1 to 0 fails' ' - test_must_fail iptb connect 1 0 -' - -test_expect_success 'connecting 0 to 1 fails' ' - test_must_fail iptb connect 0 1 -' - -test_expect_success 'connecting 2 to 0 succeeds' ' - iptb connect 2 0 -' - -test_expect_success 'connecting 1 to 0 with dns addrs fails' ' - ipfsi 0 id -f "<addrs>" | sed "s|^/ip4/127.0.0.1/|/dns4/localhost/|" > addrs && - test_must_fail ipfsi 1 swarm connect $(cat addrs) -' - - -test_expect_success 'stopping cluster' ' - iptb stop -' - -test_done diff --git a/test/sharness/t0150-clisuggest.sh b/test/sharness/t0150-clisuggest.sh deleted file mode 100755 index 30ae6acd2ea..00000000000 --- a/test/sharness/t0150-clisuggest.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test ipfs cli cmd suggest" - -. lib/test-lib.sh - -test_suggest() { - - - test_expect_success "test command fails" ' - test_must_fail ipfs kog 2>actual - ' - - test_expect_success "test one command is suggested" ' - grep "Did you mean this?" actual && - grep "log" actual || - test_fsh cat actual - ' - - test_expect_success "test command fails" ' - test_must_fail ipfs li 2>actual - ' - - test_expect_success "test multiple commands are suggested" ' - grep "Did you mean any of these?" actual && - grep "ls" actual && - grep "log" actual || - test_fsh cat actual - ' - -} - -test_init_ipfs - -test_suggest - -test_launch_ipfs_daemon - -test_suggest - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0151-sysdiag.sh b/test/sharness/t0151-sysdiag.sh deleted file mode 100755 index 5c95dda2620..00000000000 --- a/test/sharness/t0151-sysdiag.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2015 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="test output of sysdiag command" - -. lib/test-lib.sh - -test_init_ipfs - -test_expect_success "ipfs diag sys succeeds" ' - ipfs diag sys > output -' - -test_expect_success "output contains some expected keys" ' - grep "virt" output && - grep "interface_addresses" output && - grep "arch" output && - grep "online" output -' - -test_expect_success "uname succeeds" ' - UOUT=$(uname) -' - -test_expect_success "output is similar to uname" ' - case $UOUT in - Linux) - grep linux output > /dev/null - ;; - Darwin) - grep darwin output > /dev/null - ;; - FreeBSD) - grep freebsd output > /dev/null - ;; - CYGWIN*) - grep windows output > /dev/null - ;; - *) - test_fsh echo system check for $UOUT failed, unsupported system? - ;; - esac -' - -test_done diff --git a/test/sharness/t0152-profile.sh b/test/sharness/t0152-profile.sh deleted file mode 100755 index 0bc328d2ed2..00000000000 --- a/test/sharness/t0152-profile.sh +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2016 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. 
-# - -test_description="Test profile collection" - -. lib/test-lib.sh - -test_init_ipfs - -test_expect_success "profiling requires a running daemon" ' - test_must_fail ipfs diag profile -' - -test_launch_ipfs_daemon - -test_expect_success "test profiling (without sampling)" ' - ipfs diag profile --profile-time=0 > cmd_out -' - -test_expect_success "filename shows up in output" ' - grep -q "ipfs-profile" cmd_out -' - -test_expect_success "profile file created" ' - test -e "$(sed -n -e "s/.*\(ipfs-profile.*\.zip\)/\1/p" cmd_out)" -' - -test_expect_success "test profiling with -o" ' - ipfs diag profile --profile-time=1s -o test-profile.zip -' - -test_expect_success "test that test-profile.zip exists" ' - test -e test-profile.zip -' - -test_expect_success "test profiling with specific collectors" ' - ipfs diag profile --collectors version,goroutines-stack -o test-profile-small.zip -' - -test_kill_ipfs_daemon - -if ! test_have_prereq UNZIP; then - test_done -fi - -test_expect_success "unpack profiles" ' - unzip -d profiles test-profile.zip && - unzip -d profiles-small test-profile-small.zip -' - -test_expect_success "cpu profile is valid" ' - go tool pprof -top profiles/ipfs "profiles/cpu.pprof" | grep -q "Type: cpu" -' - -test_expect_success "heap profile is valid" ' - go tool pprof -top profiles/ipfs "profiles/heap.pprof" | grep -q "Type: inuse_space" -' - -test_expect_success "goroutines profile is valid" ' - go tool pprof -top profiles/ipfs "profiles/goroutines.pprof" | grep -q "Type: goroutine" -' - -test_expect_success "mutex profile is valid" ' - go tool pprof -top profiles/ipfs "profiles/mutex.pprof" | grep -q "Type: delay" -' - -test_expect_success "block profile is valid" ' - go tool pprof -top profiles/ipfs "profiles/block.pprof" | grep -q "Type: delay" -' - -test_expect_success "goroutines stacktrace is valid" ' - grep -q "goroutine" "profiles/goroutines.stacks" -' - -test_expect_success "the small profile only contains the requested data" ' - find profiles-small -type f | sort > actual && - echo -e "profiles-small/goroutines.stacks\nprofiles-small/version.json" > expected && - test_cmp expected actual -' - -test_done diff --git a/test/sharness/t0160-resolve.sh b/test/sharness/t0160-resolve.sh deleted file mode 100755 index 5ec3f99be0d..00000000000 --- a/test/sharness/t0160-resolve.sh +++ /dev/null @@ -1,158 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test resolve command" - -. lib/test-lib.sh - -test_init_ipfs - -test_expect_success "resolve: prepare files" ' - mkdir -p a/b && - echo "a/b/c" >a/b/c && - a_hash=$(ipfs add -Q -r a) && - b_hash=$(ipfs add -Q -r a/b) && - c_hash=$(ipfs add -Q -r a/b/c) && - a_hash_b32=$(cid-fmt -v 1 -b b %s $a_hash) && - b_hash_b32=$(cid-fmt -v 1 -b b %s $b_hash) && - c_hash_b32=$(cid-fmt -v 1 -b b %s $c_hash) -' - -test_expect_success "resolve: prepare dag" ' - dag_hash=$(ipfs dag put <<<"{\"i\": {\"j\": {\"k\": \"asdfasdfasdf\"}}}") -' - -test_expect_success "resolve: prepare keys" ' - self_hash=$(ipfs key list --ipns-base=base36 -l | grep self | cut -d " " -f1) && - alt_hash=$(ipfs key gen --ipns-base=base36 -t rsa alt) -' - -test_resolve_setup_name() { - local key="$1" - local ref="$2" - - # we pass --ttl=0s here to ensure that it does not get cached by namesys. - # the alternative would be to wait between tests to ensure that the namesys - # cache gets purged in time, but that adds runtime to the tests. 
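# (hedged aside, editor's sketch: an alternative to publishing with --ttl=0s
# would be bypassing the cache at lookup time, e.g.
# "ipfs name resolve --nocache /ipns/<key>"; the zero TTL is kept so that the
# plain "ipfs resolve" calls below stay uncached as well)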
- test_expect_success "resolve: prepare $key" ' - ipfs name publish --key="$key" --ttl=0s --allow-offline "$ref" - ' -} - -test_resolve() { - src=$1 - dst=$2 - extra=$3 - - test_expect_success "resolve succeeds: $src" ' - ipfs resolve $extra "$src" >actual - ' - - test_expect_success "resolved correctly: $src -> $dst" ' - printf "$dst\n" >expected && - test_cmp expected actual - ' -} - -test_resolve_cmd() { - echo '-- starting test_resolve_cmd' - test_resolve "/ipfs/$a_hash" "/ipfs/$a_hash" - test_resolve "/ipfs/$a_hash/b" "/ipfs/$b_hash" - test_resolve "/ipfs/$a_hash/b/c" "/ipfs/$c_hash" - test_resolve "/ipfs/$b_hash/c" "/ipfs/$c_hash" - test_resolve "/ipld/$dag_hash/i/j/k" "/ipld/$dag_hash/i/j/k" - test_resolve "/ipld/$dag_hash/i/j" "/ipld/$dag_hash/i/j" - test_resolve "/ipld/$dag_hash/i" "/ipld/$dag_hash/i" - - test_resolve_setup_name "self" "/ipfs/$a_hash" - test_resolve "/ipns/$self_hash" "/ipfs/$a_hash" - test_resolve "/ipns/$self_hash/b" "/ipfs/$b_hash" - test_resolve "/ipns/$self_hash/b/c" "/ipfs/$c_hash" - - test_resolve_setup_name "self" "/ipfs/$b_hash" - test_resolve "/ipns/$self_hash" "/ipfs/$b_hash" - test_resolve "/ipns/$self_hash/c" "/ipfs/$c_hash" - - test_resolve_setup_name "self" "/ipfs/$c_hash" - test_resolve "/ipns/$self_hash" "/ipfs/$c_hash" - - # simple recursion succeeds - test_resolve_setup_name "alt" "/ipns/$self_hash" - test_resolve "/ipns/$alt_hash" "/ipfs/$c_hash" - - # partial resolve succeeds - test_resolve "/ipns/$alt_hash" "/ipns/$self_hash" -r=false - - # infinite recursion fails - test_resolve_setup_name "self" "/ipns/$self_hash" - test_expect_success "recursive resolve terminates" ' - test_expect_code 1 ipfs resolve /ipns/$self_hash 2>recursion_error && - grep "recursion limit exceeded" recursion_error - ' -} - -test_resolve_cmd_b32() { - echo '-- starting test_resolve_cmd_b32' - # no flags needed, base should be preserved - - test_resolve "/ipfs/$a_hash_b32" "/ipfs/$a_hash_b32" - test_resolve "/ipfs/$a_hash_b32/b" "/ipfs/$b_hash_b32" - test_resolve "/ipfs/$a_hash_b32/b/c" "/ipfs/$c_hash_b32" - test_resolve "/ipfs/$b_hash_b32/c" "/ipfs/$c_hash_b32" - - # flags needed passed in path does not contain cid to derive base - - test_resolve_setup_name "self" "/ipfs/$a_hash_b32" - test_resolve "/ipns/$self_hash" "/ipfs/$a_hash_b32" --cid-base=base32 - test_resolve "/ipns/$self_hash/b" "/ipfs/$b_hash_b32" --cid-base=base32 - test_resolve "/ipns/$self_hash/b/c" "/ipfs/$c_hash_b32" --cid-base=base32 - - test_resolve_setup_name "self" "/ipfs/$b_hash_b32" --cid-base=base32 - test_resolve "/ipns/$self_hash" "/ipfs/$b_hash_b32" --cid-base=base32 - test_resolve "/ipns/$self_hash/c" "/ipfs/$c_hash_b32" --cid-base=base32 - - test_resolve_setup_name "self" "/ipfs/$c_hash_b32" - test_resolve "/ipns/$self_hash" "/ipfs/$c_hash_b32" --cid-base=base32 - - # peer ID represented as CIDv1 require libp2p-key multicodec - # https://github.com/libp2p/specs/blob/master/RFC/0001-text-peerid-cid.md - local self_hash_b32protobuf=$(echo $self_hash | ipfs cid format -v 1 -b b --mc dag-pb) - local self_hash_b32libp2pkey=$(echo $self_hash | ipfs cid format -v 1 -b b --mc libp2p-key) - test_expect_success "resolve of /ipns/{cidv1} with multicodec other than libp2p-key returns a meaningful error" ' - test_expect_code 1 ipfs resolve /ipns/$self_hash_b32protobuf 2>cidcodec_error && - test_should_contain "Error: peer ID represented as CIDv1 require libp2p-key multicodec: retry with /ipns/$self_hash_b32libp2pkey" cidcodec_error - ' -} - -test_resolve_cmd_success() { - test_resolve "/ipfs/$a_hash" 
"/ipfs/$a_hash" - test_resolve "/ipfs/$a_hash/b" "/ipfs/$b_hash" - test_resolve "/ipfs/$a_hash/b/c" "/ipfs/$c_hash" - test_resolve "/ipfs/$b_hash/c" "/ipfs/$c_hash" - test_resolve "/ipld/$dag_hash" "/ipld/$dag_hash" - test_resolve "/ipld/$dag_hash/i/j/k" "/ipld/$dag_hash/i/j/k" - test_resolve "/ipld/$dag_hash/i/j" "/ipld/$dag_hash/i/j" - test_resolve "/ipld/$dag_hash/i" "/ipld/$dag_hash/i" - - test_resolve_setup_name "self" "/ipfs/$a_hash" - test_resolve "/ipns/$self_hash" "/ipfs/$a_hash" - test_resolve "/ipns/$self_hash/b" "/ipfs/$b_hash" - test_resolve "/ipns/$self_hash/b/c" "/ipfs/$c_hash" - - test_resolve_setup_name "self" "/ipfs/$b_hash" - test_resolve "/ipns/$self_hash" "/ipfs/$b_hash" - test_resolve "/ipns/$self_hash/c" "/ipfs/$c_hash" - - test_resolve_setup_name "self" "/ipfs/$c_hash" - test_resolve "/ipns/$self_hash" "/ipfs/$c_hash" -} - -# should work offline -test_resolve_cmd -test_resolve_cmd_b32 - -# should work online -test_launch_ipfs_daemon -test_resolve_cmd_success -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0165-keystore.sh b/test/sharness/t0165-keystore.sh deleted file mode 100755 index 60089ecd7f2..00000000000 --- a/test/sharness/t0165-keystore.sh +++ /dev/null @@ -1,281 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2017 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test keystore commands" - -. lib/test-lib.sh - -test_init_ipfs - -test_key_cmd() { -# test key output format -test_expect_success "create an RSA key and test B58MH/B36CID output formats" ' -PEERID=$(ipfs key gen --ipns-base=b58mh --type=rsa --size=2048 key_rsa) && -test_check_rsa2048_b58mh_peerid $PEERID && -ipfs key rm key_rsa && -PEERID=$(ipfs key gen --ipns-base=base36 --type=rsa --size=2048 key_rsa) && -test_check_rsa2048_base36_peerid $PEERID -' - -test_expect_success "test RSA key sk export format" ' -ipfs key export key_rsa && -test_check_rsa2048_sk key_rsa.key && -rm key_rsa.key -' - -test_expect_success "test RSA key B58MH/B36CID multihash format" ' -PEERID=$(ipfs key list --ipns-base=b58mh -l | grep key_rsa | head -n 1 | cut -d " " -f1) && -test_check_rsa2048_b58mh_peerid $PEERID && -PEERID=$(ipfs key list --ipns-base=base36 -l | grep key_rsa | head -n 1 | cut -d " " -f1) && -test_check_rsa2048_base36_peerid $PEERID && -ipfs key rm key_rsa -' - -test_expect_success "create an ED25519 key and test B58MH/B36CID output formats" ' -PEERID=$(ipfs key gen --ipns-base=b58mh --type=ed25519 key_ed25519) && -test_check_ed25519_b58mh_peerid $PEERID && -ipfs key rm key_ed25519 && -PEERID=$(ipfs key gen --ipns-base=base36 --type=ed25519 key_ed25519) && -test_check_ed25519_base36_peerid $PEERID -' - -test_expect_success "test ED25519 key sk export format" ' -ipfs key export key_ed25519 && -test_check_ed25519_sk key_ed25519.key && -rm key_ed25519.key -' - -test_expect_success "test ED25519 key B58MH/B36CID multihash format" ' -PEERID=$(ipfs key list --ipns-base=b58mh -l | grep key_ed25519 | head -n 1 | cut -d " " -f1) && -test_check_ed25519_b58mh_peerid $PEERID && -PEERID=$(ipfs key list --ipns-base=base36 -l | grep key_ed25519 | head -n 1 | cut -d " " -f1) && -test_check_ed25519_base36_peerid $PEERID && -ipfs key rm key_ed25519 -' -# end of format test - - - test_expect_success "create a new rsa key" ' - rsahash=$(ipfs key gen generated_rsa_key --type=rsa --size=2048) - echo $rsahash > rsa_key_id - ' - - test_key_import_export_all_formats rsa_key - - test_expect_success "create a new ed25519 key" ' - edhash=$(ipfs key gen 
generated_ed25519_key --type=ed25519) - echo $edhash > ed25519_key_id - ' - - test_key_import_export_all_formats ed25519_key - - test_openssl_compatibility_all_types - - INVALID_KEY=../t0165-keystore-data/openssl_secp384r1.pem - test_expect_success "import key type we don't generate fails" ' - test_must_fail ipfs key import restricted-type -f pem-pkcs8-cleartext $INVALID_KEY 2>&1 | tee key_exp_out && - grep -q "Error: key type \*crypto.ECDSAPrivateKey is not allowed to be imported" key_exp_out && - rm key_exp_out - ' - - test_expect_success "import key type we don't generate succeeds with flag" ' - ipfs key import restricted-type --allow-any-key-type -f pem-pkcs8-cleartext $INVALID_KEY > /dev/null && - ipfs key rm restricted-type - ' - - test_expect_success "test export file option" ' - ipfs key export generated_rsa_key -o=named_rsa_export_file && - test_cmp generated_rsa_key.key named_rsa_export_file && - ipfs key export generated_ed25519_key -o=named_ed25519_export_file && - test_cmp generated_ed25519_key.key named_ed25519_export_file - ' - - test_expect_success "key export can't export self" ' - test_must_fail ipfs key export self 2>&1 | tee key_exp_out && - grep -q "Error: cannot export key with name" key_exp_out && - test_must_fail ipfs key export self -o=selfexport 2>&1 | tee key_exp_out && - grep -q "Error: cannot export key with name" key_exp_out - ' - - test_expect_success "key import can't import self" ' - ipfs key gen overwrite_self_import && - ipfs key export overwrite_self_import && - test_must_fail ipfs key import self overwrite_self_import.key 2>&1 | tee key_imp_out && - grep -q "Error: cannot import key with name" key_imp_out && - ipfs key rm overwrite_self_import && - rm overwrite_self_import.key - ' - - test_expect_success "add a default key" ' - ipfs key gen quxel - ' - - test_expect_success "all keys show up in list output" ' - echo generated_ed25519_key > list_exp && - echo generated_rsa_key >> list_exp && - echo quxel >> list_exp && - echo self >> list_exp - ipfs key list > list_out && - test_sort_cmp list_exp list_out - ' - - test_expect_success "key hashes show up in long list output" ' - ipfs key list -l | grep $edhash > /dev/null && - ipfs key list -l | grep $rsahash > /dev/null - ' - - test_expect_success "key list -l contains self key with peerID" ' - PeerID="$(ipfs config Identity.PeerID)" - ipfs key list -l --ipns-base=b58mh | grep "$PeerID\s\+self" - ' - - test_expect_success "key rm removes a key" ' - ipfs key rm generated_rsa_key - echo generated_ed25519_key > list_exp && - echo quxel >> list_exp && - echo self >> list_exp - ipfs key list > list_out && - test_sort_cmp list_exp list_out - ' - - test_expect_success "key rm can't remove self" ' - test_must_fail ipfs key rm self 2>&1 | tee key_rm_out && - grep -q "Error: cannot remove key with name" key_rm_out - ' - - test_expect_success "key rename renames a key" ' - ipfs key rename generated_ed25519_key fooed - echo fooed > list_exp && - echo quxel >> list_exp && - echo self >> list_exp - ipfs key list > list_out && - test_sort_cmp list_exp list_out - ' - - test_expect_success "key rename output looks good" ' - key_content=$(ipfs key gen key1 --type=rsa --size=2048) && - ipfs key rename key1 key2 >rs && - echo "Key $key_content renamed to key2" >expect && - test_cmp rs expect - ' - - test_expect_success "key rename can't rename self" ' - test_must_fail ipfs key rename self bar 2>&1 | tee key_rename_out && - grep -q "Error: cannot rename key with name" key_rename_out - ' - - test_expect_success "key 
rename can't overwrite self, even with force" ' - test_must_fail ipfs key rename -f fooed self 2>&1 | tee key_rename_out && - grep -q "Error: cannot overwrite key with name" key_rename_out - ' - - test_launch_ipfs_daemon - - test_expect_success "online import rsa key" ' - ipfs key import generated_rsa_key generated_rsa_key.key > roundtrip_rsa_key_id && - test_cmp rsa_key_id roundtrip_rsa_key_id - ' - - # export works directly on the keystore present in IPFS_PATH - test_expect_success "prepare ed25519 key while daemon is running" ' - edhash=$(ipfs key gen generated_ed25519_key --type=ed25519) - echo $edhash > ed25519_key_id - ' - - test_key_import_export_all_formats ed25519_key - - test_openssl_compatibility_all_types - - test_expect_success "key export over HTTP /api/v0/key/export is not possible" ' - ipfs key gen nohttpexporttest_key --type=ed25519 && - curl -X POST -sI "http://$API_ADDR/api/v0/key/export?arg=nohttpexporttest_key" | grep -q "^HTTP/1.1 404 Not Found" - ' - - test_expect_success "'ipfs key rotate' fails while daemon is running" ' - test_must_fail ipfs key rotate - ' - - test_kill_ipfs_daemon - -} - -test_check_rsa2048_sk() { - sklen=$(ls -l $1 | awk '{print $5}') && - test "$sklen" -lt "1600" && test "$sklen" -gt "1000" || { - echo "Bad RSA2048 sk '$1' with len '$sklen'" - return 1 - } -} - -test_check_ed25519_sk() { - sklen=$(ls -l $1 | awk '{print $5}') && - test "$sklen" -lt "100" && test "$sklen" -gt "30" || { - echo "Bad ED25519 sk '$1' with len '$sklen'" - return 1 - } -} - -test_key_import_export_all_formats() { - KEY_NAME=$1 - test_key_import_export $KEY_NAME pem-pkcs8-cleartext - test_key_import_export $KEY_NAME libp2p-protobuf-cleartext -} - -test_key_import_export() { - local KEY_NAME FORMAT - KEY_NAME=$1 - FORMAT=$2 - ORIG_KEY="generated_$KEY_NAME" - if [ $FORMAT == "pem-pkcs8-cleartext" ]; then - FILE_EXT="pem" - else - FILE_EXT="key" - fi - - test_expect_success "export and import $KEY_NAME with format $FORMAT" ' - ipfs key export $ORIG_KEY --format=$FORMAT && - ipfs key rm $ORIG_KEY && - ipfs key import $ORIG_KEY $ORIG_KEY.$FILE_EXT --format=$FORMAT > imported_key_id && - test_cmp ${KEY_NAME}_id imported_key_id - ' -} - -# Test the entire import/export cycle with an openssl-generated key. -# 1. Import openssl key with PEM format. -# 2. Export key with libp2p format. -# 3. Reimport key. -# 4. Export again with PEM format. -# 5. Compare with original openssl key. -# 6. Clean up. 
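# For reference (editor's sketch): PEM fixtures like the ones used below can be
# produced with stock OpenSSL; the exact commands used to create the committed
# test data are an assumption:
# openssl genpkey -algorithm ED25519 -out openssl_ed25519.pem
# openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -out openssl_rsa.pem
# openssl genpkey -algorithm EC -pkeyopt ec_paramgen_curve:secp384r1 -out openssl_secp384r1.pem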
-test_openssl_compatibility() { - local KEY_NAME FORMAT - KEY_NAME=$1 - - test_expect_success "import and export $KEY_NAME with all formats" ' - ipfs key import test-openssl -f pem-pkcs8-cleartext $KEY_NAME > /dev/null && - ipfs key export test-openssl -f libp2p-protobuf-cleartext -o $KEY_NAME.libp2p.key && - ipfs key rm test-openssl && - - ipfs key import test-openssl -f libp2p-protobuf-cleartext $KEY_NAME.libp2p.key > /dev/null && - ipfs key export test-openssl -f pem-pkcs8-cleartext -o $KEY_NAME.ipfs-exported.pem && - ipfs key rm test-openssl && - - test_cmp $KEY_NAME $KEY_NAME.ipfs-exported.pem && - - rm $KEY_NAME.libp2p.key && - rm $KEY_NAME.ipfs-exported.pem - ' -} - -test_openssl_compatibility_all_types() { - test_openssl_compatibility ../t0165-keystore-data/openssl_ed25519.pem - test_openssl_compatibility ../t0165-keystore-data/openssl_rsa.pem -} - - -test_key_cmd - -test_done diff --git a/test/sharness/t0180-p2p.sh b/test/sharness/t0180-p2p.sh deleted file mode 100755 index 4564fba90f9..00000000000 --- a/test/sharness/t0180-p2p.sh +++ /dev/null @@ -1,381 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test experimental p2p commands" - -. lib/test-lib.sh - -# start iptb + wait for peering -test_expect_success 'init iptb' ' - iptb testbed create -type localipfs --count 3 --init -' - -test_expect_success 'generate test data' ' - echo "ABCDEF" > test0.bin && - echo "012345" > test1.bin -' - -startup_cluster 3 - -test_expect_success 'peer ids' ' - PEERID_0=$(iptb attr get 0 id) && - PEERID_1=$(iptb attr get 1 id) -' -check_test_ports() { - test_expect_success "test ports are closed" ' - (! (netstat -aln | grep "LISTEN" | grep -E "[.:]10101 ")) && - (! (netstat -aln | grep "LISTEN" | grep -E "[.:]10102 ")) && - (! (netstat -aln | grep "LISTEN" | grep -E "[.:]10103 ")) && - (! (netstat -aln | grep "LISTEN" | grep -E "[.:]10104 ")) - ' -} -check_test_ports - -test_expect_success 'fail without config option being enabled' ' - test_must_fail ipfsi 0 p2p stream ls -' - -test_expect_success "enable Libp2pStreamMounting config setting" ' - ipfsi 0 config --json Experimental.Libp2pStreamMounting true - ipfsi 1 config --json Experimental.Libp2pStreamMounting true - ipfsi 2 config --json Experimental.Libp2pStreamMounting true -' - -test_expect_success 'start p2p listener' ' - ipfsi 0 p2p listen /x/p2p-test /ip4/127.0.0.1/tcp/10101 2>&1 > listener-stdouterr.log -' - -test_expect_success 'cannot re-register p2p listener' ' - test_must_fail ipfsi 0 p2p listen /x/p2p-test /ip4/127.0.0.1/tcp/10103 2>&1 > listener-stdouterr.log -' - -# Server to client communications - -spawn_sending_server() { - test_expect_success 'S->C Spawn sending server' ' - ma-pipe-unidir --listen --pidFile=listener.pid send /ip4/127.0.0.1/tcp/10101 < test0.bin & - - test_wait_for_file 30 100ms listener.pid && - kill -0 $(cat listener.pid) - ' -} - -test_server_to_client() { - test_expect_success 'S->C Connect and receive data' ' - ma-pipe-unidir recv /ip4/127.0.0.1/tcp/10102 > client.out - ' - - test_expect_success 'S->C Ensure server finished' ' - test ! 
-f listener.pid - ' - - test_expect_success 'S->C Output looks good' ' - test_cmp client.out test0.bin - ' -} - -spawn_sending_server - -test_expect_success 'S->C(/p2p/peerID) Setup client side' ' - ipfsi 1 p2p forward /x/p2p-test /ip4/127.0.0.1/tcp/10102 /p2p/${PEERID_0} 2>&1 > dialer-stdouterr.log -' - -test_expect_success 'S->C Setup(dnsaddr/addr/p2p/peerID) client side' ' - ipfsi 1 p2p forward /x/p2p-test /ip4/127.0.0.1/tcp/10103 /dnsaddr/bootstrap.libp2p.io/p2p/${PEERID_0} 2>&1 > dialer-stdouterr.log -' - -test_expect_success 'S->C Setup(dnsaddr/addr) client side' ' - ipfsi 1 p2p forward /x/p2p-test /ip4/127.0.0.1/tcp/10104 /dnsaddr/example-dnsaddr.multiformats.io 2>&1 > dialer-stdouterr.log -' - - -test_expect_success 'S->C Output is empty' ' - test_must_be_empty dialer-stdouterr.log -' - -test_expect_success "'ipfs p2p ls | grep' succeeds" ' - ipfsi 1 p2p ls | grep "/x/p2p-test /ip4/127.0.0.1/tcp/10104" -' - -test_server_to_client - -test_expect_success 'S->C Connect with dead server' ' - ma-pipe-unidir recv /ip4/127.0.0.1/tcp/10102 > client.out -' - -test_expect_success 'S->C Output is empty' ' - test_must_be_empty client.out -' - -spawn_sending_server - -test_server_to_client - -test_expect_success 'S->C Close local listener' ' - ipfsi 1 p2p close -p /x/p2p-test -' - -check_test_ports - -# Client to server communications - -test_expect_success 'C->S Spawn receiving server' ' - ma-pipe-unidir --listen --pidFile=listener.pid recv /ip4/127.0.0.1/tcp/10101 > server.out & - - test_wait_for_file 30 100ms listener.pid && - kill -0 $(cat listener.pid) -' - -test_expect_success 'C->S Setup client side' ' - ipfsi 1 p2p forward /x/p2p-test /ip4/127.0.0.1/tcp/10102 /p2p/${PEERID_0} 2>&1 > dialer-stdouterr.log -' - -test_expect_success 'C->S Connect and receive data' ' - ma-pipe-unidir send /ip4/127.0.0.1/tcp/10102 < test1.bin -' - -test_expect_success 'C->S Ensure server finished' ' - go-sleep 250ms && - test ! 
-f listener.pid -' - -test_expect_success 'C->S Output looks good' ' - test_cmp server.out test1.bin -' - -test_expect_success 'C->S Close local listener' ' - ipfsi 1 p2p close -p /x/p2p-test -' - -check_test_ports - -# Checking ports - -test_expect_success "cannot accept port 0 in 'ipfs p2p listen'" ' - test_must_fail ipfsi 2 p2p listen /x/p2p-test/0 /ip4/127.0.0.1/tcp/0 -' - -test_expect_success "'ipfs p2p forward' accepts port 0" ' - ipfsi 2 p2p forward /x/p2p-test/0 /ip4/127.0.0.1/tcp/0 /p2p/$PEERID_0 -' - -test_expect_success "'ipfs p2p ls' output looks good" ' - echo "true" > forward_0_expected && - ipfsi 2 p2p ls | awk '\''{print $2}'\'' | sed "s/.*\///" | awk -F: '\''{if($1>0)print"true"}'\'' > forward_0_actual && - ipfsi 2 p2p close -p /x/p2p-test/0 && - test_cmp forward_0_expected forward_0_actual -' - -# Listing streams - -test_expect_success "'ipfs p2p ls' succeeds" ' - echo "/x/p2p-test /p2p/$PEERID_0 /ip4/127.0.0.1/tcp/10101" > expected && - ipfsi 0 p2p ls > actual -' - -test_expect_success "'ipfs p2p ls' output looks good" ' - test_cmp expected actual -' - -test_expect_success "Cannot re-register app handler" ' - test_must_fail ipfsi 0 p2p listen /x/p2p-test /ip4/127.0.0.1/tcp/10101 -' - -test_expect_success "'ipfs p2p stream ls' output is empty" ' - ipfsi 0 p2p stream ls > actual && - test_must_be_empty actual -' - -check_test_ports - -test_expect_success "Setup: Idle stream" ' - ma-pipe-unidir --listen --pidFile=listener.pid recv /ip4/127.0.0.1/tcp/10101 & - - ipfsi 1 p2p forward /x/p2p-test /ip4/127.0.0.1/tcp/10102 /p2p/$PEERID_0 && - ma-pipe-unidir --pidFile=client.pid recv /ip4/127.0.0.1/tcp/10102 & - - test_wait_for_file 30 100ms listener.pid && - test_wait_for_file 30 100ms client.pid && - kill -0 $(cat listener.pid) && kill -0 $(cat client.pid) -' - -test_expect_success "'ipfs p2p stream ls' succeeds" ' - echo "3 /x/p2p-test /p2p/$PEERID_1 /ip4/127.0.0.1/tcp/10101" > expected - ipfsi 0 p2p stream ls > actual -' - -test_expect_success "'ipfs p2p stream ls' output looks good" ' - test_cmp expected actual -' - -test_expect_success "'ipfs p2p stream close' closes stream" ' - ipfsi 0 p2p stream close 3 && - ipfsi 0 p2p stream ls > actual && - [ ! -f listener.pid ] && [ ! 
-f client.pid ] && - test_must_be_empty actual -' - -test_expect_success "'ipfs p2p close' closes remote handler" ' - ipfsi 0 p2p close -p /x/p2p-test && - ipfsi 0 p2p ls > actual && - test_must_be_empty actual -' - -test_expect_success "'ipfs p2p close' closes local handler" ' - ipfsi 1 p2p close -p /x/p2p-test && - ipfsi 1 p2p ls > actual && - test_must_be_empty actual -' - -check_test_ports - -test_expect_success "Setup: Idle stream(2)" ' - ma-pipe-unidir --listen --pidFile=listener.pid recv /ip4/127.0.0.1/tcp/10101 & - - ipfsi 0 p2p listen /x/p2p-test2 /ip4/127.0.0.1/tcp/10101 2>&1 > listener-stdouterr.log && - ipfsi 1 p2p forward /x/p2p-test2 /ip4/127.0.0.1/tcp/10102 /p2p/$PEERID_0 2>&1 > dialer-stdouterr.log && - ma-pipe-unidir --pidFile=client.pid recv /ip4/127.0.0.1/tcp/10102 & - - test_wait_for_file 30 100ms listener.pid && - test_wait_for_file 30 100ms client.pid && - kill -0 $(cat listener.pid) && kill -0 $(cat client.pid) -' - -test_expect_success "'ipfs p2p stream ls' succeeds(2)" ' - echo "4 /x/p2p-test2 /p2p/$PEERID_1 /ip4/127.0.0.1/tcp/10101" > expected - ipfsi 0 p2p stream ls > actual - test_cmp expected actual -' - -test_expect_success "'ipfs p2p close -a' closes remote app handlers" ' - ipfsi 0 p2p close -a && - ipfsi 0 p2p ls > actual && - test_must_be_empty actual -' - -test_expect_success "'ipfs p2p close -a' closes local app handlers" ' - ipfsi 1 p2p close -a && - ipfsi 1 p2p ls > actual && - test_must_be_empty actual -' - -test_expect_success "'ipfs p2p stream close -a' closes streams" ' - ipfsi 0 p2p stream close -a && - ipfsi 0 p2p stream ls > actual && - [ ! -f listener.pid ] && [ ! -f client.pid ] && - test_must_be_empty actual -' - -check_test_ports - -test_expect_success "'ipfs p2p close' closes app numeric handlers" ' - ipfsi 0 p2p listen /x/1234 /ip4/127.0.0.1/tcp/10101 && - ipfsi 0 p2p close -p /x/1234 && - ipfsi 0 p2p ls > actual && - test_must_be_empty actual -' - -test_expect_success "'ipfs p2p close' closes by target addr" ' - ipfsi 0 p2p listen /x/p2p-test /ip4/127.0.0.1/tcp/10101 && - ipfsi 0 p2p close -t /ip4/127.0.0.1/tcp/10101 && - ipfsi 0 p2p ls > actual && - test_must_be_empty actual -' - -test_expect_success "'ipfs p2p close' closes right listeners" ' - ipfsi 0 p2p listen /x/p2p-test /ip4/127.0.0.1/tcp/10101 && - ipfsi 0 p2p forward /x/p2p-test /ip4/127.0.0.1/tcp/10101 /p2p/$PEERID_1 && - echo "/x/p2p-test /p2p/$PEERID_0 /ip4/127.0.0.1/tcp/10101" > expected && - - ipfsi 0 p2p close -l /ip4/127.0.0.1/tcp/10101 && - ipfsi 0 p2p ls > actual && - test_cmp expected actual -' - -check_test_ports - -test_expect_success "'ipfs p2p close' closes by listen addr" ' - ipfsi 0 p2p close -l /p2p/$PEERID_0 && - ipfsi 0 p2p ls > actual && - test_must_be_empty actual -' - -# Peer reporting - -test_expect_success 'start p2p listener reporting peer' ' - ipfsi 0 p2p listen /x/p2p-test /ip4/127.0.0.1/tcp/10101 --report-peer-id 2>&1 > listener-stdouterr.log -' - -test_expect_success 'C->S Spawn receiving server' ' - ma-pipe-unidir --listen --pidFile=listener.pid recv /ip4/127.0.0.1/tcp/10101 > server.out & - - test_wait_for_file 30 100ms listener.pid && - kill -0 $(cat listener.pid) -' - -test_expect_success 'C->S Setup client side' ' - ipfsi 1 p2p forward /x/p2p-test /ip4/127.0.0.1/tcp/10102 /p2p/${PEERID_0} 2>&1 > dialer-stdouterr.log -' - -test_expect_success 'C->S Connect and receive data' ' - ma-pipe-unidir send /ip4/127.0.0.1/tcp/10102 < test1.bin -' - -test_expect_success 'C->S Ensure server finished' ' - go-sleep 250ms && - test ! 
-f listener.pid -' - -test_expect_success 'C->S Output looks good' ' - echo ${PEERID_1} > expected && - cat test1.bin >> expected && - test_cmp server.out expected -' - -test_expect_success 'C->S Close listeners' ' - ipfsi 1 p2p close -p /x/p2p-test && - ipfsi 0 p2p close -p /x/p2p-test && - - ipfsi 0 p2p ls > actual && - test_must_be_empty actual && - - ipfsi 1 p2p ls > actual && - test_must_be_empty actual -' - -test_expect_success "non /x/ scoped protocols are not allowed" ' - test_must_fail ipfsi 0 p2p listen /its/not/a/x/path /ip4/127.0.0.1/tcp/10101 2> actual && - echo "Error: protocol name must be within '"'"'/x/'"'"' namespace" > expected - test_cmp expected actual -' - -check_test_ports - -test_expect_success 'start p2p listener on custom proto' ' - ipfsi 0 p2p listen --allow-custom-protocol /p2p-test /ip4/127.0.0.1/tcp/10101 2>&1 > listener-stdouterr.log && - test_must_be_empty listener-stdouterr.log -' - -spawn_sending_server - -test_expect_success 'S->C Setup client side (custom proto)' ' - ipfsi 1 p2p forward --allow-custom-protocol /p2p-test /ip4/127.0.0.1/tcp/10102 /p2p/${PEERID_0} 2>&1 > dialer-stdouterr.log -' - -test_server_to_client - -test_expect_success 'C->S Close local listener' ' - ipfsi 1 p2p close -p /p2p-test - ipfsi 1 p2p ls > actual && - test_must_be_empty actual -' - -check_test_ports - -test_expect_success 'stop iptb' ' - iptb stop -' - -check_test_ports - -test_done - diff --git a/test/sharness/t0181-private-network.sh b/test/sharness/t0181-private-network.sh deleted file mode 100755 index 5f8979d3a52..00000000000 --- a/test/sharness/t0181-private-network.sh +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2015 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test private network feature" - -. 
lib/test-lib.sh - -test_init_ipfs - -export LIBP2P_FORCE_PNET=1 - -test_expect_success "daemon won't start with force pnet env but with no key" ' - test_must_fail go-timeout 5 ipfs daemon > stdout 2>&1 -' - -unset LIBP2P_FORCE_PNET - -test_expect_success "daemon output includes info about the reason" ' - grep "private network was not configured but is enforced by the environment" stdout || - test_fsh cat stdout -' - -pnet_key() { - echo '/key/swarm/psk/1.0.0/' - echo '/bin/' - random 32 -} - -pnet_key > "${IPFS_PATH}/swarm.key" - -LIBP2P_FORCE_PNET=1 test_launch_ipfs_daemon - -test_expect_success "set up iptb testbed" ' - iptb testbed create -type localipfs -count 5 -force -init && - iptb run -- ipfs config --json "Routing.LoopbackAddressesOnLanDHT" true && - iptb run -- ipfs config --json "Swarm.Transports.Network.Websocket" false && - iptb run -- ipfs config --json Addresses.Swarm '"'"'["/ip4/127.0.0.1/tcp/0"]'"'"' -' - -set_key() { - node="$1" - keyfile="$2" - - cp "$keyfile" "${IPTB_ROOT}/testbeds/default/${node}/swarm.key" -} - -pnet_key > key1 -pnet_key > key2 - -set_key 1 key1 -set_key 2 key1 - -set_key 3 key2 -set_key 4 key2 - -unset LIBP2P_FORCE_PNET - -test_expect_success "start nodes" ' - iptb start -wait [0-4] -' - -test_expect_success "try connecting node in public network with priv networks" ' - test_must_fail iptb connect --timeout=2s [1-4] 0 -' - -test_expect_success "node 0 (public network) swarm is empty" ' - ipfsi 0 swarm peers && - [ $(ipfsi 0 swarm peers | wc -l) -eq 0 ] -' - -test_expect_success "try connecting nodes in different private networks" ' - test_must_fail iptb connect 2 3 -' - -test_expect_success "node 3 (pnet 2) swarm is empty" ' - ipfsi 3 swarm peers && - [ $(ipfsi 3 swarm peers | wc -l) -eq 0 ] -' - -test_expect_success "connect nodes in the same pnet" ' - iptb connect 1 2 && - iptb connect 3 4 -' - -test_expect_success "nodes 1 and 2 have connected" ' - ipfsi 2 swarm peers && - [ $(ipfsi 2 swarm peers | wc -l) -eq 1 ] -' - -test_expect_success "nodes 3 and 4 have connected" ' - ipfsi 4 swarm peers && - [ $(ipfsi 4 swarm peers | wc -l) -eq 1 ] -' - - -run_single_file_test() { - node1=$1 - node2=$2 - - test_expect_success "add a file on node$node1" ' - random 1000000 > filea && - FILEA_HASH=$(ipfsi $node1 add -q filea) - ' - - check_file_fetch $node1 $FILEA_HASH filea - check_file_fetch $node2 $FILEA_HASH filea -} - -check_file_fetch() { - node="$1" - fhash="$2" - fname="$3" - - test_expect_success "can fetch file" ' - ipfsi $node cat $fhash > fetch_out - ' - - test_expect_success "file looks good" ' - test_cmp $fname fetch_out - ' -} - -run_single_file_test 1 2 -run_single_file_test 2 1 - -run_single_file_test 3 4 -run_single_file_test 4 3 - - -test_expect_success "stop testbed" ' - iptb stop -' - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0182-circuit-relay.sh b/test/sharness/t0182-circuit-relay.sh deleted file mode 100755 index c79edfc8ea8..00000000000 --- a/test/sharness/t0182-circuit-relay.sh +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test circuit relay" - -. 
lib/test-lib.sh - -# start iptb + wait for peering -NUM_NODES=3 -test_expect_success 'init iptb' ' - iptb testbed create -type localipfs -count $NUM_NODES -init && - iptb run -- ipfs config --json "Routing.LoopbackAddressesOnLanDHT" true -' - -# Network topology: A <-> Relay <-> B -test_expect_success 'start up nodes for configuration' ' - iptb start -wait -- --routing=none -' - -test_expect_success 'peer ids' ' - PEERID_0=$(iptb attr get 0 id) && - PEERID_1=$(iptb attr get 1 id) && - PEERID_2=$(iptb attr get 2 id) -' - -relayaddrs=$(ipfsi 1 swarm addrs local | jq --raw-input . | jq --slurp .) -staticrelay=$(ipfsi 1 swarm addrs local | sed -e "s|$|/p2p/$PEERID_1|g" | jq --raw-input . | jq --slurp .) - -test_expect_success 'configure the relay node as a static relay for node A' ' - ipfsi 0 config Internal.Libp2pForceReachability private && - ipfsi 0 config --json Swarm.RelayClient.Enabled true && - ipfsi 0 config --json Swarm.RelayClient.StaticRelays "$staticrelay" -' - -test_expect_success 'configure the relay node' ' - ipfsi 1 config Internal.Libp2pForceReachability public && - ipfsi 1 config --json Swarm.RelayService.Enabled true && - ipfsi 1 config --json Addresses.Swarm "$relayaddrs" -' - -test_expect_success 'configure node B' ' - ipfsi 2 config Internal.Libp2pForceReachability private && - ipfsi 2 config --json Swarm.RelayClient.Enabled true -' - -test_expect_success 'restart nodes' ' - iptb stop && - iptb_wait_stop && - iptb start -wait -- --routing=none -' - -test_expect_success 'connect A <-> Relay' ' - iptb connect 0 1 -' - -test_expect_success 'connect B <-> Relay' ' - iptb connect 2 1 -' - -test_expect_success 'wait until relay is ready to do work' ' - while ! ipfsi 2 swarm connect /p2p/$PEERID_1/p2p-circuit/p2p/$PEERID_0; do - iptb stop && - iptb_wait_stop && - iptb start -wait -- --routing=none && - iptb connect 0 1 && - iptb connect 2 1 && - sleep 5 - done -' - -test_expect_success 'connect A <-Relay-> B' ' - ipfsi 2 swarm connect /p2p/$PEERID_1/p2p-circuit/p2p/$PEERID_0 > peers_out -' - -test_expect_success 'output looks good' ' - echo "connect $PEERID_0 success" > peers_exp && - test_cmp peers_exp peers_out -' - -test_expect_success 'peers for A look good' ' - ipfsi 0 swarm peers > peers_out && - test_should_contain "/p2p/$PEERID_1/p2p-circuit/p2p/$PEERID_2$" peers_out -' - -test_expect_success 'peers for B look good' ' - ipfsi 2 swarm peers > peers_out && - test_should_contain "/p2p/$PEERID_1/p2p-circuit/p2p/$PEERID_0$" peers_out -' - -test_expect_success 'stop iptb' ' - iptb stop -' - -test_done diff --git a/test/sharness/t0183-namesys-pubsub.sh b/test/sharness/t0183-namesys-pubsub.sh deleted file mode 100755 index f9658e507bf..00000000000 --- a/test/sharness/t0183-namesys-pubsub.sh +++ /dev/null @@ -1,125 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test IPNS pubsub" -
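# A note on the mechanics exercised below: with IPNS-over-pubsub enabled, a
# node that resolves /ipns/<peerID> is implicitly subscribed to that name's
# pubsub topic, so later publishes by the key owner are pushed to subscribers
# directly instead of being discovered through DHT lookups. That is why this
# suite first issues resolves that are expected to fail: they exist only for
# their subscription side effect, which 'ipfs name pubsub subs' then verifies.
-. 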
lib/test-lib.sh - -# start iptb + wait for peering -NUM_NODES=5 -test_expect_success 'init iptb' ' - iptb testbed create -type localipfs -count $NUM_NODES -init -' - -run_ipnspubsub_tests() { - - test_expect_success 'peer ids' ' - PEERID_0_BASE36=$(ipfsi 0 key list --ipns-base=base36 -l | grep self | head -n 1 | cut -d " " -f1) && - PEERID_0_B58MH=$(ipfsi 0 key list --ipns-base=b58mh -l | grep self | head -n 1 | cut -d " " -f1) - ' - - test_expect_success 'check namesys pubsub state' ' - echo enabled > expected && - ipfsi 0 name pubsub state > state0 && - ipfsi 1 name pubsub state > state1 && - ipfsi 2 name pubsub state > state2 && - test_cmp expected state0 && - test_cmp expected state1 && - test_cmp expected state2 - ' - - # These commands are *expected* to fail. We haven't published anything yet. - test_expect_success 'subscribe nodes to the publisher topic' ' - ipfsi 1 name resolve /ipns/$PEERID_0_BASE36 --timeout=1s; - ipfsi 2 name resolve /ipns/$PEERID_0_BASE36 --timeout=1s; - true - ' - - test_expect_success 'check subscriptions' ' - echo /ipns/$PEERID_0_BASE36 > expected_base36 && - echo /ipns/$PEERID_0_B58MH > expected_b58mh && - ipfsi 1 name pubsub subs > subs1 && - ipfsi 2 name pubsub subs > subs2 && - ipfsi 1 name pubsub subs --ipns-base=b58mh > subs1_b58mh && - ipfsi 2 name pubsub subs --ipns-base=b58mh > subs2_b58mh && - test_cmp expected_base36 subs1 && - test_cmp expected_base36 subs2 && - test_cmp expected_b58mh subs1_b58mh && - test_cmp expected_b58mh subs2_b58mh - ' - - test_expect_success 'add an object on publisher node' ' - echo "ipns is super fun" > file && - HASH_FILE=$(ipfsi 0 add -q file) - ' - - test_expect_success 'publish that object as an ipns entry' ' - ipfsi 0 name publish $HASH_FILE - ' - - test_expect_success 'wait for the flood' ' - sleep 1 - ' - - test_expect_success 'resolve name in subscriber nodes' ' - echo "/ipfs/$HASH_FILE" > expected && - ipfsi 1 name resolve /ipns/$PEERID_0_BASE36 > name1 && - ipfsi 2 name resolve /ipns/$PEERID_0_BASE36 > name2 && - test_cmp expected name1 && - test_cmp expected name2 - ' - - test_expect_success 'cancel subscriptions to the publisher topic' ' - ipfsi 1 name pubsub cancel /ipns/$PEERID_0_BASE36 && - ipfsi 2 name pubsub cancel /ipns/$PEERID_0_BASE36 - ' - - test_expect_success 'check subscriptions' ' - rm -f expected && touch expected && - ipfsi 1 name pubsub subs > subs1 && - ipfsi 2 name pubsub subs > subs2 && - test_cmp expected subs1 && - test_cmp expected subs2 - ' - - test_expect_success "shut down iptb" ' - iptb stop - ' - -} - -# Test everything with ipns-pubsub enabled via config -test_expect_success 'enable ipns over pubsub' ' - iptb run -- ipfs config --json Ipns.UsePubsub true -' - -startup_cluster $NUM_NODES -run_ipnspubsub_tests - -# Test again, this time the CLI parameter overrides the config -test_expect_success 'disable ipns over pubsub via config' ' - iptb run -- ipfs config --json Ipns.UsePubsub false -' -startup_cluster $NUM_NODES --enable-namesys-pubsub -run_ipnspubsub_tests - -# Confirm negative CLI flag takes precedence over positive config - -test_expect_success 'enable the pubsub-ipns via config' ' - iptb run -- ipfs config --json Ipns.UsePubsub true -' -startup_cluster $NUM_NODES --enable-namesys-pubsub=false - -test_expect_success 'ipns pubsub cmd fails because it was disabled via cli flag' ' - test_expect_code 1 ipfsi 1 name pubsub subs 2> pubsubipns_cmd_out -' - -test_expect_success "ipns pubsub cmd produces error" " - echo -e \"Error: IPNS pubsub subsystem is not enabled\nUse 'ipfs name pubsub subs 
--help' for information about this command\" > expected && - test_cmp expected pubsubipns_cmd_out -" - -test_expect_success 'stop iptb' ' - iptb stop -' - -test_done diff --git a/test/sharness/t0184-http-proxy-over-p2p.sh b/test/sharness/t0184-http-proxy-over-p2p.sh deleted file mode 100755 index 98e2f3ab20c..00000000000 --- a/test/sharness/t0184-http-proxy-over-p2p.sh +++ /dev/null @@ -1,254 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test http proxy over p2p" - -. lib/test-lib.sh - -if ! test_have_prereq SOCAT; then - skip_all="skipping '$test_description': socat is not available" - test_done -fi - -WEB_SERVE_PORT=5099 -IPFS_GATEWAY_PORT=5199 -SENDER_GATEWAY="http://127.0.0.1:$IPFS_GATEWAY_PORT" - -function show_logs() { - - echo "*****************" - echo " RECEIVER LOG " - echo "*****************" - iptb logs 1 - echo "*****************" - echo " SENDER LOG " - echo "*****************" - iptb logs 0 - echo "*****************" - echo "REMOTE_SERVER LOG" - echo $REMOTE_SERVER_LOG - echo "*****************" - cat $REMOTE_SERVER_LOG -} - -function start_http_server() { - REMOTE_SERVER_LOG="server.log" - rm -f $REMOTE_SERVER_LOG - - touch response - socat tcp-listen:$WEB_SERVE_PORT,fork,bind=127.0.0.1,reuseaddr 'SYSTEM:cat response'!!CREATE:$REMOTE_SERVER_LOG & - REMOTE_SERVER_PID=$! - - socat /dev/null tcp:127.0.0.1:$WEB_SERVE_PORT,retry=10 - return $? -} - -function teardown_remote_server() { - exec 7<&- - kill $REMOTE_SERVER_PID > /dev/null 2>&1 - wait $REMOTE_SERVER_PID || true -} - -function serve_content() { - local body=$1 - local status_code=${2:-"200 OK"} - local length=$((1 + ${#body})) - echo -e "HTTP/1.1 $status_code\nContent-length: $length\n\n$body" > response -} - -function curl_check_response_code() { - local expected_status_code=$1 - local path_stub=${2:-p2p/$RECEIVER_ID/http/index.txt} - local status_code=$(curl -s --write-out %{http_code} --output /dev/null $SENDER_GATEWAY/$path_stub) - - if [[ "$status_code" -ne "$expected_status_code" ]]; - then - echo "Found status-code "$status_code", expected "$expected_status_code - return 1 - fi - - return 0 -} - -function curl_send_proxy_request_and_check_response() { - local expected_status_code=$1 - local expected_content=$2 - - # - # make a request to SENDER_IPFS via the proxy endpoint - # - CONTENT_PATH="retrieved-file" - STATUS_CODE="$(curl -s -o $CONTENT_PATH --write-out %{http_code} $SENDER_GATEWAY/p2p/$RECEIVER_ID/http/index.txt)" - - # - # check status code - # - if [[ "$STATUS_CODE" -ne "$expected_status_code" ]]; - then - echo -e "Found status-code "$STATUS_CODE", expected "$expected_status_code - show_logs - return 1 - fi - - # - # check content - # - RESPONSE_CONTENT="$(tail -n 1 $CONTENT_PATH)" - if [[ "$RESPONSE_CONTENT" == "$expected_content" ]]; - then - return 0 - else - echo -e "Found response content:\n'"$RESPONSE_CONTENT"'\nthat differs from expected content:\n'"$expected_content"'" - return 1 - fi -} - -function curl_send_multipart_form_request() { - local expected_status_code=$1 - local FILE_PATH="uploaded-file" - FILE_CONTENT="curl will send a multipart-form POST request when sending a file which is handy" - echo $FILE_CONTENT > $FILE_PATH - # - # send multipart form request - # - STATUS_CODE="$(curl -o /dev/null -s -F file=@$FILE_PATH --write-out %{http_code} $SENDER_GATEWAY/p2p/$RECEIVER_ID/http/index.txt)" - # - # check status code - # - if [[ "$STATUS_CODE" -ne "$expected_status_code" ]]; - then - echo -e "Found status-code "$STATUS_CODE", expected "$expected_status_code - return 1 - fi - # - # 
check request method - # - if ! grep "POST /index.txt" $REMOTE_SERVER_LOG > /dev/null; - then - echo "Remote server request method/resource path was incorrect" - show_logs - return 1 - fi - # - # check request is multipart-form - # - if ! grep "Content-Type: multipart/form-data;" $REMOTE_SERVER_LOG > /dev/null; - then - echo "Request content-type was not multipart/form-data" - show_logs - return 1 - fi - return 0 -} - -test_expect_success 'configure nodes' ' - iptb testbed create -type localipfs -count 2 -force -init && - iptb run -- ipfs config --json "Routing.LoopbackAddressesOnLanDHT" true && - ipfsi 0 config --json Experimental.Libp2pStreamMounting true && - ipfsi 1 config --json Experimental.Libp2pStreamMounting true && - ipfsi 0 config --json Experimental.P2pHttpProxy true && - ipfsi 0 config --json Addresses.Gateway "[\"/ip4/127.0.0.1/tcp/$IPFS_GATEWAY_PORT\"]" -' - -test_expect_success 'configure a subdomain gateway with /p2p/ path whitelisted' " - ipfsi 0 config --json Gateway.PublicGateways '{ - \"example.com\": { - \"UseSubdomains\": true, - \"Paths\": [\"/p2p/\"] - } - }' -" - -test_expect_success 'start and connect nodes' ' - iptb start -wait && iptb connect 0 1 -' - -test_expect_success 'setup p2p listener on the receiver' ' - ipfsi 1 p2p listen --allow-custom-protocol /http /ip4/127.0.0.1/tcp/$WEB_SERVE_PORT && - ipfsi 1 p2p listen /x/custom/http /ip4/127.0.0.1/tcp/$WEB_SERVE_PORT -' - -test_expect_success 'setup environment' ' - RECEIVER_ID=$(ipfsi 1 id -f="<id>" --peerid-base=b58mh) - RECEIVER_ID_CIDv1=$(ipfsi 1 id -f="<id>" --peerid-base=base36) -' - -test_expect_success 'handle proxy http request sends bad-gateway when remote server not available ' ' - curl_send_proxy_request_and_check_response 502 "" -' - -test_expect_success 'start http server' ' - start_http_server -' - -test_expect_success 'handle proxy http request propagates error response from remote' ' - serve_content "SORRY GUYS, I LOST IT" "404 Not Found" && - curl_send_proxy_request_and_check_response 404 "SORRY GUYS, I LOST IT" -' - -test_expect_success 'handle proxy http request ' ' - serve_content "THE WOODS ARE LOVELY DARK AND DEEP" && - curl_send_proxy_request_and_check_response 200 "THE WOODS ARE LOVELY DARK AND DEEP" -' - -test_expect_success 'handle proxy http request invalid request' ' - curl_check_response_code 400 p2p/DERPDERPDERP -' - -test_expect_success 'handle proxy http request unknown proxy peer ' ' - UNKNOWN_PEER="k51qzi5uqu5dlmbel1sd8rs4emr3bfosk9bm4eb42514r4lakt4oxw3a3fa2tm" && - curl_check_response_code 502 p2p/$UNKNOWN_PEER/http/index.txt -' - -test_expect_success 'handle proxy http request to invalid proxy peer ' ' - curl_check_response_code 400 p2p/invalid_peer/http/index.txt -' - -test_expect_success 'handle proxy http request to custom protocol' ' - serve_content "THE WOODS ARE LOVELY DARK AND DEEP" && - curl_check_response_code 200 p2p/$RECEIVER_ID/x/custom/http/index.txt -' - -test_expect_success 'handle proxy http request to missing protocol' ' - serve_content "THE WOODS ARE LOVELY DARK AND DEEP" && - curl_check_response_code 502 p2p/$RECEIVER_ID/x/missing/http/index.txt -' - -test_expect_success 'handle proxy http request missing the /http' ' - curl_check_response_code 400 p2p/$RECEIVER_ID/x/custom/index.txt -' - -test_expect_success 'handle multipart/form-data http request' ' - serve_content "OK" && - curl_send_multipart_form_request 200 -' - -# OK: $peerid.p2p.example.com/http/index.txt -test_expect_success "handle http request to a subdomain gateway" ' - serve_content "SUBDOMAIN 
PROVIDES ORIGIN ISOLATION PER RECEIVER_ID" && - curl -H "Host: $RECEIVER_ID_CIDv1.p2p.example.com" -sD - $SENDER_GATEWAY/http/index.txt > p2p_response && - test_should_contain "SUBDOMAIN PROVIDES ORIGIN ISOLATION PER RECEIVER_ID" p2p_response -' - -# FAIL: $peerid.p2p.example.com/p2p/$peerid/http/index.txt -test_expect_success "handle invalid http request to a subdomain gateway" ' - serve_content "SUBDOMAIN DOES NOT SUPPORT FULL /p2p/ PATH" && - curl -H "Host: $RECEIVER_ID_CIDv1.p2p.example.com" -sD - $SENDER_GATEWAY/p2p/$RECEIVER_ID/http/index.txt > p2p_response && - test_should_contain "400 Bad Request" p2p_response -' - -# REDIRECT: example.com/p2p/$peerid/http/index.txt → $peerid.p2p.example.com/http/index.txt -test_expect_success "redirect http path request to subdomain gateway" ' - serve_content "SUBDOMAIN ROOT REDIRECTS /p2p/ PATH TO SUBDOMAIN" && - curl -H "Host: example.com" -sD - $SENDER_GATEWAY/p2p/$RECEIVER_ID/http/index.txt > p2p_response && - test_should_contain "Location: http://$RECEIVER_ID_CIDv1.p2p.example.com/http/index.txt" p2p_response -' - -test_expect_success 'stop http server' ' - teardown_remote_server -' - -test_expect_success 'stop nodes' ' - iptb stop -' - -test_done diff --git a/test/sharness/t0185-autonat.sh b/test/sharness/t0185-autonat.sh deleted file mode 100755 index 9cde1ad5cbd..00000000000 --- a/test/sharness/t0185-autonat.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test autonat" - -. lib/test-lib.sh - -# NOTE: This is currently just a smoke test to make sure the service starts. -# We need better tests, but testing AutoNAT without public IP addresses -# is tricky. - -test_init_ipfs - -test_expect_success "enable autonat" ' - ipfs config AutoNAT.ServiceMode enabled -' - -test_launch_ipfs_daemon - -test_kill_ipfs_daemon - -test_expect_success "disable autonat" ' - ipfs config AutoNAT.ServiceMode disabled -' - -test_launch_ipfs_daemon - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0190-quic-ping.sh b/test/sharness/t0190-quic-ping.sh deleted file mode 100755 index 28335870d1b..00000000000 --- a/test/sharness/t0190-quic-ping.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test ping over QUIC command" - -. 
lib/test-lib.sh - -test_init_ipfs - -# start iptb + wait for peering -test_expect_success 'init iptb' ' - iptb testbed create -type localipfs -count 2 -init -' - -addr1='"[\"/ip4/127.0.0.1/udp/0/quic-v1\"]"' -addr2='"[\"/ip4/127.0.0.1/udp/0/quic-v1\"]"' -test_expect_success "add QUIC swarm addresses" ' - ipfsi 0 config --json Addresses.Swarm '$addr1' && - ipfsi 1 config --json Addresses.Swarm '$addr2' -' - -startup_cluster 2 - -test_expect_success 'peer ids' ' - PEERID_0=$(iptb attr get 0 id) && - PEERID_1=$(iptb attr get 1 id) -' - -test_expect_success "test ping other" ' - ipfsi 0 ping -n2 -- "$PEERID_1" && - ipfsi 1 ping -n2 -- "$PEERID_0" -' - -test_expect_success "test ping self" ' - test_must_fail ipfsi 0 ping -n2 -- "$PEERID_0" && - test_must_fail ipfsi 1 ping -n2 -- "$PEERID_1" -' - -test_expect_success "test ping 0" ' - test_must_fail ipfsi 0 ping -n0 -- "$PEERID_1" && - test_must_fail ipfsi 1 ping -n0 -- "$PEERID_0" -' - -test_expect_success 'stop iptb' ' - iptb stop -' - -test_done diff --git a/test/sharness/t0191-webtransport-ping.sh b/test/sharness/t0191-webtransport-ping.sh deleted file mode 100755 index 4b5d20ed07c..00000000000 --- a/test/sharness/t0191-webtransport-ping.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test ping over WebTransport command" - -. lib/test-lib.sh - -test_init_ipfs - -# start iptb + wait for peering -test_expect_success 'init iptb' ' - iptb testbed create -type localipfs -count 2 -init -' - -addr1='"[\"/ip4/127.0.0.1/udp/0/quic-v1/webtransport\"]"' -addr2='"[\"/ip4/127.0.0.1/udp/0/quic-v1/webtransport\"]"' -test_expect_success "add WebTransport swarm addresses" ' - ipfsi 0 config --json Addresses.Swarm '$addr1' && - ipfsi 0 config --json Swarm.Transports.Network.WebTransport true && - ipfsi 1 config --json Addresses.Swarm '$addr2' && - ipfsi 1 config --json Swarm.Transports.Network.WebTransport true -' - -startup_cluster 2 - -test_expect_success 'peer ids' ' - PEERID_0=$(iptb attr get 0 id) && - PEERID_1=$(iptb attr get 1 id) -' - -test_expect_success "test ping other" ' - ipfsi 0 ping -n2 -- "$PEERID_1" && - ipfsi 1 ping -n2 -- "$PEERID_0" -' - -test_expect_success "test ping self" ' - test_must_fail ipfsi 0 ping -n2 -- "$PEERID_0" && - test_must_fail ipfsi 1 ping -n2 -- "$PEERID_1" -' - -test_expect_success "test ping 0" ' - test_must_fail ipfsi 0 ping -n0 -- "$PEERID_1" && - test_must_fail ipfsi 1 ping -n0 -- "$PEERID_0" -' - -test_expect_success 'stop iptb' ' - iptb stop -' - -test_done diff --git a/test/sharness/t0195-noise.sh b/test/sharness/t0195-noise.sh deleted file mode 100755 index 63e3703ded2..00000000000 --- a/test/sharness/t0195-noise.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test ping over NOISE command" - -. 
lib/test-lib.sh - -test_init_ipfs - -# start iptb + wait for peering -test_expect_success 'init iptb' ' - iptb testbed create -type localipfs -count 4 -init -' - -tcp_addr='"[\"/ip4/127.0.0.1/tcp/0\"]"' -test_expect_success "configure security transports" ' -iptb run < connect_error 2>&1 && - test_should_contain "failed to negotiate security protocol" connect_error || - test_fsh cat connect_error -' - -test_expect_success 'stop iptb' ' - iptb stop -' - -test_done diff --git a/test/sharness/t0220-bitswap.sh b/test/sharness/t0220-bitswap.sh deleted file mode 100755 index 412437651be..00000000000 --- a/test/sharness/t0220-bitswap.sh +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2015 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="test bitswap commands" - -. lib/test-lib.sh - -test_init_ipfs -test_launch_ipfs_daemon - -test_expect_success "'ipfs bitswap stat' succeeds" ' - ipfs bitswap stat >stat_out -' - -test_expect_success "'ipfs bitswap stat' output looks good" ' - cat <<EOF >expected && -bitswap status - blocks received: 0 - blocks sent: 0 - data received: 0 - data sent: 0 - dup blocks received: 0 - dup data received: 0 - wantlist [0 keys] - partners [0] -EOF - test_cmp expected stat_out -' - -test_expect_success "ipfs peer id looks good" ' - PEERID=$(ipfs config Identity.PeerID) && - test_check_peerid "$PEERID" -' - -test_expect_success "'ipfs bitswap wantlist -p' works" ' - ipfs bitswap wantlist -p "$PEERID" >wantlist_p_out -' - -test_expect_success "'ipfs bitswap wantlist -p' output looks good" ' - test_must_be_empty wantlist_p_out -' - -test_expect_success "hash was removed from wantlist" ' - ipfs bitswap wantlist > wantlist_out && - test_must_be_empty wantlist_out -' - -test_expect_success "'ipfs bitswap stat' succeeds" ' - ipfs bitswap stat >stat_out -' - -test_expect_success "'ipfs bitswap stat' output looks good" ' - cat <<EOF >expected && -bitswap status - blocks received: 0 - blocks sent: 0 - data received: 0 - data sent: 0 - dup blocks received: 0 - dup data received: 0 - wantlist [0 keys] - partners [0] -EOF - test_cmp expected stat_out -' - -test_expect_success "'ipfs bitswap wantlist -p' works" ' - ipfs bitswap wantlist -p "$PEERID" >wantlist_p_out -' - -test_expect_success "'ipfs bitswap wantlist -p' output looks good" ' - test_cmp wantlist_out wantlist_p_out -' - -test_expect_success "'ipfs bitswap stat --human' succeeds" ' - ipfs bitswap stat --human >stat_out_human -' - - -test_expect_success "'ipfs bitswap stat --human' output looks good" ' - cat <<EOF >expected && -bitswap status - blocks received: 0 - blocks sent: 0 - data received: 0 B - data sent: 0 B - dup blocks received: 0 - dup data received: 0 B - wantlist [0 keys] - partners [0] -EOF - test_cmp expected stat_out_human -' - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0230-channel-streaming-http-content-type.sh b/test/sharness/t0230-channel-streaming-http-content-type.sh deleted file mode 100755 index be23d8151c0..00000000000 --- a/test/sharness/t0230-channel-streaming-http-content-type.sh +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2015 Cayman Nava -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test Content-Type for channel-streaming commands" -
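# A note on what "channel streaming" means here: commands that stream results
# over the HTTP RPC API send the response with Transfer-Encoding: chunked plus
# an X-Chunked-Output: 1 header, emit one encoded object per chunk, and report
# mid-stream failures through the X-Stream-Error trailer. The checks below pin
# those headers down for both the text and JSON encodings.
-. 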
lib/test-lib.sh - -test_init_ipfs - -test_ls_cmd() { - - test_expect_success "Text encoded channel-streaming command succeeds" ' - mkdir -p testdir && - echo "hello test" >testdir/test.txt && - ipfs add -r testdir && - curl -X POST -i "http://$API_ADDR/api/v0/refs?arg=QmTcJAn3JP8ZMAKS6WS75q8sbTyojWKbxcUHgLYGWur4Ym&stream-channels=true&encoding=text" >actual_output - ' - - test_expect_success "Text encoded channel-streaming command output looks good" ' - printf "HTTP/1.1 200 OK\r\n" >expected_output && - printf "Access-Control-Allow-Headers: X-Stream-Output, X-Chunked-Output, X-Content-Length\r\n" >>expected_output && - printf "Access-Control-Expose-Headers: X-Stream-Output, X-Chunked-Output, X-Content-Length\r\n" >>expected_output && - printf "Content-Type: text/plain\r\n" >>expected_output && - printf "Server: kubo/%s\r\n" $(ipfs version -n) >>expected_output && - printf "Trailer: X-Stream-Error\r\n" >>expected_output && - printf "Vary: Origin\r\n" >>expected_output && - printf "X-Chunked-Output: 1\r\n" >>expected_output && - printf "Transfer-Encoding: chunked\r\n" >>expected_output && - printf "\r\n" >>expected_output && - echo QmRmPLc1FsPAn8F8F9DQDEYADNX5ER2sgqiokEvqnYknVW >>expected_output && - cat actual_output | grep -vE Date > cleaned_output && - test_cmp expected_output cleaned_output - ' - - test_expect_success "JSON encoded channel-streaming command succeeds" ' - mkdir -p testdir && - echo "hello test" >testdir/test.txt && - ipfs add -r testdir && - curl -X POST -i "http://$API_ADDR/api/v0/refs?arg=QmTcJAn3JP8ZMAKS6WS75q8sbTyojWKbxcUHgLYGWur4Ym&stream-channels=true&encoding=json" >actual_output - ' - - test_expect_success "JSON encoded channel-streaming command output looks good" ' - printf "HTTP/1.1 200 OK\r\n" >expected_output && - printf "Access-Control-Allow-Headers: X-Stream-Output, X-Chunked-Output, X-Content-Length\r\n" >>expected_output && - printf "Access-Control-Expose-Headers: X-Stream-Output, X-Chunked-Output, X-Content-Length\r\n" >>expected_output && - printf "Content-Type: application/json\r\n" >>expected_output && - printf "Server: kubo/%s\r\n" $(ipfs version -n) >>expected_output && - printf "Trailer: X-Stream-Error\r\n" >>expected_output && - printf "Vary: Origin\r\n" >>expected_output && - printf "X-Chunked-Output: 1\r\n" >>expected_output && - printf "Transfer-Encoding: chunked\r\n" >>expected_output && - printf "\r\n" >>expected_output && - cat <<-\EOF >>expected_output && -{"Ref":"QmRmPLc1FsPAn8F8F9DQDEYADNX5ER2sgqiokEvqnYknVW","Err":""} -EOF - printf "\n" >> expected_output && - perl -pi -e '"'"'chomp if eof'"'"' expected_output && - cat actual_output | grep -vE Date > cleaned_output && - test_cmp expected_output cleaned_output - ' -} - -# should work online (only) -test_launch_ipfs_daemon -test_ls_cmd -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0231-channel-streaming.sh b/test/sharness/t0231-channel-streaming.sh deleted file mode 100755 index 36e855fb7c2..00000000000 --- a/test/sharness/t0231-channel-streaming.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2015 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test output of streaming json commands" - -. 
lib/test-lib.sh - -test_init_ipfs - -get_api_port() { - cat "$IPFS_PATH/api" | awk -F/ '{ print $5 }' -} - -test_ls_cmd() { - test_expect_success "make a file with multiple refs" ' - HASH=$(random 1000000 | ipfs add -q) - ' - - test_expect_success "can get refs through curl" ' - PORT=$(get_api_port) && - curl http://localhost:$PORT/api/v0/refs/$HASH > output - ' - - # make sure newlines are printed between each object - test_expect_success "output looks good" ' - test_expect_code 1 grep "}{" output > /dev/null - ' -} - -# should work online (only) -test_launch_ipfs_daemon -test_ls_cmd -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0235-cli-request.sh b/test/sharness/t0235-cli-request.sh deleted file mode 100755 index 02ef514dedf..00000000000 --- a/test/sharness/t0235-cli-request.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2015 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="test http requests made by cli" - -. lib/test-lib.sh - -if ! test_have_prereq SOCAT; then - skip_all="skipping '$test_description': socat is not available" - test_done -fi - - -test_init_ipfs - -test_expect_success "start nc" ' - rm -f nc_out nc_outp nc_inp && mkfifo nc_inp nc_outp - - socat PIPE:nc_inp!!PIPE:nc_outp tcp-listen:5005,fork,max-children=1,bind=127.0.0.1 & - NCPID=$! - - exec 6>nc_inp 7nc_out && - - echo -e "HTTP/1.1 200 OK\r" >&6 && - echo -e "Content-Type: application/json\r" >&6 && - echo -e "Content-Length: 21\r" >&6 && - echo -e "\r" >&6 && - echo -e "{\"Version\":\"0.23.0\"}\r" >&6 && - echo -e "\r" >&6 && - - # handle request for /api/v0/cat - while read line; do - if [[ "$line" == "$(echo -e "\r")" ]]; then - break - fi - echo "$line" - done <&7 >nc_out && - - echo -e "HTTP/1.1 200 OK\r" >&6 && - echo -e "Content-Type: text/plain\r" >&6 && - echo -e "Content-Length: 0\r" >&6 && - echo -e "\r" >&6 && - exec 6<&- && - - # Wait for IPFS - wait $IPFSPID -' - -test_expect_success "stop nc" ' - kill "$NCPID" && wait "$NCPID" || true -' - -test_expect_success "output does not contain multipart info" ' - test_expect_code 1 grep multipart nc_out -' - -test_expect_success "request looks good" ' - grep "POST /api/v0/cat" nc_out -' - -test_expect_success "api flag does not appear in request" ' - test_expect_code 1 grep "api=/ip4" nc_out -' - -test_expect_success "host has dns name not ip address" ' - grep "Host: localhost:5005" nc_out -' - -test_done diff --git a/test/sharness/t0236-cli-api-dns-resolve.sh b/test/sharness/t0236-cli-api-dns-resolve.sh deleted file mode 100755 index b4213183401..00000000000 --- a/test/sharness/t0236-cli-api-dns-resolve.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2015 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="test dns resolution of api endpoint by cli" - -. lib/test-lib.sh - -if ! test_have_prereq SOCAT; then - skip_all="skipping '$test_description': socat is not available" - test_done -fi - -test_init_ipfs - -test_expect_success "start nc" ' - rm -f nc_out nc_outp nc_inp && mkfifo nc_inp nc_outp - - socat PIPE:nc_inp!!PIPE:nc_outp tcp-listen:5006,fork,max-children=1,bind=127.0.0.1 & - NCPID=$! 
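# The socat process above bridges the two FIFOs to a local TCP listener:
# whatever the test writes to nc_inp is relayed to the connected client, and
# whatever the client sends shows up on nc_outp. Via fds 6 and 7 the test can
# read the request issued by the cli and hand-script a raw HTTP response.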
- - exec 6>nc_inp 7nc_out && - - echo -e "HTTP/1.1 200 OK\r" >&6 && - echo -e "Content-Type: application/json\r" >&6 && - echo -e "Content-Length: 21\r" >&6 && - echo -e "\r" >&6 && - echo -e "{\"Version\":\"0.23.0\"}\r" >&6 && - echo -e "\r" >&6 && - - # handle request for /api/v0/cat - while read line; do - if [[ "$line" == "$(echo -e "\r")" ]]; then - break - fi - echo "$line" - done <&7 >nc_out && - - echo -e "HTTP/1.1 200 OK\r" >&6 && - echo -e "Content-Type: text/plain\r" >&6 && - echo -e "Content-Length: 0\r" >&6 && - echo -e "\r" >&6 && - exec 6<&- && - - # Wait for IPFS - wait $IPFSPID -' - -test_expect_success "stop nc" ' - kill "$NCPID" && wait "$NCPID" || true -' - -test_expect_success "request was received by local nc server" ' - grep "POST /api/v0/cat" nc_out -' - -test_done diff --git a/test/sharness/t0240-republisher.sh b/test/sharness/t0240-republisher.sh deleted file mode 100755 index e52b8bee54a..00000000000 --- a/test/sharness/t0240-republisher.sh +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2014 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test IPNS republisher" - -. lib/test-lib.sh - -export DEBUG=true - -setup_iptb() { - num_nodes="$1" - bound=$(expr "$num_nodes" - 1) - - test_expect_success "iptb init" ' - iptb testbed create -type localipfs -count $num_nodes -init - ' - - for i in $(test_seq 0 "$bound") - do - test_expect_success "set configs up for node $i" ' - ipfsi "$i" config Ipns.RepublishPeriod 40s && - ipfsi "$i" config --json Ipns.ResolveCacheSize 0 - ' - done - - startup_cluster "$num_nodes" -} - -teardown_iptb() { - test_expect_success "shut down nodes" ' - iptb stop - ' -} - -verify_can_resolve() { - num_nodes="$1" - bound=$(expr "$num_nodes" - 1) - name="$2" - expected="$3" - msg="$4" - - for node in $(test_seq 0 "$bound") - do - test_expect_success "$msg: node $node can resolve entry" ' - ipfsi "$node" name resolve "$name" > resolve - ' - - test_expect_success "$msg: output for node $node looks right" ' - printf "/ipfs/$expected\n" > expected && - test_cmp expected resolve - ' - done -} - -verify_cannot_resolve() { - num_nodes="$1" - bound=$(expr "$num_nodes" - 1) - name="$2" - msg="$3" - - for node in $(test_seq 0 "$bound") - do - test_expect_success "$msg: resolution fails on node $node" ' - test_expect_code 1 ipfsi "$node" name resolve "$name" - ' - done -} - -num_test_nodes=4 - -setup_iptb "$num_test_nodes" - -test_expect_success "publish succeeds" ' - HASH=$(date +"%FT%T.%N%z" | ipfsi 1 add -q) && - ipfsi 1 name publish -t 10s $HASH -' - -test_expect_success "get id succeeds" ' - id=$(ipfsi 1 id -f "<id>") -' - -verify_can_resolve "$num_test_nodes" "$id" "$HASH" "just after publishing" - -go-sleep 10s - -verify_cannot_resolve "$num_test_nodes" "$id" "after 10 seconds, records are invalid" - -go-sleep 30s - -verify_can_resolve "$num_test_nodes" "$id" "$HASH" "republisher fires after 30 seconds" - -# - -test_expect_success "generate new key" ' -KEY2=`ipfsi 1 key gen beepboop --type ed25519` -' - -test_expect_success "publish with new key succeeds" ' - HASH=$(date +"%FT%T.%N%z" | ipfsi 1 add -q) && - ipfsi 1 name publish -t 10s -k "$KEY2" $HASH -' - -verify_can_resolve "$num_test_nodes" "$KEY2" "$HASH" "new key just after publishing" - -go-sleep 10s - -verify_cannot_resolve "$num_test_nodes" "$KEY2" "new key cannot resolve after 10 seconds" - -go-sleep 30s - -verify_can_resolve "$num_test_nodes" "$KEY2" "$HASH" "new key can resolve again after republish" - -# - 
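# The fixed go-sleep windows above encode the timing contract under test:
# records are published with a 10s lifetime (-t 10s) while Ipns.RepublishPeriod
# is 40s, so resolution must fail once a record expires and succeed again after
# the republisher refreshes it. A polling helper along the lines of the sketch
# below could make such checks less sensitive to scheduler jitter; note that
# wait_for_resolve is illustrative and not a helper this suite defines.

wait_for_resolve() {
  # Retry 'name resolve' on one node until it succeeds or the tries run out.
  local node="$1" name="$2" tries="${3:-30}"
  while [ "$tries" -gt 0 ]; do
    ipfsi "$node" name resolve "$name" > resolve && return 0
    tries=$((tries - 1))
    go-sleep 1s
  done
  return 1
}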
-teardown_iptb - -test_done diff --git a/test/sharness/t0250-files-api.sh b/test/sharness/t0250-files-api.sh deleted file mode 100755 index 9c01a5bcf17..00000000000 --- a/test/sharness/t0250-files-api.sh +++ /dev/null @@ -1,930 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2015 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="test the unix files api" - -. lib/test-lib.sh - -test_init_ipfs - -create_files() { - FILE1=$(echo foo | ipfs add "$@" -q) && - FILE2=$(echo bar | ipfs add "$@" -q) && - FILE3=$(echo baz | ipfs add "$@" -q) && - mkdir -p stuff_test && - echo cats > stuff_test/a && - echo dogs > stuff_test/b && - echo giraffes > stuff_test/c && - DIR1=$(ipfs add -r "$@" -Q stuff_test) -} - -verify_path_exists() { - # simply running ls on a file should be a good 'check' - ipfs files ls $1 -} - -verify_dir_contents() { - dir=$1 - shift - rm -f expected - touch expected - for e in $@ - do - echo $e >> expected - done - - test_expect_success "can list dir" ' - ipfs files ls $dir > output - ' - - test_expect_success "dir entries look good" ' - test_sort_cmp output expected - ' -} - -test_sharding() { - local EXTRA ARGS - EXTRA=$1 - ARGS=$2 # only applied to the initial directory - - test_expect_success "make a directory $EXTRA" ' - ipfs files mkdir $ARGS /foo - ' - - test_expect_success "can make 100 files in a directory $EXTRA" ' - printf "" > list_exp_raw - for i in `seq 100 -1 1` - do - echo $i | ipfs files write --create /foo/file$i || return 1 - echo file$i >> list_exp_raw - done - ' - # Create the files in reverse (unsorted) order (`seq 100 -1 1`) - # to check the sort in the `ipfs files ls` command. `ProtoNode` - # links are always sorted at the DAG layer so the sorting feature - # is tested with sharded directories. - - test_expect_success "sorted listing works $EXTRA" ' - ipfs files ls /foo > list_out && - sort list_exp_raw > list_exp && - test_cmp list_exp list_out - ' - - test_expect_success "unsorted listing works $EXTRA" ' - ipfs files ls -U /foo > list_out && - sort list_exp_raw > sort_list_not_exp && - ! 
test_cmp sort_list_not_exp list_out - ' - - test_expect_success "can read a file from sharded directory $EXTRA" ' - ipfs files read /foo/file65 > file_out && - echo "65" > file_exp && - test_cmp file_out file_exp - ' - - test_expect_success "can pin a file from sharded directory $EXTRA" ' - ipfs files stat --hash /foo/file42 > pin_file_hash && - ipfs pin add < pin_file_hash > pin_hash - ' - - test_expect_success "can unpin a file from sharded directory $EXTRA" ' - read -r _ HASH _ < pin_hash && - ipfs pin rm $HASH - ' - - test_expect_success "output object was really sharded and has correct hash $EXTRA" ' - ipfs files stat --hash /foo > expected_foo_hash && - echo $SHARD_HASH > actual_foo_hash && - test_cmp expected_foo_hash actual_foo_hash - ' - - test_expect_success "clean up $EXTRA" ' - ipfs files rm -r /foo - ' -} - -test_files_api() { - local EXTRA ARGS RAW_LEAVES - EXTRA=$1 - ARGS=$2 - RAW_LEAVES=$3 - - test_expect_success "can mkdir in root $EXTRA" ' - ipfs files mkdir $ARGS /cats - ' - - test_expect_success "'files ls' lists root by default $EXTRA" ' - ipfs files ls >actual && - echo "cats" >expected && - test_cmp expected actual - ' - - test_expect_success "directory was created $EXTRA" ' - verify_path_exists /cats - ' - - test_expect_success "directory is empty $EXTRA" ' - verify_dir_contents /cats - ' - # we do verification of stat formatting now as we depend on it - - test_expect_success "stat works $EXTRA" ' - ipfs files stat / >stat - ' - - test_expect_success "hash is first line of stat $EXTRA" ' - ipfs ls $(head -1 stat) | grep "cats" - ' - - test_expect_success "stat --hash gives only hash $EXTRA" ' - ipfs files stat --hash / >actual && - head -n1 stat >expected && - test_cmp expected actual - ' - - test_expect_success "stat with multiple format options should fail $EXTRA" ' - test_must_fail ipfs files stat --hash --size / - ' - - test_expect_success "compare hash option with format $EXTRA" ' - ipfs files stat --hash / >expected && - ipfs files stat --format='"'"'<hash>'"'"' / >actual && - test_cmp expected actual - ' - test_expect_success "compare size option with format $EXTRA" ' - ipfs files stat --size / >expected && - ipfs files stat --format='"'"'<size>'"'"' / >actual && - test_cmp expected actual - ' - - test_expect_success "check root hash $EXTRA" ' - ipfs files stat --hash / > roothash - ' - - test_expect_success "stat works outside of MFS" ' - ipfs files stat /ipfs/$DIR1 - ' - - test_expect_success "stat computes the locality of a dag" ' - ipfs files stat --with-local /ipfs/$DIR1 > output - grep -q "(100.00%)" output - ' - - test_expect_success "cannot mkdir / $EXTRA" ' - test_expect_code 1 ipfs files mkdir $ARGS / - ' - - test_expect_success "check root hash was not changed $EXTRA" ' - ipfs files stat --hash / > roothashafter && - test_cmp roothash roothashafter - ' - - test_expect_success "can put files into directory $EXTRA" ' - ipfs files cp /ipfs/$FILE1 /cats/file1 - ' - - test_expect_success "file shows up in directory $EXTRA" ' - verify_dir_contents /cats file1 - ' - - test_expect_success "file has correct hash and size in directory $EXTRA" ' - echo "file1 $FILE1 4" > ls_l_expected && - ipfs files ls -l /cats > ls_l_actual && - test_cmp ls_l_expected ls_l_actual - ' - - test_expect_success "file has correct hash and size listed with -l" ' - echo "file1 $FILE1 4" > ls_l_expected && - ipfs files ls -l /cats/file1 > ls_l_actual && - test_cmp ls_l_expected ls_l_actual - ' - - test_expect_success "file has correct hash and size listed with --long" ' - echo "file1 $FILE1 4" 
> ls_l_expected && - ipfs files ls --long /cats/file1 > ls_l_actual && - test_cmp ls_l_expected ls_l_actual - ' - - test_expect_success "file has correct hash and size listed with -l --cid-base=base32" ' - echo "file1 `cid-fmt -v 1 -b base32 %s $FILE1` 4" > ls_l_expected && - ipfs files ls --cid-base=base32 -l /cats/file1 > ls_l_actual && - test_cmp ls_l_expected ls_l_actual - ' - - test_expect_success "file shows up with the correct name" ' - echo "file1" > ls_l_expected && - ipfs files ls /cats/file1 > ls_l_actual && - test_cmp ls_l_expected ls_l_actual - ' - - test_expect_success "can stat file $EXTRA" ' - ipfs files stat /cats/file1 > file1stat_orig - ' - - test_expect_success "stat output looks good" ' - grep -v CumulativeSize: file1stat_orig > file1stat_actual && - echo "$FILE1" > file1stat_expect && - echo "Size: 4" >> file1stat_expect && - echo "ChildBlocks: 0" >> file1stat_expect && - echo "Type: file" >> file1stat_expect && - echo "Mode: not set (not set)" >> file1stat_expect && - echo "Mtime: not set" >> file1stat_expect && - test_cmp file1stat_expect file1stat_actual - ' - - test_expect_success "can stat file with --cid-base=base32 $EXTRA" ' - ipfs files stat --cid-base=base32 /cats/file1 > file1stat_orig - ' - - test_expect_success "stat output looks good with --cid-base=base32" ' - grep -v CumulativeSize: file1stat_orig > file1stat_actual && - echo `cid-fmt -v 1 -b base32 %s $FILE1` > file1stat_expect && - echo "Size: 4" >> file1stat_expect && - echo "ChildBlocks: 0" >> file1stat_expect && - echo "Type: file" >> file1stat_expect && - echo "Mode: not set (not set)" >> file1stat_expect && - echo "Mtime: not set" >> file1stat_expect && - test_cmp file1stat_expect file1stat_actual - ' - - test_expect_success "can read file $EXTRA" ' - ipfs files read /cats/file1 > file1out - ' - - test_expect_success "output looks good $EXTRA" ' - echo foo > expected && - test_cmp expected file1out - ' - - test_expect_success "can put another file into root $EXTRA" ' - ipfs files cp /ipfs/$FILE2 /file2 - ' - - test_expect_success "file shows up in root $EXTRA" ' - verify_dir_contents / file2 cats - ' - - test_expect_success "can read file $EXTRA" ' - ipfs files read /file2 > file2out - ' - - test_expect_success "output looks good $EXTRA" ' - echo bar > expected && - test_cmp expected file2out - ' - - test_expect_success "can make deep directory $EXTRA" ' - ipfs files mkdir $ARGS -p /cats/this/is/a/dir - ' - - test_expect_success "directory was created correctly $EXTRA" ' - verify_path_exists /cats/this/is/a/dir && - verify_dir_contents /cats this file1 && - verify_dir_contents /cats/this is && - verify_dir_contents /cats/this/is a && - verify_dir_contents /cats/this/is/a dir && - verify_dir_contents /cats/this/is/a/dir - ' - - test_expect_success "dir has correct name" ' - DIR_HASH=$(ipfs files stat /cats/this --hash) && - echo "this/ $DIR_HASH 0" > ls_dir_expected && - ipfs files ls -l /cats | grep this/ > ls_dir_actual && - test_cmp ls_dir_expected ls_dir_actual - ' - - test_expect_success "can copy file into new dir $EXTRA" ' - ipfs files cp /ipfs/$FILE3 /cats/this/is/a/dir/file3 - ' - - test_expect_success "can copy file into deep dir using -p flag $EXTRA" ' - ipfs files cp -p /ipfs/$FILE3 /cats/some/other/dir/file3 - ' - - test_expect_success "file copied into deep dir exists $EXTRA" ' - ipfs files read /cats/some/other/dir/file3 > file_out && - echo "baz" > file_exp && - test_cmp file_out file_exp - ' - - test_expect_success "cleanup deep cp -p test $EXTRA" ' - ipfs files rm -r /cats/some - 
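# cp -p created each missing parent (/cats/some/other/dir) in a single call,
# so removing /cats/some recursively is enough to drop the whole chain.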
' - - test_expect_success "can read file $EXTRA" ' - ipfs files read /cats/this/is/a/dir/file3 > output - ' - - test_expect_success "output looks good $EXTRA" ' - echo baz > expected && - test_cmp expected output - ' - - test_expect_success "file shows up in dir $EXTRA" ' - verify_dir_contents /cats/this/is/a/dir file3 - ' - - test_expect_success "can remove file $EXTRA" ' - ipfs files rm /cats/this/is/a/dir/file3 - ' - - test_expect_success "file no longer appears $EXTRA" ' - verify_dir_contents /cats/this/is/a/dir - ' - - test_expect_success "can remove dir $EXTRA" ' - ipfs files rm -r /cats/this/is/a/dir - ' - - test_expect_success "dir no longer appears $EXTRA" ' - verify_dir_contents /cats/this/is/a - ' - - test_expect_success "can remove file from root $EXTRA" ' - ipfs files rm /file2 - ' - - test_expect_success "file no longer appears $EXTRA" ' - verify_dir_contents / cats - ' - - test_expect_success "check root hash $EXTRA" ' - ipfs files stat --hash / > roothash - ' - - test_expect_success "cannot remove root $EXTRA" ' - test_expect_code 1 ipfs files rm -r / - ' - - test_expect_success "check root hash was not changed $EXTRA" ' - ipfs files stat --hash / > roothashafter && - test_cmp roothash roothashafter - ' - - # test read options - - test_expect_success "read from offset works $EXTRA" ' - ipfs files read -o 1 /cats/file1 > output - ' - - test_expect_success "output looks good $EXTRA" ' - echo oo > expected && - test_cmp expected output - ' - - test_expect_success "read with size works $EXTRA" ' - ipfs files read -n 2 /cats/file1 > output - ' - - test_expect_success "output looks good $EXTRA" ' - printf fo > expected && - test_cmp expected output - ' - - test_expect_success "cannot read from negative offset $EXTRA" ' - test_expect_code 1 ipfs files read --offset -3 /cats/file1 - ' - - test_expect_success "read from offset 0 works $EXTRA" ' - ipfs files read --offset 0 /cats/file1 > output - ' - - test_expect_success "output looks good $EXTRA" ' - echo foo > expected && - test_cmp expected output - ' - - test_expect_success "read last byte works $EXTRA" ' - ipfs files read --offset 2 /cats/file1 > output - ' - - test_expect_success "output looks good $EXTRA" ' - echo o > expected && - test_cmp expected output - ' - - test_expect_success "offset past end of file fails $EXTRA" ' - test_expect_code 1 ipfs files read --offset 5 /cats/file1 - ' - - test_expect_success "cannot read negative count bytes $EXTRA" ' - test_expect_code 1 ipfs files read --count -1 /cats/file1 - ' - - test_expect_success "reading zero bytes prints nothing $EXTRA" ' - ipfs files read --count 0 /cats/file1 > output - ' - - test_expect_success "output looks good $EXTRA" ' - printf "" > expected && - test_cmp expected output - ' - - test_expect_success "count > len(file) prints entire file $EXTRA" ' - ipfs files read --count 200 /cats/file1 > output - ' - - test_expect_success "output looks good $EXTRA" ' - echo foo > expected && - test_cmp expected output - ' - - # test write - - test_expect_success "can write file $EXTRA" ' - echo "ipfs rocks" > tmpfile && - cat tmpfile | ipfs files write $ARGS $RAW_LEAVES --create /cats/ipfs - ' - - test_expect_success "file was created $EXTRA" ' - verify_dir_contents /cats ipfs file1 this - ' - - test_expect_success "can read file we just wrote $EXTRA" ' - ipfs files read /cats/ipfs > output - ' - - test_expect_success "can write to offset $EXTRA" ' - echo "is super cool" | ipfs files write $ARGS $RAW_LEAVES -o 5 /cats/ipfs - ' - - test_expect_success "file looks correct $EXTRA" 
' - echo "ipfs is super cool" > expected && - ipfs files read /cats/ipfs > output && - test_cmp expected output - ' - - test_expect_success "file hash correct $EXTRA" ' - echo $FILE_HASH > filehash_expected && - ipfs files stat --hash /cats/ipfs > filehash && - test_cmp filehash_expected filehash - ' - - test_expect_success "can't write to negative offset $EXTRA" ' - test_expect_code 1 ipfs files write $ARGS $RAW_LEAVES --offset -1 /cats/ipfs < output - ' - - test_expect_success "verify file was not changed $EXTRA" ' - ipfs files stat --hash /cats/ipfs > afterhash && - test_cmp filehash afterhash - ' - - test_expect_success "write new file for testing $EXTRA" ' - echo foobar | ipfs files write $ARGS $RAW_LEAVES --create /fun - ' - - test_expect_success "write to offset past end works $EXTRA" ' - echo blah | ipfs files write $ARGS $RAW_LEAVES --offset 50 /fun - ' - - test_expect_success "can read file $EXTRA" ' - ipfs files read /fun > sparse_output - ' - - test_expect_success "output looks good $EXTRA" ' - echo foobar > sparse_expected && - echo blah | dd of=sparse_expected bs=50 seek=1 && - test_cmp sparse_expected sparse_output - ' - - test_expect_success "cleanup $EXTRA" ' - ipfs files rm /fun - ' - - test_expect_success "cannot write to directory $EXTRA" ' - ipfs files stat --hash /cats > dirhash && - test_expect_code 1 ipfs files write $ARGS $RAW_LEAVES /cats < output - ' - - test_expect_success "verify dir was not changed $EXTRA" ' - ipfs files stat --hash /cats > afterdirhash && - test_cmp dirhash afterdirhash - ' - - test_expect_success "cannot write to nonexistent path $EXTRA" ' - test_expect_code 1 ipfs files write $ARGS $RAW_LEAVES /cats/bar/ < output - ' - - test_expect_success "no new paths were created $EXTRA" ' - verify_dir_contents /cats file1 ipfs this - ' - - # Temporary check to uncover the source of a flaky test failure (see - # https://github.com/ipfs/go-ipfs/issues/8131 for more details). - # We suspect that sometimes the daemon isn't running even though we need - # it to be for the `--flush=false` flag to take effect. To try to spot the - # specific error before it manifests itself in the failed test, we explicitly - # poll the daemon API when it should be running ($WITH_DAEMON set). - # Test taken from `test/sharness/lib/test-lib.sh` (but with fewer retries, - # as the daemon is either running or not, and there is no 'bootstrap' time - # needed in this case). - test_expect_success "'ipfs daemon' is running when WITH_DAEMON is set" ' - test -z "$WITH_DAEMON" || - pollEndpoint -host=$API_MADDR -v -tout=1s -tries=3 2>poll_apierr > poll_apiout || - test_fsh cat actual_daemon || test_fsh cat daemon_err || test_fsh cat poll_apierr || test_fsh cat poll_apiout - ' - - test_expect_success "write 'no-flush' succeeds $EXTRA" ' - echo "testing" | ipfs files write $ARGS $RAW_LEAVES -f=false -e /cats/walrus - ' - - # Skip this test if the commands are not being run through the daemon - # ($WITH_DAEMON not set) as standalone commands will *always* flush - # after being done and the 'no-flush' call from the previous test will - # not be enforced. The skipped check follows the sketch below. 
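# The contract being pinned down here: with --flush=false (-f is the short
# form), MFS edits update the in-memory root but are not committed to the
# repo until something flushes them. A minimal sketch of that behavior, using
# illustrative paths that are not part of this test:
#
#   ipfs files mkdir --flush=false /staging        # root update kept in memory
#   echo draft | ipfs files write --create --flush=false /staging/draft.txt
#   ipfs files flush /                             # commit the new root to the repo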
- test_expect_success "root hash not bubbled up yet $EXTRA" ' - test -z "$WITH_DAEMON" || - (ipfs refs local > refsout && - test_expect_code 1 grep $ROOT_HASH refsout) - ' - - test_expect_success "changes bubbled up to root on inspection $EXTRA" ' - ipfs files stat --hash / > root_hash - ' - - test_expect_success "root hash looks good $EXTRA" ' - export EXP_ROOT_HASH="$ROOT_HASH" && - echo $EXP_ROOT_HASH > root_hash_exp && - test_cmp root_hash_exp root_hash - ' - - test_expect_success "/cats hash looks good $EXTRA" ' - export EXP_CATS_HASH="$CATS_HASH" && - echo $EXP_CATS_HASH > cats_hash_exp && - ipfs files stat --hash /cats > cats_hash - test_cmp cats_hash_exp cats_hash - ' - - test_expect_success "flush root succeeds $EXTRA" ' - ipfs files flush / - ' - - # test mv - test_expect_success "can mv dir $EXTRA" ' - ipfs files mv /cats/this/is /cats/ - ' - - test_expect_success "can mv dir and dest dir is / $EXTRA" ' - ipfs files mv /cats/is / - ' - - test_expect_success "can mv dir and dest dir path has no trailing slash $EXTRA" ' - ipfs files mv /is /cats - ' - - test_expect_success "mv worked $EXTRA" ' - verify_dir_contents /cats file1 ipfs this is walrus && - verify_dir_contents /cats/this - ' - - test_expect_success "cleanup, remove 'cats' $EXTRA" ' - ipfs files rm -r /cats - ' - - test_expect_success "cleanup looks good $EXTRA" ' - verify_dir_contents / - ' - - # test truncating - test_expect_success "create a new file $EXTRA" ' - echo "some content" | ipfs files write $ARGS $RAW_LEAVES --create /cats - ' - - test_expect_success "truncate and write over that file $EXTRA" ' - echo "fish" | ipfs files write $ARGS $RAW_LEAVES --truncate /cats - ' - - test_expect_success "output looks good $EXTRA" ' - ipfs files read /cats > file_out && - echo "fish" > file_exp && - test_cmp file_out file_exp - ' - - test_expect_success "file hash correct $EXTRA" ' - echo $TRUNC_HASH > filehash_expected && - ipfs files stat --hash /cats > filehash && - test_cmp filehash_expected filehash - ' - - test_expect_success "cleanup $EXTRA" ' - ipfs files rm /cats - ' - - # test flush flags - test_expect_success "mkdir --flush works $EXTRA" ' - ipfs files mkdir $ARGS --flush --parents /flushed/deep - ' - - test_expect_success "mkdir --flush works a second time $EXTRA" ' - ipfs files mkdir $ARGS --flush --parents /flushed/deep - ' - - test_expect_success "dir looks right $EXTRA" ' - verify_dir_contents / flushed - ' - - test_expect_success "child dir looks right $EXTRA" ' - verify_dir_contents /flushed deep - ' - - test_expect_success "cleanup $EXTRA" ' - ipfs files rm -r /flushed - ' - - test_expect_success "child dir looks right $EXTRA" ' - verify_dir_contents / - ' - - # test for https://github.com/ipfs/go-ipfs/issues/2654 - test_expect_success "create and remove dir $EXTRA" ' - ipfs files mkdir $ARGS /test_dir && - ipfs files rm -r "/test_dir" - ' - - test_expect_success "create test file $EXTRA" ' - echo "content" | ipfs files write $ARGS $RAW_LEAVES -e "/test_file" - ' - - test_expect_success "copy test file onto test dir $EXTRA" ' - ipfs files cp "/test_file" "/test_dir" - ' - - test_expect_success "test /test_dir $EXTRA" ' - ipfs files stat "/test_dir" | grep -q "^Type: file" - ' - - test_expect_success "clean up /test_dir and /test_file $EXTRA" ' - ipfs files rm -r /test_dir && - ipfs files rm -r /test_file - ' - - test_expect_success "make a directory and a file $EXTRA" ' - ipfs files mkdir $ARGS /adir && - echo "blah" | ipfs files write $ARGS $RAW_LEAVES --create /foobar - ' - - test_expect_success "copy a 
file into a directory $EXTRA" ' - ipfs files cp /foobar /adir/ - ' - - test_expect_success "file made it into directory $EXTRA" ' - ipfs files ls /adir | grep foobar - ' - - test_expect_success "should fail to write file and create intermediate directories with no --parents flag set $EXTRA" ' - echo "ipfs rocks" | test_must_fail ipfs files write --create /parents/foo/ipfs.txt - ' - - test_expect_success "can write file and create intermediate directories $EXTRA" ' - echo "ipfs rocks" | ipfs files write --create --parents /parents/foo/bar/baz/ipfs.txt && - ipfs files stat "/parents/foo/bar/baz/ipfs.txt" | grep -q "^Type: file" - ' - - test_expect_success "can write file and create intermediate directories with short flags $EXTRA" ' - echo "ipfs rocks" | ipfs files write -e -p /parents/foo/bar/baz/qux/quux/garply/ipfs.txt && - ipfs files stat "/parents/foo/bar/baz/qux/quux/garply/ipfs.txt" | grep -q "^Type: file" - ' - - test_expect_success "can write another file in the same directory with -e -p $EXTRA" ' - echo "ipfs rocks" | ipfs files write -e -p /parents/foo/bar/baz/qux/quux/garply/ipfs2.txt && - ipfs files stat "/parents/foo/bar/baz/qux/quux/garply/ipfs2.txt" | grep -q "^Type: file" - ' - - test_expect_success "clean up $EXTRA" ' - ipfs files rm -r /foobar /adir /parents - ' - - test_expect_success "root mfs entry is empty $EXTRA" ' - verify_dir_contents / - ' - - test_expect_success "repo gc $EXTRA" ' - ipfs repo gc - ' - - # test rm - - test_expect_success "remove file forcibly" ' - echo "hello world" | ipfs files write --create /forcibly && - ipfs files rm --force /forcibly && - verify_dir_contents / - ' - - test_expect_success "remove multiple files forcibly" ' - echo "hello world" | ipfs files write --create /forcibly_one && - echo "hello world" | ipfs files write --create /forcibly_two && - ipfs files rm --force /forcibly_one /forcibly_two && - verify_dir_contents / - ' - - test_expect_success "remove directory forcibly" ' - ipfs files mkdir /forcibly-dir && - ipfs files rm --force /forcibly-dir && - verify_dir_contents / - ' - - test_expect_success "remove multiple directories forcibly" ' - ipfs files mkdir /forcibly-dir-one && - ipfs files mkdir /forcibly-dir-two && - ipfs files rm --force /forcibly-dir-one /forcibly-dir-two && - verify_dir_contents / - ' - - test_expect_success "remove multiple files" ' - echo "hello world" | ipfs files write --create /file_one && - echo "hello world" | ipfs files write --create /file_two && - ipfs files rm /file_one /file_two - ' - - test_expect_success "remove multiple directories" ' - ipfs files mkdir /forcibly-dir-one && - ipfs files mkdir /forcibly-dir-two && - ipfs files rm -r /forcibly-dir-one /forcibly-dir-two && - verify_dir_contents / - ' - - test_expect_success "remove nonexistent path forcibly" ' - ipfs files rm --force /nonexistent - ' - - test_expect_success "remove deeply nonexistent path forcibly" ' - ipfs files rm --force /deeply/nonexistent - ' - - # This one should return code 1 but still remove the rest of the valid files. - test_expect_success "remove multiple files (with nonexistent one)" ' - echo "hello world" | ipfs files write --create /file_one && - echo "hello world" | ipfs files write --create /file_two && - test_expect_code 1 ipfs files rm /file_one /nonexistent /file_two && - verify_dir_contents / - ' -} - -# test with and without the daemon (EXTRA="with-daemon" and EXTRA="no-daemon" -# respectively). -# FIXME: Check if we are correctly using the "no-daemon" flag in these test -# combinations. 
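In short, the `--force` runs above reduce to exit-code semantics (a sketch against an initialized repo; the paths are illustrative):

    ipfs files rm --force /no/such/path   # exit 0: missing paths are ignored
    ipfs files rm /no/such/path           # non-zero exit: plain rm reports the error
    # Given several arguments, plain rm still removes every valid path and
    # only the exit code (1) records the nonexistent one.

The daemon/no-daemon combinations described in the comment above are then wired up by the function that follows.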
-tests_for_files_api() { - local EXTRA - EXTRA=$1 - - test_expect_success "can create some files for testing ($EXTRA)" ' - create_files - ' - ROOT_HASH=QmcwKfTMCT7AaeiD92hWjnZn9b6eh9NxnhfSzN5x2vnDpt - CATS_HASH=Qma88m8ErTGkZHbBWGqy1C7VmEmX8wwNDWNpGyCaNmEgwC - FILE_HASH=QmQdQt9qooenjeaNhiKHF3hBvmNteB4MQBtgu3jxgf9c7i - TRUNC_HASH=QmPVnT9gocPbqzN4G6SMp8vAPyzcjDbUJrNdKgzQquuDg4 - test_files_api "($EXTRA)" - - test_expect_success "can create some files for testing with raw-leaves ($EXTRA)" ' - create_files --raw-leaves - ' - - if [ "$EXTRA" = "with-daemon" ]; then - ROOT_HASH=QmTpKiKcAj4sbeesN6vrs5w3QeVmd4QmGpxRL81hHut4dZ - CATS_HASH=QmPhPkmtUGGi8ySPHoPu1qbfryLJKKq1GYxpgLyyCruvGe - test_files_api "($EXTRA, partial raw-leaves)" - fi - - ROOT_HASH=QmW3dMSU6VNd1mEdpk9S3ZYRuR1YwwoXjGaZhkyK6ru9YU - CATS_HASH=QmPqWDEg7NoWRX8Y4vvYjZtmdg5umbfsTQ9zwNr12JoLmt - FILE_HASH=QmRCgHeoKxCqK2Es6M6nPUDVWz19yNQPnsXGsXeuTkSKpN - TRUNC_HASH=QmckstrVxJuecVD1FHUiURJiU9aPURZWJieeBVHJPACj8L - test_files_api "($EXTRA, raw-leaves)" '' --raw-leaves - - ROOT_HASH=QmageRWxC7wWjPv5p36NeAgBAiFdBHaNfxAehBSwzNech2 - CATS_HASH=bafybeig4cpvfu2qwwo3u4ffazhqdhyynfhnxqkzvbhrdbamauthf5mfpuq - FILE_HASH=bafybeibkrazpbejqh3qun7xfnsl7yofl74o4jwhxebpmtrcpavebokuqtm - TRUNC_HASH=bafybeigwhb3q36yrm37jv5fo2ap6r6eyohckqrxmlejrenex4xlnuxiy3e - if [ "$EXTRA" = "with-daemon" ]; then - test_files_api "($EXTRA, cidv1)" --cid-version=1 - fi - - test_expect_success "can update root hash to cidv1" ' - ipfs files chcid --cid-version=1 / && - echo bafybeiczsscdsbs7ffqz55asqdf3smv6klcw3gofszvwlyarci47bgf354 > hash_expect && - ipfs files stat --hash / > hash_actual && - test_cmp hash_expect hash_actual - ' - - ROOT_HASH=bafybeifxnoetaa2jetwmxubv3gqiyaknnujwkkkhdeua63kulm63dcr5wu - test_files_api "($EXTRA, cidv1 root)" - - if [ "$EXTRA" = "with-daemon" ]; then - test_expect_success "can update root hash to blake2b-256" ' - ipfs files chcid --hash=blake2b-256 / && - echo bafykbzacebugfutjir6qie7apo5shpry32ruwfi762uytd5g3u2gk7tpscndq > hash_expect && - ipfs files stat --hash / > hash_actual && - test_cmp hash_expect hash_actual - ' - ROOT_HASH=bafykbzaceb6jv27itwfun6wsrbaxahpqthh5be2bllsjtb3qpmly3vji4mlfk - CATS_HASH=bafykbzacebhpn7rtcjjc5oa4zgzivhs7a6e2tq4uk4px42bubnmhpndhqtjig - FILE_HASH=bafykbzaceca45w2i3o3q3ctqsezdv5koakz7sxsw37ygqjg4w54m2bshzevxy - TRUNC_HASH=bafykbzaceadeu7onzmlq7v33ytjpmo37rsqk2q6mzeqf5at55j32zxbcdbwig - test_files_api "($EXTRA, blake2b-256 root)" - fi - - test_expect_success "can update root hash back to cidv0" ' - ipfs files chcid / --cid-version=0 && - echo QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn > hash_expect && - ipfs files stat --hash / > hash_actual && - test_cmp hash_expect hash_actual - ' -} - -tests_for_files_api "no-daemon" - -test_launch_ipfs_daemon_without_network - -WITH_DAEMON=1 -# FIXME: Used only on a specific test inside `test_files_api` but we should instead -# propagate the `"with-daemon"` argument in its caller `tests_for_files_api`. 
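The `chcid` calls in the function above change only how the MFS root CID is encoded, not the tree it points to; condensed, the round-trip looks like this (a sketch — the concrete hashes asserted by the tests depend on the repo contents and are elided here):

    ipfs files chcid --cid-version=1 /      # root becomes a CIDv1 ("bafy...")
    ipfs files stat --hash /                # print the current root CID
    ipfs files chcid --hash=blake2b-256 /   # same tree, blake2b-256 multihash
    ipfs files chcid / --cid-version=0      # back to a CIDv0 ("Qm...")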
- -tests_for_files_api "with-daemon" - -test_kill_ipfs_daemon - -test_expect_success "enable sharding in config" ' - ipfs config --json Internal.UnixFSShardingSizeThreshold "\"1B\"" -' - -test_launch_ipfs_daemon_without_network - -SHARD_HASH=QmPkwLJTYZRGPJ8Lazr9qPdrLmswPtUjaDbEpmR9jEh1se -test_sharding "(cidv0)" - -SHARD_HASH=bafybeib46tpawg2d2hhlmmn2jvgio33wqkhlehxrem7wbfvqqikure37rm -test_sharding "(cidv1 root)" "--cid-version=1" - -test_kill_ipfs_daemon - -# Test automatic sharding and unsharding - -# We shard based on size with a threshold of 256 KiB (see config file docs) -# above which directories are sharded. -# -# The directory size is estimated as the size of each link. Links are roughly -# the entry name + the CID byte length (e.g. 34 bytes for a CIDv0). So for -# entries of length 10 we need 256 KiB / (34 + 10) ~ 6000 entries in the -# directory to trigger sharding. -test_expect_success "set up automatic sharding/unsharding data" ' - mkdir big_dir - for i in `seq 5960` # Just above the number of entries that trigger sharding for 256KiB - do - echo $i > big_dir/`printf "file%06d" $i` # fixed length of 10 chars - done -' - -test_expect_success "reset automatic sharding" ' - ipfs config --json Internal.UnixFSShardingSizeThreshold null -' - -test_launch_ipfs_daemon_without_network - -LARGE_SHARDED="QmWfjnRWRvdvYezQWnfbvrvY7JjrpevsE9cato1x76UqGr" -LARGE_MINUS_5_UNSHARDED="QmbVxi5zDdzytrjdufUejM92JsWj8wGVmukk6tiPce3p1m" - -test_add_large_sharded_dir() { - exphash="$1" - test_expect_success "ipfs add on directory succeeds" ' - ipfs add -r -Q big_dir > shardbigdir_out && - echo "$exphash" > shardbigdir_exp && - test_cmp shardbigdir_exp shardbigdir_out - ' - - test_expect_success "can access a path under the dir" ' - ipfs cat "$exphash/file000030" > file30_out && - test_cmp big_dir/file000030 file30_out - ' -} - -test_add_large_sharded_dir "$LARGE_SHARDED" - -test_expect_success "remove a few entries from big_dir/ to trigger unsharding" ' - ipfs files cp /ipfs/"$LARGE_SHARDED" /big_dir && - for i in `seq 5` - do - ipfs files rm /big_dir/`printf "file%06d" $i` - done && - ipfs files stat --hash /big_dir > unshard_dir_hash && - echo "$LARGE_MINUS_5_UNSHARDED" > unshard_exp && - test_cmp unshard_exp unshard_dir_hash -' - -test_expect_success "add a few entries to big_dir/ to retrigger sharding" ' - for i in `seq 5` - do - ipfs files cp /ipfs/"$LARGE_SHARDED"/`printf "file%06d" $i` /big_dir/`printf "file%06d" $i` - done && - ipfs files stat --hash /big_dir > shard_dir_hash && - echo "$LARGE_SHARDED" > shard_exp && - test_cmp shard_exp shard_dir_hash -' - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0251-files-flushing.sh b/test/sharness/t0251-files-flushing.sh deleted file mode 100755 index 44e3f73dfdf..00000000000 --- a/test/sharness/t0251-files-flushing.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2016 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="test the unix files api flushing" - -. 
lib/test-lib.sh - -test_init_ipfs - -verify_path_exists() { - # simply running ls on a file should be a good 'check' - ipfs files ls $1 -} - -verify_dir_contents() { - dir=$1 - shift - rm -f expected - touch expected - for e in $@ - do - echo $e >> expected - done - - test_expect_success "can list dir" ' - ipfs files ls $dir > output - ' - - test_expect_success "dir entries look good" ' - test_sort_cmp output expected - ' -} - -test_launch_ipfs_daemon - -test_expect_success "can copy a file in" ' - HASH=$(echo "foo" | ipfs add -q) && - ipfs files cp /ipfs/$HASH /file -' - -test_kill_ipfs_daemon -test_launch_ipfs_daemon - -test_expect_success "file is still there" ' - verify_path_exists /file -' - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0252-files-gc.sh b/test/sharness/t0252-files-gc.sh deleted file mode 100755 index f2eb25b4fcc..00000000000 --- a/test/sharness/t0252-files-gc.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2016 Kevin Atkinson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="test how the unix files api interacts with the gc" - -. lib/test-lib.sh - -test_init_ipfs - -test_expect_success "object not removed after gc" ' - echo "hello world" > hello.txt && - cat hello.txt | ipfs files write --create /hello.txt && - ipfs repo gc && - ipfs cat QmVib14uvPnCP73XaCDpwugRuwfTsVbGyWbatHAmLSdZUS -' - -test_expect_success "/hello.txt still accessible after gc" ' - ipfs files read /hello.txt > hello-actual && - test_cmp hello.txt hello-actual -' - -ADIR_HASH=QmbCgoMYVuZq8m1vK31JQx9DorwQdLMF1M3sJ7kygLLqnW -FILE1_HASH=QmX4eaSJz39mNhdu5ACUwTDpyA6y24HmrQNnAape6u3buS - -test_expect_success "gc okay after adding incomplete node -- prep" ' - ipfs files mkdir /adir && - echo "file1" | ipfs files write --create /adir/file1 && - echo "file2" | ipfs files write --create /adir/file2 && - ipfs pin add --recursive=false $ADIR_HASH && - ipfs files rm -r /adir && - ipfs repo gc && # will remove /adir/file1 and /adir/file2 but not /adir - test_must_fail ipfs cat $FILE1_HASH && - ipfs files cp /ipfs/$ADIR_HASH /adir && - ipfs pin rm $ADIR_HASH -' - -test_expect_success "gc okay after adding incomplete node" ' - ipfs dag get $ADIR_HASH && - ipfs repo gc && - ipfs dag get $ADIR_HASH -' - -test_expect_success "add directory with direct pin" ' - mkdir mydir/ && - echo "hello world!" > mydir/hello.txt && - FILE_UNPINNED=$(ipfs add --pin=false -q -r mydir/hello.txt) && - DIR_PINNED=$(ipfs add --pin=false -Q -r mydir) && - ipfs add --pin=false -r mydir && - ipfs pin add --recursive=false $DIR_PINNED && - ipfs cat $FILE_UNPINNED -' - -test_expect_success "run gc and make sure directory contents are removed" ' - ipfs repo gc && - test_must_fail ipfs cat $FILE_UNPINNED -' - -test_expect_success "add incomplete directory and make sure gc is okay" ' - ipfs files cp /ipfs/$DIR_PINNED /mydir && - ipfs repo gc && - test_must_fail ipfs cat $FILE_UNPINNED -' - -test_expect_success "add back directory contents and run gc" ' - ipfs add --pin=false mydir/hello.txt && - ipfs repo gc -' - -test_expect_success "make sure directory contents are not removed" ' - ipfs cat $FILE_UNPINNED -' - -test_done diff --git a/test/sharness/t0260-sharding.sh b/test/sharness/t0260-sharding.sh deleted file mode 100755 index 85e4a7ca708..00000000000 --- a/test/sharness/t0260-sharding.sh +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2014 Christian Couder -# MIT Licensed; see the LICENSE file in this repository. 
-# - -test_description="Test directory sharding" - -. lib/test-lib.sh - -test_expect_success "set up test data" ' - mkdir testdata - for i in `seq 2000` - do - echo $i > testdata/file$i - done -' - -test_add_dir() { - exphash="$1" - test_expect_success "ipfs add on directory succeeds" ' - ipfs add -r -Q testdata > sharddir_out && - echo "$exphash" > sharddir_exp && - test_cmp sharddir_exp sharddir_out - ' - test_expect_success "ipfs get on directory succeeds" ' - ipfs get -o testdata-out "$exphash" && - test_cmp testdata testdata-out - ' -} - -test_init_ipfs - -UNSHARDED="QmavrTrQG4VhoJmantURAYuw3bowq3E2WcvP36NRQDAC1N" - -test_expect_success "force sharding off" ' -ipfs config --json Internal.UnixFSShardingSizeThreshold "\"1G\"" -' - -test_add_dir "$UNSHARDED" - -test_launch_ipfs_daemon - -test_add_dir "$UNSHARDED" - -test_kill_ipfs_daemon - -test_expect_success "force sharding on" ' - ipfs config --json Internal.UnixFSShardingSizeThreshold "\"1B\"" -' - -SHARDED="QmSCJD1KYLhVVHqBK3YyXuoEqHt7vggyJhzoFYbT8v1XYL" -test_add_dir "$SHARDED" - -test_launch_ipfs_daemon - -test_add_dir "$SHARDED" - -test_kill_ipfs_daemon - -test_expect_success "sharded and unsharded output look the same" ' - ipfs ls "$SHARDED" | sort > sharded_out && - ipfs ls "$UNSHARDED" | sort > unsharded_out && - test_cmp sharded_out unsharded_out -' - -test_expect_success "ipfs cat error output the same" ' - test_expect_code 1 ipfs cat "$SHARDED" 2> sharded_err && - test_expect_code 1 ipfs cat "$UNSHARDED" 2> unsharded_err && - test_cmp sharded_err unsharded_err -' - -test_expect_success "'ipfs ls --resolve-type=false --size=false' admits missing block" ' - ipfs ls "$SHARDED" | head -1 > first_file && - ipfs ls --size=false "$SHARDED" | sort > sharded_out_nosize && - read -r HASH _ NAME missing_out && - test_cmp sharded_out_nosize missing_out -' - -test_launch_ipfs_daemon - -test_expect_success "gateway can resolve sharded dirs" ' - echo 100 > expected && - curl -sfo actual "http://127.0.0.1:$GWAY_PORT/ipfs/$SHARDED/file100" && - test_cmp expected actual -' - -test_expect_success "'ipfs resolve' can resolve sharded dirs" ' - echo /ipfs/QmZ3RfWk1u5LEGYLHA633B5TNJy3Du27K6Fny9wcxpowGS > expected && - ipfs resolve "/ipfs/$SHARDED/file100" > actual && - test_cmp expected actual -' - -test_kill_ipfs_daemon - -test_add_dir_v1() { - exphash="$1" - test_expect_success "ipfs add (CIDv1) on directory succeeds" ' - ipfs add -r -Q --cid-version=1 testdata > sharddir_out && - echo "$exphash" > sharddir_exp && - test_cmp sharddir_exp sharddir_out - ' - - test_expect_success "can access a path under the dir" ' - ipfs cat "$exphash/file20" > file20_out && - test_cmp testdata/file20 file20_out - ' -} - -# this hash implies the directory is CIDv1 and leaf entries are CIDv1 and raw -SHARDEDV1="bafybeibiemewfzzdyhq2l74wrd6qj2oz42usjlktgnlqv4yfawgouaqn4u" -test_add_dir_v1 "$SHARDEDV1" - -test_launch_ipfs_daemon - -test_add_dir_v1 "$SHARDEDV1" - -test_kill_ipfs_daemon - -test_list_incomplete_dir() { - test_expect_success "ipfs add (CIDv1) on very large directory with sha3 succeeds" ' - ipfs add -r -Q --cid-version=1 --hash=sha3-256 --pin=false testdata > sharddir_out && - largeSHA3dir=$(cat sharddir_out) - ' - - test_expect_success "delete intermediate node from DAG" ' - ipfs block rm "/ipld/$largeSHA3dir/Links/0/Hash" - ' - - test_expect_success "can list part of the directory" ' - ipfs ls "$largeSHA3dir" 2> ls_err_out - echo "Error: failed to fetch all nodes" > exp_err_out && - cat ls_err_out && - test_cmp exp_err_out ls_err_out - ' -} - 
-test_list_incomplete_dir - -test_done diff --git a/test/sharness/t0270-filestore.sh b/test/sharness/t0270-filestore.sh deleted file mode 100755 index 82b7ae49241..00000000000 --- a/test/sharness/t0270-filestore.sh +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2017 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test out the filestore nocopy functionality" - -. lib/test-lib.sh - - -test_expect_success "create a dataset" ' - random-files -seed=483 -depth=3 -dirs=4 -files=6 -filesize=1000000 somedir > /dev/null -' - -EXPHASH="QmW4JLyeTxEWGwa4mkE9mHzdtAkyhMX2ToGFEKZNjCiJud" - -get_repo_size() { - disk_usage "$IPFS_PATH" -} - -assert_repo_size_less_than() { - expval="$1" - - test_expect_success "check repo size" ' - test "$(get_repo_size)" -lt "$expval" || - { echo should be below "$expval" && test_fsh get_repo_size; } - ' -} - -assert_repo_size_greater_than() { - expval="$1" - - test_expect_success "check repo size" ' - test "$(get_repo_size)" -gt "$expval" || - { echo should be above "$expval" && test_fsh get_repo_size; } - ' -} - -test_filestore_adds() { - test_expect_success "nocopy add succeeds" ' - HASH=$(ipfs add --raw-leaves --nocopy -r -Q somedir) - ' - - test_expect_success "nocopy add has right hash" ' - test "$HASH" = "$EXPHASH" - ' - - assert_repo_size_less_than 1000000 - - test_expect_success "normal add with fscache doesn't duplicate data" ' - ipfs add --raw-leaves --fscache -r -q somedir > /dev/null - ' - - assert_repo_size_less_than 1000000 - - test_expect_success "normal add without fscache duplicates data" ' - ipfs add --raw-leaves -r -q somedir > /dev/null - ' - - assert_repo_size_greater_than 1000000 -} - -init_ipfs_filestore() { - test_expect_success "clean up old node" ' - rm -rf "$IPFS_PATH" mountdir ipfs ipns - ' - - test_init_ipfs - - # Check the _early_ error message - test_expect_success "nocopy add errors and has right message" ' - test_must_fail ipfs add --nocopy -r somedir 2> add_out && - grep "either the filestore or the urlstore must be enabled" add_out - ' - - assert_repo_size_less_than 1000000 - - test_expect_success "enable urlstore config setting" ' - ipfs config --json Experimental.UrlstoreEnabled true - ' - - # Check the _late_ error message - test_expect_success "nocopy add errors and has right message when the urlstore is enabled" ' - test_must_fail ipfs add --nocopy -r somedir 2> add_out && - grep "filestore is not enabled" add_out - ' - - assert_repo_size_less_than 1000000 - - test_expect_success "enable filestore config setting" ' - ipfs config --json Experimental.UrlstoreEnabled true && - ipfs config --json Experimental.FilestoreEnabled true - ' -} - -init_ipfs_filestore - -test_filestore_adds - -test_debug ' - echo "pwd=$(pwd)"; echo "IPFS_PATH=$IPFS_PATH" -' - - -init_ipfs_filestore - -test_launch_ipfs_daemon - -test_filestore_adds - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0271-filestore-utils.sh b/test/sharness/t0271-filestore-utils.sh deleted file mode 100755 index c7e814b9d0a..00000000000 --- a/test/sharness/t0271-filestore-utils.sh +++ /dev/null @@ -1,268 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2017 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test out the filestore nocopy functionality" - -. 
lib/test-lib.sh - -test_init_filestore() { - test_expect_success "clean up old node" ' - rm -rf "$IPFS_PATH" mountdir ipfs ipns - ' - - test_init_ipfs - - test_expect_success "enable filestore config setting" ' - ipfs config --json Experimental.FilestoreEnabled true - ' -} - -test_init_dataset() { - test_expect_success "create a dataset" ' - rm -r somedir - mkdir somedir && - random 1000 1 > somedir/file1 && - random 10000 2 > somedir/file2 && - random 1000000 3 > somedir/file3 - ' -} - -test_init() { - test_init_filestore - test_init_dataset -} - -EXPHASH="QmRueCuPMYYvdxWz1vWncF7wzCScEx4qasZXo5aVBb1R4V" - -cat <<EOF > ls_expect_file_order -bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq 1000 somedir/file1 0 -bafkreibxwxisv4cld6x76ybqbvf2uwbkoswjqt4hut46af6rps2twme7ey 10000 somedir/file2 0 -bafkreidntk6ciin24oez6yjz4b25fgwecncvi4ua4uhr2tdyenogpzpid4 262144 somedir/file3 0 -bafkreidwie26yauqbhpd2nhhhmod55irq3z372mh6gw4ikl2ifo34c5jra 262144 somedir/file3 262144 -bafkreib7piyesy3dr22sawmycdftrmpyt3z4tmhxrdig2zt5zdp7qwbuay 262144 somedir/file3 524288 -bafkreigxp5k3k6b3i5sldu4r3im74nfxmoptuuubcvq6rg632nfznskglu 213568 somedir/file3 786432 -EOF - -sort < ls_expect_file_order > ls_expect_key_order - -FILE1_HASH=bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq -FILE2_HASH=bafkreibxwxisv4cld6x76ybqbvf2uwbkoswjqt4hut46af6rps2twme7ey -FILE3_HASH=QmfE4SDQazxTD7u8VTYs9AJqQL8rrJPUAorLeJXKSZrVf9 - -cat <<EOF > verify_expect_file_order -ok bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq 1000 somedir/file1 0 -ok bafkreibxwxisv4cld6x76ybqbvf2uwbkoswjqt4hut46af6rps2twme7ey 10000 somedir/file2 0 -ok bafkreidntk6ciin24oez6yjz4b25fgwecncvi4ua4uhr2tdyenogpzpid4 262144 somedir/file3 0 -ok bafkreidwie26yauqbhpd2nhhhmod55irq3z372mh6gw4ikl2ifo34c5jra 262144 somedir/file3 262144 -ok bafkreib7piyesy3dr22sawmycdftrmpyt3z4tmhxrdig2zt5zdp7qwbuay 262144 somedir/file3 524288 -ok bafkreigxp5k3k6b3i5sldu4r3im74nfxmoptuuubcvq6rg632nfznskglu 213568 somedir/file3 786432 -EOF - -sort < verify_expect_file_order > verify_expect_key_order - -IPFS_CMD="ipfs" - -test_filestore_adds() { - test_expect_success "$IPFS_CMD nocopy add succeeds" ' - HASH=$($IPFS_CMD add --raw-leaves --nocopy -r -Q somedir) - ' - - test_expect_success "nocopy add has right hash" ' - test "$HASH" = "$EXPHASH" - ' - - test_expect_success "'$IPFS_CMD filestore ls' output looks good" ' - $IPFS_CMD filestore ls | sort > ls_actual && - test_cmp ls_expect_key_order ls_actual - ' - - test_expect_success "'$IPFS_CMD filestore ls --file-order' output looks good" ' - $IPFS_CMD filestore ls --file-order > ls_actual && - test_cmp ls_expect_file_order ls_actual - ' - - test_expect_success "'$IPFS_CMD filestore ls HASH' works" ' - $IPFS_CMD filestore ls $FILE1_HASH > ls_actual && - grep -q somedir/file1 ls_actual - ' - - test_expect_success "can retrieve multi-block file" ' - $IPFS_CMD cat $FILE3_HASH > file3.data && - test_cmp somedir/file3 file3.data - ' -} - -# check that the filestore is in a clean state -test_filestore_state() { - test_expect_success "'$IPFS_CMD filestore verify' output looks good" ' - $IPFS_CMD filestore verify | LC_ALL=C sort > verify_actual && - test_cmp verify_expect_key_order verify_actual - ' -} - -test_filestore_verify() { - test_filestore_state - - test_expect_success "'$IPFS_CMD filestore verify --file-order' output looks good" ' - $IPFS_CMD filestore verify --file-order > verify_actual && - test_cmp verify_expect_file_order verify_actual - ' - - test_expect_success "'$IPFS_CMD filestore verify HASH' works" ' - 
$IPFS_CMD filestore verify $FILE1_HASH > verify_actual && - grep -q somedir/file1 verify_actual - ' - - test_expect_success "rename a file" ' - mv somedir/file1 somedir/file1.bk - ' - - test_expect_success "can not retrieve block after backing file moved" ' - test_must_fail $IPFS_CMD cat $FILE1_HASH - ' - - test_expect_success "'$IPFS_CMD filestore verify' shows file as missing" ' - $IPFS_CMD filestore verify > verify_actual && - grep no-file verify_actual | grep -q somedir/file1 - ' - - test_expect_success "move file back" ' - mv somedir/file1.bk somedir/file1 - ' - - test_expect_success "block okay now" ' - $IPFS_CMD cat $FILE1_HASH > file1.data && - test_cmp somedir/file1 file1.data - ' - - test_expect_success "change first bit of file" ' - dd if=/dev/zero of=somedir/file3 bs=1024 count=1 - ' - - test_expect_success "can not retrieve block after backing file changed" ' - test_must_fail $IPFS_CMD cat $FILE3_HASH - ' - - test_expect_success "'$IPFS_CMD filestore verify' shows file as changed" ' - $IPFS_CMD filestore verify > verify_actual && - grep changed verify_actual | grep -q somedir/file3 - ' - - # reset the state for the next test - test_init_dataset -} - -test_filestore_dups() { - # make sure the filestore is in a clean state - test_filestore_state - - test_expect_success "'$IPFS_CMD filestore dups'" ' - $IPFS_CMD add --raw-leaves somedir/file1 && - $IPFS_CMD filestore dups > dups_actual && - echo "$FILE1_HASH" > dups_expect && - test_cmp dups_expect dups_actual - ' -} - - # - # No daemon - # - -test_init - -test_filestore_adds - -test_filestore_verify - -test_filestore_dups - - # - # With daemon - # - -test_init - - # must be in offline mode so tests that retrieve non-existent blocks - # don't hang -test_launch_ipfs_daemon_without_network - -test_filestore_adds - -test_filestore_verify - -test_filestore_dups - -test_kill_ipfs_daemon - - ## - ## base32 - ## - -EXPHASH="bafybeibva2uh4qpwjo2yr5g7m7nd5kfq64atydq77qdlrikh5uejwqdcbi" - -cat <<EOF > ls_expect_file_order -bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq 1000 somedir/file1 0 -bafkreibxwxisv4cld6x76ybqbvf2uwbkoswjqt4hut46af6rps2twme7ey 10000 somedir/file2 0 -bafkreidntk6ciin24oez6yjz4b25fgwecncvi4ua4uhr2tdyenogpzpid4 262144 somedir/file3 0 -bafkreidwie26yauqbhpd2nhhhmod55irq3z372mh6gw4ikl2ifo34c5jra 262144 somedir/file3 262144 -bafkreib7piyesy3dr22sawmycdftrmpyt3z4tmhxrdig2zt5zdp7qwbuay 262144 somedir/file3 524288 -bafkreigxp5k3k6b3i5sldu4r3im74nfxmoptuuubcvq6rg632nfznskglu 213568 somedir/file3 786432 -EOF - -sort < ls_expect_file_order > ls_expect_key_order - -FILE1_HASH=bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq -FILE2_HASH=bafkreibxwxisv4cld6x76ybqbvf2uwbkoswjqt4hut46af6rps2twme7ey -FILE3_HASH=bafybeih24zygzr2orr5q62mjnbgmjwgj6rx3tp74pwcqsqth44rloncllq - -cat <<EOF > verify_expect_file_order -ok bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq 1000 somedir/file1 0 -ok bafkreibxwxisv4cld6x76ybqbvf2uwbkoswjqt4hut46af6rps2twme7ey 10000 somedir/file2 0 -ok bafkreidntk6ciin24oez6yjz4b25fgwecncvi4ua4uhr2tdyenogpzpid4 262144 somedir/file3 0 -ok bafkreidwie26yauqbhpd2nhhhmod55irq3z372mh6gw4ikl2ifo34c5jra 262144 somedir/file3 262144 -ok bafkreib7piyesy3dr22sawmycdftrmpyt3z4tmhxrdig2zt5zdp7qwbuay 262144 somedir/file3 524288 -ok bafkreigxp5k3k6b3i5sldu4r3im74nfxmoptuuubcvq6rg632nfznskglu 213568 somedir/file3 786432 -EOF - -sort < verify_expect_file_order > verify_expect_key_order - -IPFS_CMD="ipfs --cid-base=base32" - - # - # No daemon - # - -test_init - -test_filestore_adds - -test_filestore_verify - 
-test_filestore_dups - - # - # With daemon - # - -test_init - - # must be in offline mode so tests that retrieve non-existent blocks - # don't hang -test_launch_ipfs_daemon_without_network - -test_filestore_adds - -test_filestore_verify - -test_filestore_dups - -test_kill_ipfs_daemon - -test_done - - ## - -test_done diff --git a/test/sharness/t0272-urlstore.sh b/test/sharness/t0272-urlstore.sh deleted file mode 100755 index 8fa7ff3b81f..00000000000 --- a/test/sharness/t0272-urlstore.sh +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2017 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test out the urlstore functionality" - -. lib/test-lib.sh - - -test_expect_success "create some random files" ' - random 2222 7 > file1 && - random 500000 7 > file2 && - random 50000000 7 > file3 -' - -test_urlstore() { - ADD_CMD="${@}" - - test_init_ipfs - - test_expect_success "add files using trickle dag format without raw leaves" ' - HASH1a=$(ipfs add -q --trickle --raw-leaves=false file1) && - HASH2a=$(ipfs add -q --trickle --raw-leaves=false file2) && - HASH3a=$(ipfs add -q --trickle --raw-leaves=false file3) - ' - - test_launch_ipfs_daemon_without_network - - test_expect_success "make sure files can be retrieved via the gateway" ' - curl http://127.0.0.1:$GWAY_PORT/ipfs/$HASH1a -o file1.actual && - test_cmp file1 file1.actual && - curl http://127.0.0.1:$GWAY_PORT/ipfs/$HASH2a -o file2.actual && - test_cmp file2 file2.actual && - curl http://127.0.0.1:$GWAY_PORT/ipfs/$HASH3a -o file3.actual && - test_cmp file3 file3.actual - ' - - test_expect_success "add files without enabling url store using $ADD_CMD" ' - test_must_fail ipfs $ADD_CMD http://127.0.0.1:$GWAY_PORT/ipfs/$HASH1a && - test_must_fail ipfs $ADD_CMD http://127.0.0.1:$GWAY_PORT/ipfs/$HASH2a - ' - - test_kill_ipfs_daemon - - test_expect_success "enable urlstore" ' - ipfs config --json Experimental.UrlstoreEnabled true - ' - - test_launch_ipfs_daemon_without_network - - test_expect_success "add files using gateway address via url store using $ADD_CMD" ' - HASH1=$(ipfs $ADD_CMD --pin=false http://127.0.0.1:$GWAY_PORT/ipfs/$HASH1a) && - HASH2=$(ipfs $ADD_CMD http://127.0.0.1:$GWAY_PORT/ipfs/$HASH2a) - ' - - test_expect_success "make sure hashes are different" ' - test $HASH1a != $HASH1 && - test $HASH2a != $HASH2 - ' - - test_expect_success "get files via urlstore" ' - rm -f file1.actual file2.actual && - ipfs get $HASH1 -o file1.actual && - test_cmp file1 file1.actual && - ipfs get $HASH2 -o file2.actual && - test_cmp file2 file2.actual - ' - - cat <<EOF > ls_expect -bafkreiafqvawjpukk4achpu7edu4d6x5dbzwgigl6nxunjif3ser6bnfpu 262144 http://127.0.0.1:$GWAY_PORT/ipfs/QmUow2T4P69nEsqTQDZCt8yg9CPS8GFmpuDAr5YtsPhTdM 0 -bafkreia46t3jwchosehfcq7kponx26shcjkatxek4m2tzzd67i6o3frpou 237856 http://127.0.0.1:$GWAY_PORT/ipfs/QmUow2T4P69nEsqTQDZCt8yg9CPS8GFmpuDAr5YtsPhTdM 262144 -bafkreiga7ukbxrxs26fiseijjd7zdd6gmlrmnxhalwfbagxwjv7ck4o34a 2222 http://127.0.0.1:$GWAY_PORT/ipfs/QmcHm3BL2cXuQ6rJdKQgPrmT9suqGkfy2KzH3MkXPEBXU6 0 -EOF - - test_expect_success "ipfs filestore ls works with urls" ' - ipfs filestore ls | sort > ls_actual && - test_cmp ls_expect ls_actual - ' - - cat <<EOF > verify_expect -ok bafkreiafqvawjpukk4achpu7edu4d6x5dbzwgigl6nxunjif3ser6bnfpu 262144 http://127.0.0.1:$GWAY_PORT/ipfs/QmUow2T4P69nEsqTQDZCt8yg9CPS8GFmpuDAr5YtsPhTdM 0 -ok bafkreia46t3jwchosehfcq7kponx26shcjkatxek4m2tzzd67i6o3frpou 237856 http://127.0.0.1:$GWAY_PORT/ipfs/QmUow2T4P69nEsqTQDZCt8yg9CPS8GFmpuDAr5YtsPhTdM 262144 -ok 
bafkreiga7ukbxrxs26fiseijjd7zdd6gmlrmnxhalwfbagxwjv7ck4o34a 2222 http://127.0.0.1:$GWAY_PORT/ipfs/QmcHm3BL2cXuQ6rJdKQgPrmT9suqGkfy2KzH3MkXPEBXU6 0 -EOF - - test_expect_success "ipfs filestore verify works with urls" ' - ipfs filestore verify | sort > verify_actual && - test_cmp verify_expect verify_actual - ' - - test_expect_success "garbage collect file1 from the urlstore" ' - ipfs repo gc > /dev/null - ' - - test_expect_success "can no longer retrieve file1 from urlstore" ' - rm -f file1.actual && - test_must_fail ipfs get $HASH1 -o file1.actual - ' - - test_expect_success "can still retrieve file2 from urlstore" ' - rm -f file2.actual && - ipfs get $HASH2 -o file2.actual && - test_cmp file2 file2.actual - ' - - test_expect_success "remove original hashes from local gateway" ' - ipfs pin rm $HASH1a $HASH2a && - ipfs repo gc > /dev/null - ' - - test_expect_success "gateway no longer has files" ' - test_must_fail curl -f http://127.0.0.1:$GWAY_PORT/ipfs/$HASH1a -o file1.actual && - test_must_fail curl -f http://127.0.0.1:$GWAY_PORT/ipfs/$HASH2a -o file2.actual - ' - - cat <<EOF > verify_expect_2 -error bafkreiafqvawjpukk4achpu7edu4d6x5dbzwgigl6nxunjif3ser6bnfpu 262144 http://127.0.0.1:$GWAY_PORT/ipfs/QmUow2T4P69nEsqTQDZCt8yg9CPS8GFmpuDAr5YtsPhTdM 0 -error bafkreia46t3jwchosehfcq7kponx26shcjkatxek4m2tzzd67i6o3frpou 237856 http://127.0.0.1:$GWAY_PORT/ipfs/QmUow2T4P69nEsqTQDZCt8yg9CPS8GFmpuDAr5YtsPhTdM 262144 -EOF - - test_expect_success "ipfs filestore verify is correct" ' - ipfs filestore verify | sort > verify_actual_2 && - test_cmp verify_expect_2 verify_actual_2 - ' - - test_expect_success "files cannot be retrieved via the urlstore" ' - test_must_fail ipfs cat $HASH1 > /dev/null && - test_must_fail ipfs cat $HASH2 > /dev/null - ' - - test_expect_success "remove broken files" ' - ipfs pin rm $HASH2 && - ipfs repo gc > /dev/null - ' - - test_expect_success "add large file using gateway address via url store" ' - HASH3=$(ipfs ${ADD_CMD[@]} http://127.0.0.1:$GWAY_PORT/ipfs/$HASH3a) - ' - - test_expect_success "make sure hashes are different" ' - test $HASH3a != $HASH3 - ' - - test_expect_success "get large file via urlstore" ' - rm -f file3.actual && - ipfs get $HASH3 -o file3.actual && - test_cmp file3 file3.actual - ' - - test_expect_success "check that the trickle option works" ' - HASHat=$(ipfs add -q --cid-version=1 --raw-leaves=true -n --trickle file3) && - HASHut=$(ipfs $ADD_CMD --trickle http://127.0.0.1:$GWAY_PORT/ipfs/$HASH3a) && - test $HASHat = $HASHut - ' - - test_expect_success "add files using gateway address via url store using --cid-base=base32" ' - HASH1a=$(ipfs add -q --trickle --raw-leaves=false file1) && - HASH2a=$(ipfs add -q --trickle --raw-leaves=false file2) && - HASH1b32=$(ipfs --cid-base=base32 $ADD_CMD http://127.0.0.1:$GWAY_PORT/ipfs/$HASH1a) && - HASH2b32=$(ipfs --cid-base=base32 $ADD_CMD http://127.0.0.1:$GWAY_PORT/ipfs/$HASH2a) - ' - - test_kill_ipfs_daemon - - test_expect_success "files cannot be retrieved via the urlstore" ' - test_must_fail ipfs cat $HASH1 > /dev/null && - test_must_fail ipfs cat $HASH2 > /dev/null && - test_must_fail ipfs cat $HASH3 > /dev/null - ' - - test_expect_success "check that the hashes were correct" ' - HASH1e=$(ipfs add -q -n --cid-version=1 --raw-leaves=true file1) && - HASH2e=$(ipfs add -q -n --cid-version=1 --raw-leaves=true file2) && - HASH3e=$(ipfs add -q -n --cid-version=1 --raw-leaves=true file3) && - test $HASH1e = $HASH1 && - test $HASH2e = $HASH2 && - test $HASH3e = $HASH3 - ' - - test_expect_success "check that the base32 
hashes were correct" ' - HASH1e32=$(ipfs cid base32 $HASH1e) - HASH2e32=$(ipfs cid base32 $HASH2e) - test $HASH1e32 = $HASH1b32 && - test $HASH2e32 = $HASH2b32 - ' - - test_expect_success "ipfs cleanup" ' - rm -rf "$IPFS_PATH" && rmdir ipfs ipns mountdir - ' -} - -test_urlstore add -q --nocopy --cid-version=1 - -test_done diff --git a/test/sharness/t0275-cid-security.sh b/test/sharness/t0275-cid-security.sh deleted file mode 100755 index e8d26555052..00000000000 --- a/test/sharness/t0275-cid-security.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2017 Jakub Sztandera -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Cid Security" - -. lib/test-lib.sh - -test_init_ipfs - -test_expect_success "adding using unsafe function fails with error" ' - echo foo | test_must_fail ipfs add --hash shake-128 2>add_out -' - -test_expect_success "error reason is pointed out" ' - grep "insecure hash functions not allowed" add_out || test_fsh cat add_out -' - -test_expect_success "adding using too short of a hash function gives out an error" ' - echo foo | test_must_fail ipfs block put -f protobuf --mhlen 19 2>block_out -' - -test_expect_success "error reason is pointed out" ' - grep "hashes must be at least 20 bytes long" block_out -' - - -test_cat_get() { - - test_expect_success "ipfs cat fails with unsafe hash function" ' - test_must_fail ipfs cat bafksebhh7d53e 2>ipfs_cat - ' - - - test_expect_success "error reason is pointed out" ' - grep "insecure hash functions not allowed" ipfs_cat - ' - - - test_expect_success "ipfs get fails with too short function" ' - test_must_fail ipfs get bafkreez3itiri7ghbbf6lzej7paxyxy2qznpw 2>ipfs_get - - ' - - test_expect_success "error reason is pointed out" ' - grep "hashes must be at least 20 bytes long" ipfs_get - ' -} - - -test_gc() { - test_expect_success "injecting insecure block" ' - mkdir -p "$IPFS_PATH/blocks/TS" && - cp -f ../t0275-cid-security-data/EICEM7ITSI.data "$IPFS_PATH/blocks/TS" - ' - - test_expect_success "gc works" 'ipfs repo gc > gc_out' - test_expect_success "gc removed bad block" ' - grep bafksebcgpujze gc_out - ' -} - - -# should work offline -test_cat_get -test_gc - -# should work online -test_launch_ipfs_daemon -test_cat_get -test_gc - -test_expect_success "add block linking to insecure" ' - mkdir -p "$IPFS_PATH/blocks/5X" && - cp -f "../t0275-cid-security-data/CIQG6PGTD2VV34S33BE4MNCQITBRFYUPYQLDXYARR3DQW37MOT7K5XI.data" "$IPFS_PATH/blocks/5X" -' - -test_expect_success "ipfs cat fails with code 1 and not timeout" ' - test_expect_code 1 go-timeout 1 ipfs cat QmVpsktzNeJdfWEpyeix93QJdQaBSgRNxebSbYSo9SQPGx -' - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0276-cidv0v1.sh b/test/sharness/t0276-cidv0v1.sh deleted file mode 100755 index c810f45449d..00000000000 --- a/test/sharness/t0276-cidv0v1.sh +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2017 Jakub Sztandera -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="CID Version 0/1 Duality" - -. 
lib/test-lib.sh - -test_init_ipfs - -# -# -# - -test_expect_success "create two small files" ' - random 1000 7 > afile - random 1000 9 > bfile -' - -test_expect_success "add file using CIDv1 but don't pin" ' - AHASHv1=$(ipfs add -q --cid-version=1 --raw-leaves=false --pin=false afile) -' - -test_expect_success "add file using CIDv0" ' - AHASHv0=$(ipfs add -q --cid-version=0 afile) -' - -test_expect_success "check hashes" ' - test "$(cid-fmt %v-%c $AHASHv0)" = "cidv0-dag-pb" && - test "$(cid-fmt %v-%c $AHASHv1)" = "cidv1-dag-pb" && - test "$(cid-fmt -b z -v 0 %s $AHASHv1)" = "$AHASHv0" -' - -test_expect_success "make sure CIDv1 hash really is in the repo" ' - ipfs block stat $AHASHv1 -' - -test_expect_success "make sure CIDv0 hash really is in the repo" ' - ipfs block stat $AHASHv0 -' - -test_expect_success "run gc" ' - ipfs repo gc -' - -test_expect_success "make sure the CIDv0 hash is in the repo" ' - ipfs block stat $AHASHv0 -' - -test_expect_success "make sure we can get CIDv0 added file" ' - ipfs cat $AHASHv0 > thefile && - test_cmp afile thefile -' - -test_expect_success "make sure the CIDv1 hash is not in the repo" ' - ! ipfs refs local | grep -q $AHASHv1 -' - -test_expect_success "clean up" ' - ipfs pin rm $AHASHv0 && - ipfs repo gc && - ! ipfs refs local | grep -q $AHASHv0 -' - -# -# -# - -test_expect_success "add file using CIDv1 but don't pin" ' - ipfs add -q --cid-version=1 --raw-leaves=false --pin=false afile -' - -test_expect_success "check that we can access the file when converted to CIDv0" ' - ipfs cat $AHASHv0 > thefile && - test_cmp afile thefile -' - -test_expect_success "clean up" ' - ipfs repo gc -' - -test_expect_success "add file using CIDv0 but don't pin" ' - ipfs add -q --cid-version=0 --raw-leaves=false --pin=false afile -' - -test_expect_success "check that we can access the file when converted to CIDv1" ' - ipfs cat $AHASHv1 > thefile && - test_cmp afile thefile -' - -# -# -# - -test_expect_success "set up iptb testbed" ' - iptb testbed create -type localipfs -count 2 -init && - iptb run -- ipfs config --json "Routing.LoopbackAddressesOnLanDHT" true -' - -test_expect_success "start nodes" ' - iptb start -wait && - iptb connect 0 1 -' - -test_expect_success "add afile using CIDv0 to node 0" ' - iptb run 0 -- ipfs add -q --cid-version=0 afile -' - -test_expect_success "get afile using CIDv1 via node 1" ' - iptb -quiet run 1 -- ipfs --timeout=2s cat $AHASHv1 > thefile && - test_cmp afile thefile -' - -test_expect_success "add bfile using CIDv1 to node 0" ' - BHASHv1=$(iptb -quiet run 0 -- ipfs add -q --cid-version=1 --raw-leaves=false bfile) -' - -test_expect_success "get bfile using CIDv0 via node 1" ' - BHASHv0=$(cid-fmt -b z -v 0 %s $BHASHv1) - echo $BHASHv1 && - iptb -quiet run 1 -- ipfs --timeout=2s cat $BHASHv0 > thefile && - test_cmp bfile thefile -' - -test_expect_success "stop testbed" ' - iptb stop -' - -test_done diff --git a/test/sharness/t0280-plugin-dag-jose.sh b/test/sharness/t0280-plugin-dag-jose.sh deleted file mode 100755 index 41d8e7f8ce4..00000000000 --- a/test/sharness/t0280-plugin-dag-jose.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2021 Mohsin Zaidi -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test dag-jose plugin" - -. 
lib/test-lib.sh - -test_init_ipfs - -test_dag_jose() { - test_expect_success "encode as dag-jose, decode back to original, verify round-trip" $' - find ../t0280-plugin-dag-jose-data -type f | xargs -I {} sh -c \' \ - codec=$(basename $(dirname {})); \ - joseHash=$(ipfs dag put --store-codec dag-jose --input-codec=$codec {}); \ - ipfs dag get --output-codec $codec $joseHash > $(basename {}); \ - diff {} $(basename {}) \' - ' - - test_expect_success "retrieve dag-jose in non-dag-jose encodings" $' - find ../t0280-plugin-dag-jose-data -type f | xargs -I {} sh -c \' \ - codec=$(basename $(dirname {})); \ - joseHash=$(ipfs dag put --store-codec dag-jose --input-codec=$codec {}); \ - ipfs dag get --output-codec dag-cbor $joseHash > /dev/null; \ - ipfs dag get --output-codec dag-json $joseHash > /dev/null \' - ' -} - -# should work offline -test_dag_jose - -# should work online -test_launch_ipfs_daemon -test_dag_jose -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0280-plugin-fx.sh b/test/sharness/t0280-plugin-fx.sh deleted file mode 100755 index ca4d45f0784..00000000000 --- a/test/sharness/t0280-plugin-fx.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test fx plugin" - -. lib/test-lib.sh - -test_init_ipfs - -export GOLOG_LOG_LEVEL="fxtestplugin=debug" -export TEST_FX_PLUGIN=1 -test_launch_ipfs_daemon - -test_expect_success "expected log entry should be present" ' - fgrep "invoked test fx function" daemon_err >/dev/null -' - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0280-plugin-git.sh b/test/sharness/t0280-plugin-git.sh deleted file mode 100755 index c3ffc882dde..00000000000 --- a/test/sharness/t0280-plugin-git.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2017 Jakub Sztandera -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test git plugin" - -. 
lib/test-lib.sh - -test_init_ipfs - -# from https://github.com/ipfs/go-ipld-git/blob/master/make-test-repo.sh -test_expect_success "prepare test data" ' - tar xzf ../t0280-plugin-git-data/git.tar.gz -' - -test_dag_git() { - test_expect_success "add objects via dag put" ' - find objects -type f -exec ipfs dag put --store-codec=git-raw --input-codec=0x300078 --hash=sha1 {} \; -exec echo -n \; > hashes - ' - - test_expect_success "successfully get added objects" ' - cat hashes | xargs -I {} ipfs dag get -- {} > /dev/null - ' - - test_expect_success "dag get works" ' - echo -n "{\"message\":\"Some version\n\",\"object\":{\"/\":\"baf4bcfeq6c2mspupcvftgevza56h7rmozose6wi\"},\"tag\":\"v1\",\"tagger\":{\"date\":\"1497302532\",\"email\":\"johndoe@example.com\",\"name\":\"John Doe\",\"timezone\":\"+0200\"},\"type\":\"commit\"}" > tag_expected && - ipfs dag get baf4bcfhzi72pcj5cc4ocz7igcduubuu7aa3cddi > tag_actual - ' - - test_expect_success "outputs look correct" ' - test_cmp tag_expected tag_actual - ' - - test_expect_success "path traversals work" ' - echo -n "{\"date\":\"1497302532\",\"email\":\"johndoe@example.com\",\"name\":\"John Doe\",\"timezone\":\"+0200\"}" > author_expected && - echo -n "{\"/\":{\"bytes\":\"YmxvYiAxMgBIZWxsbyB3b3JsZAo\"}}" > file1_expected && - echo -n "{\"/\":{\"bytes\":\"YmxvYiA3ACcsLnB5Zgo\"}}" > file2_expected && - ipfs dag get baf4bcfhzi72pcj5cc4ocz7igcduubuu7aa3cddi/object/author > author_actual && - ipfs dag get baf4bcfhzi72pcj5cc4ocz7igcduubuu7aa3cddi/object/tree/file/hash > file1_actual && - ipfs dag get baf4bcfhzi72pcj5cc4ocz7igcduubuu7aa3cddi/object/parents/0/tree/dir2/hash/f3/hash > file2_actual - ' - - test_expect_success "outputs look correct" ' - test_cmp author_expected author_actual && - test_cmp file1_expected file1_actual && - test_cmp file2_expected file2_actual - ' -} - -# should work offline -test_dag_git - -# should work online -test_launch_ipfs_daemon -test_dag_git -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0280-plugin-peerlog.sh b/test/sharness/t0280-plugin-peerlog.sh deleted file mode 100755 index f240582b82c..00000000000 --- a/test/sharness/t0280-plugin-peerlog.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2017 Jakub Sztandera -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test peerlog plugin" - -. 
lib/test-lib.sh - -test_expect_success "setup testbed" ' - iptb testbed create -type localipfs -count 2 -force -init -' - -startup_cluster 2 - -test_expect_success "peerlog is disabled by default" ' - go-sleep 100ms - iptb logs 0 >node0logs - test_expect_code 1 grep peerlog node0logs -' - -test_expect_success 'stop iptb' 'iptb stop' - - - -test_expect_success "setup testbed" ' - iptb testbed create -type localipfs -count 2 -force -init -' - -test_expect_success "enable peerlog config setting" ' - iptb run -- ipfs config --json Plugins.Plugins.peerlog.Config.Enabled true -' - -startup_cluster 2 - -test_expect_success "peerlog plugin is logged" ' - go-sleep 100ms - iptb logs 0 >node0logs - grep peerlog node0logs -' - -test_expect_success 'peer id' ' - PEERID_1=$(iptb attr get 1 id) -' - -test_expect_success "peer id is logged" ' - iptb logs 0 | grep -q "$PEERID_1" -' - -test_expect_success 'stop iptb' 'iptb stop' - -test_done diff --git a/test/sharness/t0280-plugin.sh b/test/sharness/t0280-plugin.sh deleted file mode 100755 index d5b8313e2e7..00000000000 --- a/test/sharness/t0280-plugin.sh +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2019 Protocol Labs -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test plugin loading" - -. lib/test-lib.sh - -if ! test_have_prereq PLUGIN; then - skip_all='skipping plugin tests, plugins not available' - - test_done -fi - -test_init_ipfs - -test_expect_success "ipfs id succeeds" ' - ipfs id -' - -test_expect_success "make a bad plugin" ' - mkdir -p "$IPFS_PATH/plugins" && - echo foobar > "$IPFS_PATH/plugins/foo.so" && - chmod +x "$IPFS_PATH/plugins/foo.so" -' - -test_expect_success "ipfs id fails due to a bad plugin" ' - test_expect_code 1 ipfs id -' - -test_expect_success "cleanup bad plugin" ' - rm "$IPFS_PATH/plugins/foo.so" -' - -test_expect_success "install test plugin" ' - go build \ - -asmflags=all="-trimpath=${GOPATH}" -gcflags=all="-trimpath=${GOPATH}" \ - -buildmode=plugin -o "$IPFS_PATH/plugins/example.so" ../t0280-plugin-data/example.go && - chmod +x "$IPFS_PATH/plugins/example.so" -' - -test_plugin() { - local loads="$1" - local repo="$2" - local config="$3" - - rm -f id_raw_output id_output id_output_expected - - test_expect_success "id runs" ' - ipfs id 2>id_raw_output >/dev/null - ' - - test_expect_success "filter test plugin output" ' - sed -ne "s/^testplugin //p" id_raw_output >id_output - ' - - if [ "$loads" != "true" ]; then - test_expect_success "plugin doesn't load" ' - test_must_be_empty id_output - ' - else - test_expect_success "plugin produces the correct output" ' - echo "$repo" >id_output_expected && - echo "$config" >>id_output_expected && - test_cmp id_output id_output_expected - ' - fi -} - -test_plugin true "$IPFS_PATH" "" - -test_expect_success "disable the plugin" ' - ipfs config --json Plugins.Plugins.test-plugin.Disabled true -' - -test_plugin false - -test_expect_success "re-enable the plugin" ' - ipfs config --json Plugins.Plugins.test-plugin.Disabled false -' - -test_plugin true "$IPFS_PATH" "" - -test_expect_success "configure the plugin" ' - ipfs config Plugins.Plugins.test-plugin.Config foobar -' - -test_plugin true "$IPFS_PATH" "foobar" - -test_expect_success "noplugin flag works" ' - test_must_fail go run -tags=noplugin github.com/ipfs/go-ipfs/cmd/ipfs id > output 2>&1 - test_should_contain "not built with plugin support" output -' - -test_expect_success "noplugin flag works" ' - CGO_ENABLED=0 test_must_fail go run github.com/ipfs/go-ipfs/cmd/ipfs id > 
output 2>&1 - test_should_contain "not built with cgo support" output -' - -test_done diff --git a/test/sharness/t0290-cid.sh b/test/sharness/t0290-cid.sh deleted file mode 100755 index 8fb36e30e50..00000000000 --- a/test/sharness/t0290-cid.sh +++ /dev/null @@ -1,331 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test cid commands" - -. lib/test-lib.sh - -# note: all "ipfs cid" commands should work without requiring a repo - -CIDv0="QmS4ustL54uo8FzR9455qaxZwuMiUhyvMcX9Ba8nUH4uVv" -CIDv1="zdj7WZAAFKPvYPPzyJLso2hhxo8a7ZACFQ4DvvfrNXTHidofr" -CIDb32="bafybeibxm2nsadl3fnxv2sxcxmxaco2jl53wpeorjdzidjwf5aqdg7wa6u" - -CIDbase="QmYNmQKp6SuaVrpgWRsPTgCQCnpxUYGq76YEKBXuj2N4H6" -CIDb32pb="bafybeievd6mwe6vcwnkwo3eizs3h7w3a34opszbyfxziqdxguhjw7imdve" -CIDb32raw="bafkreievd6mwe6vcwnkwo3eizs3h7w3a34opszbyfxziqdxguhjw7imdve" -CIDb32dagcbor="bafyreievd6mwe6vcwnkwo3eizs3h7w3a34opszbyfxziqdxguhjw7imdve" - -test_expect_success "cid base32 works" ' - echo $CIDb32 > expected && - ipfs cid base32 $CIDv0 > actual1 && - test_cmp actual1 expected && - ipfs cid base32 $CIDv1 > actual2 && - test_cmp expected actual2 -' - -test_expect_success "cid format -v 1 -b base58btc" ' - echo $CIDv1 > expected && - ipfs cid format -v 1 -b base58btc $CIDv0 > actual1 && - test_cmp actual1 expected && - ipfs cid format -v 1 -b base58btc $CIDb32 > actual2 && - test_cmp expected actual2 -' - -test_expect_success "cid format -v 0" ' - echo $CIDv0 > expected && - ipfs cid format -v 0 $CIDb32 > actual && - test_cmp expected actual -' - -cat < various_cids -QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo - QmPhk6cJkRcFfZCdYam4c9MKYjFG9V29LswUnbrFNhtk2S -bafybeihtwdtifv43rn5cyilnmkwofdcxi2suqimmo62vn3etf45gjoiuwy -bafybeiek4tfxkc4ov6jsmb63fzbirrsalnjw24zd5xawo2fgxisd4jmpyq -zdj7WgYfT2gfsgiUxzPYboaRbP9H9CxZE5jVMK9pDDwCcKDCR -zdj7WbTaiJT1fgatdet9Ei9iDB5hdCxkbVyhyh8YTUnXMiwYi -uAXASIDsp4T3Wnd6kXFOQaljH3GFK_ixkjMtVhB9VOBrPK3bp - uAXASIDdmmyANeytvXUriuy4BO0lfd2eR0UjygabF6CAzfsD1 -EOF - -cat < various_cids_base32 -bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa -bafybeiauil46g3lb32jemjbl7yspca3twdcg4wwkbsgdgvgdj5fpfv2f64 -bafybeihtwdtifv43rn5cyilnmkwofdcxi2suqimmo62vn3etf45gjoiuwy -bafybeiek4tfxkc4ov6jsmb63fzbirrsalnjw24zd5xawo2fgxisd4jmpyq -bafybeifffq3aeaymxejo37sn5fyaf7nn7hkfmzwdxyjculx3lw4tyhk7uy -bafybeiczsscdsbs7ffqz55asqdf3smv6klcw3gofszvwlyarci47bgf354 -bafybeib3fhqt3vu532sfyu4qnjmmpxdbjl7cyzemznkyih2vhanm6k3w5e -bafybeibxm2nsadl3fnxv2sxcxmxaco2jl53wpeorjdzidjwf5aqdg7wa6u -EOF - -cat < various_cids_v1 -zdj7WgefqQm5HogBQ2bckZuTYYDarRTUZi51GYCnerHD2G86j -zdj7WWnzU3Nbu5rYGWZHKigUXBtAwShs2SHDCM1TQEvC9TeCN -zdj7WmqAbpsfXgiRBtZP1oAP9QWuuY3mqbc5JhpxJkfT3vYCu -zdj7Wen5gtfr7AivXip3zYd1peuq2QfKrqAn4FGiciVWb96YB -zdj7WgYfT2gfsgiUxzPYboaRbP9H9CxZE5jVMK9pDDwCcKDCR -zdj7WbTaiJT1fgatdet9Ei9iDB5hdCxkbVyhyh8YTUnXMiwYi -zdj7WZQrAvnY5ge3FNg5cmCsNwsvpYjdtu2yEmnWYQ4ES7Nzk -zdj7WZAAFKPvYPPzyJLso2hhxo8a7ZACFQ4DvvfrNXTHidofr -EOF - -test_expect_success "cid base32 works from stdin" ' - cat various_cids | ipfs cid base32 > actual && - test_cmp various_cids_base32 actual -' - -test_expect_success "cid format -v 1 -b base58btc works from stdin" ' - cat various_cids | ipfs cid format -v 1 -b base58btc > actual && - test_cmp various_cids_v1 actual -' - -cat < bases_expect - 0 identity -0 48 base2 -b 98 base32 -B 66 base32upper -c 99 base32pad -C 67 base32padupper -f 102 base16 -F 70 base16upper -k 107 base36 -K 75 base36upper -m 109 base64 -M 77 base64pad -t 116 base32hexpad -T 84 base32hexpadupper -u 117 base64url -U 85 base64urlpad -v 118 base32hex -V 
86 base32hexupper -z 122 base58btc -Z 90 base58flickr - 128640 base256emoji -EOF - -cat < codecs_expect - 81 cbor - 85 raw - 112 dag-pb - 113 dag-cbor - 114 libp2p-key - 120 git-raw - 123 torrent-info - 124 torrent-file - 129 leofcoin-block - 130 leofcoin-tx - 131 leofcoin-pr - 133 dag-jose - 134 dag-cose - 144 eth-block - 145 eth-block-list - 146 eth-tx-trie - 147 eth-tx - 148 eth-tx-receipt-trie - 149 eth-tx-receipt - 150 eth-state-trie - 151 eth-account-snapshot - 152 eth-storage-trie - 153 eth-receipt-log-trie - 154 eth-reciept-log - 176 bitcoin-block - 177 bitcoin-tx - 178 bitcoin-witness-commitment - 192 zcash-block - 193 zcash-tx - 208 stellar-block - 209 stellar-tx - 224 decred-block - 225 decred-tx - 240 dash-block - 241 dash-tx - 250 swarm-manifest - 251 swarm-feed - 252 beeson - 297 dag-json - 496 swhid-1-snp - 512 json -46083 urdca-2015-canon -46593 json-jcs -EOF - -cat < supported_codecs_expect - 81 cbor - 85 raw - 112 dag-pb - 113 dag-cbor - 114 libp2p-key - 120 git-raw - 133 dag-jose - 297 dag-json - 512 json -EOF - -cat < hashes_expect - 0 identity - 17 sha1 - 18 sha2-256 - 19 sha2-512 - 20 sha3-512 - 21 sha3-384 - 22 sha3-256 - 23 sha3-224 - 25 shake-256 - 26 keccak-224 - 27 keccak-256 - 28 keccak-384 - 29 keccak-512 - 30 blake3 - 86 dbl-sha2-256 -45588 blake2b-160 -45589 blake2b-168 -45590 blake2b-176 -45591 blake2b-184 -45592 blake2b-192 -45593 blake2b-200 -45594 blake2b-208 -45595 blake2b-216 -45596 blake2b-224 -45597 blake2b-232 -45598 blake2b-240 -45599 blake2b-248 -45600 blake2b-256 -45601 blake2b-264 -45602 blake2b-272 -45603 blake2b-280 -45604 blake2b-288 -45605 blake2b-296 -45606 blake2b-304 -45607 blake2b-312 -45608 blake2b-320 -45609 blake2b-328 -45610 blake2b-336 -45611 blake2b-344 -45612 blake2b-352 -45613 blake2b-360 -45614 blake2b-368 -45615 blake2b-376 -45616 blake2b-384 -45617 blake2b-392 -45618 blake2b-400 -45619 blake2b-408 -45620 blake2b-416 -45621 blake2b-424 -45622 blake2b-432 -45623 blake2b-440 -45624 blake2b-448 -45625 blake2b-456 -45626 blake2b-464 -45627 blake2b-472 -45628 blake2b-480 -45629 blake2b-488 -45630 blake2b-496 -45631 blake2b-504 -45632 blake2b-512 -45652 blake2s-160 -45653 blake2s-168 -45654 blake2s-176 -45655 blake2s-184 -45656 blake2s-192 -45657 blake2s-200 -45658 blake2s-208 -45659 blake2s-216 -45660 blake2s-224 -45661 blake2s-232 -45662 blake2s-240 -45663 blake2s-248 -45664 blake2s-256 -EOF - -test_expect_success "cid bases" ' - cut -c 12- bases_expect > expect && - ipfs cid bases > actual && - test_cmp expect actual -' - -test_expect_success "cid bases --prefix" ' - cut -c 1-3,12- bases_expect > expect && - ipfs cid bases --prefix > actual && - test_cmp expect actual -' - -test_expect_success "cid bases --prefix --numeric" ' - ipfs cid bases --prefix --numeric > actual && - test_cmp bases_expect actual -' - -test_expect_success "cid codecs" ' - cut -c 8- codecs_expect > expect && - ipfs cid codecs > actual - test_cmp expect actual -' - -test_expect_success "cid codecs --numeric" ' - ipfs cid codecs --numeric > actual && - test_cmp codecs_expect actual -' - -test_expect_success "cid codecs --supported" ' - cut -c 8- supported_codecs_expect > expect && - ipfs cid codecs --supported > actual - test_cmp expect actual -' - -test_expect_success "cid codecs --supported --numeric" ' - ipfs cid codecs --supported --numeric > actual && - test_cmp supported_codecs_expect actual -' - -test_expect_success "cid hashes" ' - cut -c 8- hashes_expect > expect && - ipfs cid hashes > actual - test_cmp expect actual -' - -test_expect_success "cid 
hashes --numeric" ' - ipfs cid hashes --numeric > actual && - test_cmp hashes_expect actual -' - -test_expect_success "cid format --mc raw" ' - echo $CIDb32raw > expected && - ipfs cid format --mc raw -b base32 $CIDb32pb > actual && - test_cmp actual expected -' - -test_expect_success "cid format --mc dag-pb -v 0" ' - echo $CIDbase > expected && - ipfs cid format --mc dag-pb -v 0 $CIDb32raw > actual && - test_cmp actual expected -' - -test_expect_success "cid format --mc dag-cbor" ' - echo $CIDb32dagcbor > expected && - ipfs cid format --mc dag-cbor $CIDb32pb > actual && - test_cmp actual expected -' - -# this was an old flag that we removed, explicitly to force an error -# so the user would read about the new multicodec names introduced -# by https://github.com/ipfs/go-cid/commit/b2064d74a8b098193b316689a715cdf4e4934805 -test_expect_success "cid format --codec fails" ' - echo "Error: unknown option \"codec\"" > expected && - test_expect_code 1 ipfs cid format --codec protobuf 2> actual && - test_cmp actual expected -' - -test_expect_success "cid format -b base256emoji " ' - echo "πŸš€πŸͺβ­πŸ’»πŸ˜…β“πŸ’ŽπŸŒˆπŸŒΈπŸŒšπŸ’°πŸ’πŸŒ’πŸ˜΅πŸΆπŸ’πŸ€πŸŒŽπŸ‘ΌπŸ™ƒπŸ™…β˜ΊπŸŒšπŸ˜žπŸ€€β­πŸš€πŸ˜ƒβœˆπŸŒ•πŸ˜šπŸ»πŸ’œπŸ·βš½βœŒπŸ˜Š" > expected && - ipfs cid format -b base256emoji bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi > actual && - test_cmp actual expected -' - -test_expect_success "cid format -b base32 " ' - echo "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi" > expected && - ipfs cid format -b base32 πŸš€πŸͺβ­πŸ’»πŸ˜…β“πŸ’ŽπŸŒˆπŸŒΈπŸŒšπŸ’°πŸ’πŸŒ’πŸ˜΅πŸΆπŸ’πŸ€πŸŒŽπŸ‘ΌπŸ™ƒπŸ™…β˜ΊπŸŒšπŸ˜žπŸ€€β­πŸš€πŸ˜ƒβœˆπŸŒ•πŸ˜šπŸ»πŸ’œπŸ·βš½βœŒπŸ˜Š > actual && - test_cmp actual expected -' - - -test_done diff --git a/test/sharness/t0295-multibase.sh b/test/sharness/t0295-multibase.sh deleted file mode 100755 index 76d3a09f790..00000000000 --- a/test/sharness/t0295-multibase.sh +++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test multibase commands" - -. lib/test-lib.sh - -# note: all "ipfs multibase" commands should work without requiring a repo - -cat <<EOF > bases_expect - 0 identity -0 48 base2 -b 98 base32 -B 66 base32upper -c 99 base32pad -C 67 base32padupper -f 102 base16 -F 70 base16upper -k 107 base36 -K 75 base36upper -m 109 base64 -M 77 base64pad -t 116 base32hexpad -T 84 base32hexpadupper -u 117 base64url -U 85 base64urlpad -v 118 base32hex -V 86 base32hexupper -z 122 base58btc -Z 90 base58flickr - 128640 base256emoji -EOF - -# TODO: expose same cmd under multibase? 
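Editor's aside (not part of the original file): the TODO above refers to the `ipfs cid bases` table earlier in this diff; `ipfs multibase list` prints the same multibase table. A minimal sketch of the cross-check the TODO hints at, assuming the two listings stay in sync (the test name and scratch file names here are hypothetical):

    test_expect_success "multibase list matches cid bases" '
      ipfs cid bases > cid_bases &&        # table as printed by the cid command
      ipfs multibase list > mb_list &&     # table as printed by the multibase command
      test_cmp cid_bases mb_list
    '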
-test_expect_success "multibase list" ' - cut -c 12- bases_expect > expect && - ipfs multibase list > actual && - test_cmp expect actual -' - -test_expect_success "multibase encode works (stdin)" ' - echo -n uaGVsbG8 > expected && - echo -n hello | ipfs multibase encode > actual && - test_cmp actual expected -' - -test_expect_success "multibase encode works (file)" ' - echo -n hello > file && - echo -n uaGVsbG8 > expected && - ipfs multibase encode ./file > actual && - test_cmp actual expected -' - -test_expect_success "multibase encode -b (custom base)" ' - echo -n f68656c6c6f > expected && - echo -n hello | ipfs multibase encode -b base16 > actual && - test_cmp actual expected -' - -test_expect_success "multibase decode works (stdin)" ' - echo -n hello > expected && - echo -n uaGVsbG8 | ipfs multibase decode > actual && - test_cmp actual expected -' - -test_expect_success "multibase decode works (file)" ' - echo -n uaGVsbG8 > file && - echo -n hello > expected && - ipfs multibase decode ./file > actual && - test_cmp actual expected -' - -test_expect_success "multibase encode+decode roundtrip" ' - echo -n hello > expected && - cat expected | ipfs multibase encode -b base64 | ipfs multibase decode > actual && - test_cmp actual expected -' - -test_expect_success "multibase transcode works (stdin)" ' - echo -n f68656c6c6f > expected && - echo -n uaGVsbG8 | ipfs multibase transcode -b base16 > actual && - test_cmp actual expected -' - -test_expect_success "multibase transcode works (file)" ' - echo -n uaGVsbG8 > file && - echo -n f68656c6c6f > expected && - ipfs multibase transcode ./file -b base16 > actual && - test_cmp actual expected -' - -test_expect_success "multibase error on unknown multibase prefix" ' - echo "Error: failed to decode multibase: selected encoding not supported" > expected && - echo -n Δ™-that-should-do-the-trick | ipfs multibase decode 2> actual ; - test_cmp actual expected -' - -test_expect_success "multibase error on a character outside of the base" " - echo \"Error: failed to decode multibase: encoding/hex: invalid byte: U+007A 'z'\" > expected && - echo -n f6c6f6cz | ipfs multibase decode 2> actual ; - test_cmp actual expected -" - -test_done diff --git a/test/sharness/t0320-pubsub.sh b/test/sharness/t0320-pubsub.sh deleted file mode 100755 index 5635b842ec2..00000000000 --- a/test/sharness/t0320-pubsub.sh +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test pubsub command" - -. 
lib/test-lib.sh - -# start iptb + wait for peering -NUM_NODES=5 -test_expect_success 'init iptb' ' - iptb testbed create -type localipfs -count $NUM_NODES -init -' - -test_expect_success 'disable the DHT' ' - iptb run -- ipfs config Routing.Type none -' - -run_pubsub_tests() { - test_expect_success 'peer ids' ' - PEERID_0=$(iptb attr get 0 id) && - PEERID_2=$(iptb attr get 2 id) - ' - - # ipfs pubsub sub - test_expect_success 'pubsub' ' - echo -n -e "test\nOK" | ipfs multibase encode -b base64url > expected && - touch empty && - mkfifo wait || - test_fsh echo init fail - - # ipfs pubsub sub is long-running, so we start it in the background and - # have it write its output somewhere we can access it - ( - ipfsi 0 pubsub sub --enc=json testTopic | if read line; then - echo $line | jq -j .data > actual && - echo > wait - fi - ) & - ' - - test_expect_success "wait until ipfs pubsub sub is ready to do work" ' - go-sleep 500ms - ' - - test_expect_success "can see peer subscribed to testTopic" ' - ipfsi 1 pubsub peers testTopic > peers_out - ' - - test_expect_success "output looks good" ' - echo $PEERID_0 > peers_exp && - test_cmp peers_exp peers_out - ' - - test_expect_success "publish something from file" ' - echo -n -e "test\nOK" > payload-file && - ipfsi 1 pubsub pub testTopic payload-file &> pubErr - ' - - test_expect_success "wait until echo > wait executed" ' - cat wait && - test_cmp pubErr empty && - test_cmp expected actual - ' - - test_expect_success "wait for another pubsub message" ' - echo -n -e "test\nOK\r\n2" | ipfs multibase encode -b base64url > expected && - mkfifo wait2 || - test_fsh echo init fail - - # ipfs pubsub sub is long-running, so we start it in the background and - # have it write its output somewhere we can access it - ( - ipfsi 2 pubsub sub --enc=json testTopic | if read line; then - echo $line | jq -j .data > actual && - echo > wait2 - fi - ) & - ' - - test_expect_success "wait until ipfs pubsub sub is ready to do work" ' - go-sleep 500ms - ' - - test_expect_success "publish something from stdin" ' - echo -n -e "test\nOK\r\n2" | ipfsi 3 pubsub pub testTopic &> pubErr - ' - - test_expect_success "wait until echo > wait executed" ' - cat wait2 && - test_cmp pubErr empty && - test_cmp expected actual - ' - - test_expect_success 'cleanup fifos' ' - rm -f wait wait2 - ' - -} - -# Normal tests - enabled via config - -test_expect_success 'enable pubsub via config' ' - iptb run -- ipfs config --json Pubsub.Enabled true -' - -startup_cluster $NUM_NODES -run_pubsub_tests -test_expect_success 'stop iptb' ' - iptb stop -' - -test_expect_success 'disable pubsub via config' ' - iptb run -- ipfs config --json Pubsub.Enabled false -' - -# Normal tests - enabled via daemon option flag - -startup_cluster $NUM_NODES --enable-pubsub-experiment -run_pubsub_tests -test_expect_success 'stop iptb' ' - iptb stop -' - -# Test with some nodes not signing messages. 
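Editor's note (hedged sketch, not part of the original suite): run_pubsub_tests above relies on a fifo to synchronize with the long-running subscriber. The recipe in isolation, with hypothetical topic and file names, looks roughly like this:

    mkfifo ready &&                        # fifo used purely as a completion signal
    (
      ipfsi 0 pubsub sub --enc=json someTopic | if read line; then
        echo $line | jq -j .data > got &&  # capture the first message payload
        echo > ready                       # unblock whoever reads the fifo
      fi
    ) &
    go-sleep 500ms &&                      # crude wait for the subscriber to attach
    ipfsi 1 pubsub pub someTopic payload-file &&
    cat ready &&                           # blocks until the subscriber signals
    test_cmp expected got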
- -test_expect_success 'disable signing on nodes 0-3' ' - iptb run [0-3] -- ipfs config --json Pubsub.DisableSigning true -' - -startup_cluster $NUM_NODES --enable-pubsub-experiment - -test_expect_success 'set node 4 to listen on testTopic' ' - rm -f node4_actual && - ipfsi 4 pubsub sub --enc=json testTopic > node4_actual & -' - -run_pubsub_tests - -test_expect_success 'stop iptb' ' - iptb stop -' - -test_expect_success 'node 4 got no unsigned messages' ' - test_must_be_empty node4_actual -' - - -# Confirm negative CLI flag takes precedence over positive config - -# --enable-pubsub-experiment=false + Pubsub.Enabled:true - -test_expect_success 'enable pubsub via config' ' - iptb run -- ipfs config --json Pubsub.Enabled true -' -startup_cluster $NUM_NODES --enable-pubsub-experiment=false - -test_expect_success 'pubsub cmd fails because it was disabled via cli flag' ' - test_expect_code 1 ipfsi 4 pubsub ls 2> pubsub_cmd_out -' - -test_expect_success "pubsub cmd produces error" ' - echo "Error: experimental pubsub feature not enabled, run daemon with --enable-pubsub-experiment to use" > expected && - test_cmp expected pubsub_cmd_out -' - -test_expect_success 'stop iptb' ' - iptb stop -' - -test_done diff --git a/test/sharness/t0321-pubsub-gossipsub.sh b/test/sharness/t0321-pubsub-gossipsub.sh deleted file mode 100755 index c89c8d1ae92..00000000000 --- a/test/sharness/t0321-pubsub-gossipsub.sh +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test pubsub with gossipsub" - -. lib/test-lib.sh - -# start iptb + wait for peering -NUM_NODES=5 -test_expect_success 'init iptb' ' - iptb testbed create -type localipfs -count $NUM_NODES -init -' - -test_expect_success "enable gossipsub" ' - for x in $(seq 0 4); do - ipfsi $x config Pubsub.Router gossipsub - done -' - -# this is just a copy of t0180-pubsub; smell. 
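Editor's note (hedged sketch, not in the original file): only the Pubsub.Router setting distinguishes this run from the plain pubsub suite. A direct assertion that the switch took effect on every node could look like this, reusing the suite's ipfsi helper; the test name and scratch files are hypothetical:

    test_expect_success 'gossipsub router is configured on all nodes' '
      for x in $(seq 0 4); do
        ipfsi $x config Pubsub.Router       # prints the configured router
      done > routers_actual &&
      for x in $(seq 0 4); do echo gossipsub; done > routers_expected &&
      test_cmp routers_expected routers_actual
    '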
-startup_cluster $NUM_NODES --enable-pubsub-experiment - -test_expect_success 'peer ids' ' - PEERID_0=$(iptb attr get 0 id) && - PEERID_2=$(iptb attr get 2 id) -' - -test_expect_success 'pubsub' ' - echo -n -e "test\nOK" | ipfs multibase encode -b base64url > expected && - touch empty && - mkfifo wait || - test_fsh echo init fail - - # ipfs pubsub sub is long-running, so we start it in the background and - # have it write its output somewhere we can access it - ( - ipfsi 0 pubsub sub --enc=json testTopic | if read line; then - echo $line | jq -j .data > actual && - echo > wait - fi - ) & -' - -test_expect_success "wait until ipfs pubsub sub is ready to do work" ' - go-sleep 500ms -' - -test_expect_success "can see peer subscribed to testTopic" ' - ipfsi 1 pubsub peers testTopic > peers_out -' - -test_expect_success "output looks good" ' - echo $PEERID_0 > peers_exp && - test_cmp peers_exp peers_out -' - -test_expect_success "publish something from a file" ' - echo -n -e "test\nOK" > payload-file && - ipfsi 1 pubsub pub testTopic payload-file &> pubErr -' - -test_expect_success "wait until echo > wait executed" ' - cat wait && - test_cmp pubErr empty && - test_cmp expected actual -' - -test_expect_success "wait for another pubsub message" ' - echo -n -e "test\nOK2" | ipfs multibase encode -b base64url > expected && - mkfifo wait2 || - test_fsh echo init fail - - # ipfs pubsub sub is long-running, so we start it in the background and - # have it write its output somewhere we can access it - ( - ipfsi 2 pubsub sub --enc=json testTopic | if read line; then - echo $line | jq -j .data > actual && - echo > wait2 - fi - ) & -' - -test_expect_success "wait until ipfs pubsub sub is ready to do work" ' - go-sleep 500ms -' - -test_expect_success "publish something" ' - echo -n -e "test\nOK2" | ipfsi 1 pubsub pub testTopic &> pubErr -' - -test_expect_success "wait until echo > wait executed" ' - cat wait2 && - test_cmp pubErr empty && - test_cmp expected actual -' - -test_expect_success 'stop iptb' ' - iptb stop -' - -test_done diff --git a/test/sharness/t0322-pubsub-http-rpc.sh b/test/sharness/t0322-pubsub-http-rpc.sh deleted file mode 100755 index 4ecfefb6977..00000000000 --- a/test/sharness/t0322-pubsub-http-rpc.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test pubsub command behavior over HTTP RPC API" - -. 
lib/test-lib.sh - -test_init_ipfs -test_launch_ipfs_daemon --enable-pubsub-experiment - -# Require topic as multibase -# https://github.com/ipfs/go-ipfs/pull/8183 -test_expect_success "/api/v0/pubsub/pub URL arg must be multibase encoded" ' - echo test > data.txt && - curl -s -X POST -F "data=@data.txt" "$API_ADDR/api/v0/pubsub/pub?arg=foobar" > result && - test_should_contain "error" result && - test_should_contain "URL arg must be multibase encoded" result -' - -# Use URL-safe multibase -# base64 should produce error when used in URL args, base64url should be used -test_expect_success "/api/v0/pubsub/pub URL arg must be in URL-safe multibase" ' - echo test > data.txt && - curl -s -X POST -F "data=@data.txt" "$API_ADDR/api/v0/pubsub/pub?arg=mZm9vYmFyCg" > result && - test_should_contain "error" result && - test_should_contain "URL arg must be base64url encoded" result -' - -test_kill_ipfs_daemon -test_done diff --git a/test/sharness/t0400-api-no-gateway.sh b/test/sharness/t0400-api-no-gateway.sh deleted file mode 100755 index d0daeece3da..00000000000 --- a/test/sharness/t0400-api-no-gateway.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2016 Lars Gierth -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test API security" - -. lib/test-lib.sh - -test_init_ipfs - -# Import test case -# See the static fixtures in ./t0400-api-no-gateway/ -test_expect_success "Add the test directory" ' - ipfs dag import ../t0400-api-no-gateway/fixtures.car -' -HASH=QmNYERzV2LfD2kkfahtfv44ocHzEFK1sLBaE7zdcYT2GAZ # a file containing the string "testing" - -# by default, we don't let you load arbitrary ipfs objects through the api, -# because this would open up the api to scripting vulnerabilities. -# only the webui objects are allowed. -# if you know what you're doing, go ahead and pass --unrestricted-api. - -test_launch_ipfs_daemon -test_expect_success "Gateway on API unavailable" ' - test_curl_resp_http_code "http://127.0.0.1:$API_PORT/ipfs/$HASH" "HTTP/1.1 404 Not Found" -' -test_kill_ipfs_daemon - -test_launch_ipfs_daemon --unrestricted-api -test_expect_success "Gateway on --unrestricted-api API available" ' - test_curl_resp_http_code "http://127.0.0.1:$API_PORT/ipfs/$HASH" "HTTP/1.1 200 OK" -' -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0401-api-browser-security.sh b/test/sharness/t0401-api-browser-security.sh deleted file mode 100755 index f288259d5f6..00000000000 --- a/test/sharness/t0401-api-browser-security.sh +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2020 Protocol Labs -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test API browser security" - -. 
lib/test-lib.sh - -test_init_ipfs - -PEERID=$(ipfs config Identity.PeerID) - -test_launch_ipfs_daemon - -test_expect_success "browser is unable to access API without Origin" ' - curl -sD - -X POST -A "Mozilla" "http://127.0.0.1:$API_PORT/api/v0/id" >curl_output && - grep "HTTP/1.1 403 Forbidden" curl_output -' - -test_expect_success "browser is unable to access API with invalid Origin" ' - curl -sD - -X POST -A "Mozilla" -H "Origin: https://invalid.example.com" "http://127.0.0.1:$API_PORT/api/v0/id" >curl_output && - grep "HTTP/1.1 403 Forbidden" curl_output -' - -test_expect_success "browser is able to access API if Origin is the API port on localhost (ipv4)" ' - curl -sD - -X POST -A "Mozilla" -H "Origin: http://127.0.0.1:$API_PORT" "http://127.0.0.1:$API_PORT/api/v0/id" >curl_output && - grep "HTTP/1.1 200 OK" curl_output && grep "$PEERID" curl_output -' - -test_expect_success "browser is able to access API if Origin is the API port on localhost (ipv6)" ' - curl -sD - -X POST -A "Mozilla" -H "Origin: http://[::1]:$API_PORT" "http://127.0.0.1:$API_PORT/api/v0/id" >curl_output && - grep "HTTP/1.1 200 OK" curl_output && grep "$PEERID" curl_output -' - -test_expect_success "browser is able to access API if Origin is the API port on localhost (localhost name)" ' - curl -sD - -X POST -A "Mozilla" -H "Origin: http://localhost:$API_PORT" "http://127.0.0.1:$API_PORT/api/v0/id" >curl_output && - grep "HTTP/1.1 200 OK" curl_output && grep "$PEERID" curl_output -' - -test_expect_success "Random browser extension is unable to access RPC API due to invalid Origin" ' - curl -sD - -X POST -A "Mozilla" -H "Origin: chrome-extension://invalidextensionid" "http://127.0.0.1:$API_PORT/api/v0/id" >curl_output && - grep "HTTP/1.1 403 Forbidden" curl_output -' - -test_expect_success "Companion extension is able to access RPC API on localhost" ' - curl -sD - -X POST -A "Mozilla" -H "Origin: chrome-extension://nibjojkomfdiaoajekhjakgkdhaomnch" "http://127.0.0.1:$API_PORT/api/v0/id" >curl_output && - cat curl_output && - grep "HTTP/1.1 200 OK" curl_output && grep "$PEERID" curl_output -' - -test_expect_success "Companion beta extension is able to access API on localhost" ' - curl -sD - -X POST -A "Mozilla" -H "Origin: chrome-extension://hjoieblefckbooibpepigmacodalfndh" "http://127.0.0.1:$API_PORT/api/v0/id" >curl_output && - grep "HTTP/1.1 200 OK" curl_output && grep "$PEERID" curl_output -' - -test_kill_ipfs_daemon - -test_expect_success "setting CORS in API.HTTPHeaders works via CLI" " - ipfs config --json API.HTTPHeaders.Access-Control-Allow-Origin '[\"https://valid.example.com\"]' && - ipfs config --json API.HTTPHeaders.Access-Control-Allow-Methods '[\"POST\"]' && - ipfs config --json API.HTTPHeaders.Access-Control-Allow-Headers '[\"X-Requested-With\"]' -" - -test_launch_ipfs_daemon - -test_expect_success "Companion extension is able to access RPC API even when custom Access-Control-Allow-Origin is set" ' - ipfs config --json API.HTTPHeaders.Access-Control-Allow-Origin | grep -q valid.example.com && - curl -sD - -X POST -A "Mozilla" -H "Origin: chrome-extension://nibjojkomfdiaoajekhjakgkdhaomnch" "http://127.0.0.1:$API_PORT/api/v0/id" >curl_output && - cat curl_output && - grep "HTTP/1.1 200 OK" curl_output && - grep "$PEERID" curl_output -' - -# https://developer.mozilla.org/en-US/docs/Glossary/Preflight_request -test_expect_success "OPTIONS with preflight request to API with CORS allowlist succeeds" ' - curl -svX OPTIONS -A "Mozilla" -H "Origin: https://valid.example.com" -H "Access-Control-Request-Method: 
POST" -H "Access-Control-Request-Headers: origin, x-requested-with" "http://127.0.0.1:$API_PORT/api/v0/id" 2>curl_output && - cat curl_output -' - -# OPTION Response from Gateway should contain CORS headers, otherwise JS won't work -test_expect_success "OPTIONS response for API with CORS allowslist looks good" ' - grep "< Access-Control-Allow-Origin: https://valid.example.com" curl_output -' - -test_expect_success "browser is able to access API with valid Origin matching CORS allowlist" ' - curl -sD - -X POST -A "Mozilla" -H "Origin: https://valid.example.com" "http://127.0.0.1:$API_PORT/api/v0/id" >curl_output && - grep "HTTP/1.1 200 OK" curl_output && grep "$PEERID" curl_output -' - -test_kill_ipfs_daemon -test_done diff --git a/test/sharness/t0410-api-add.sh b/test/sharness/t0410-api-add.sh deleted file mode 100755 index da81f8332ec..00000000000 --- a/test/sharness/t0410-api-add.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2016 Tom O'Donnell -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test API add command" - -. lib/test-lib.sh - -test_init_ipfs - -# Verify that the API add command returns size - -test_launch_ipfs_daemon -test_expect_success "API Add response includes size field" ' - echo "hi" | curl -s -F file=@- "http://localhost:$API_PORT/api/v0/add" | grep "\"Size\": *\"11\"" -' -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0500-issues-and-regressions-offline.sh b/test/sharness/t0500-issues-and-regressions-offline.sh deleted file mode 100755 index 5a361aae9dc..00000000000 --- a/test/sharness/t0500-issues-and-regressions-offline.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash - -test_description="Tests for various fixed issues and regressions." - -. lib/test-lib.sh - -# Tests go here - -test_expect_success "ipfs init with occupied input works - #2748" ' - export IPFS_PATH="ipfs_path" - echo "" | go-timeout 10 ipfs init && - rm -rf ipfs_path -' -test_init_ipfs - -test_expect_success "ipfs cat --help succeeds when input remains open" ' - yes | go-timeout 1 ipfs cat --help -' - -test_expect_success "ipfs pin ls --help succeeds when input remains open" ' - yes | go-timeout 1 ipfs pin ls --help -' - -test_expect_success "ipfs add on 1MB from stdin woks" ' - random 1048576 42 | ipfs add -q > 1MB.hash -' - -test_expect_success "'ipfs refs -r -e \$(cat 1MB.hash)' succeeds" ' - ipfs refs -r -e $(cat 1MB.hash) > refs-e.out -' - -test_expect_success "output of 'ipfs refs -e' links to separate blocks" ' - grep "$(cat 1MB.hash) ->" refs-e.out -' - -test_expect_success "output of 'ipfs refs -e' contains all first level links" ' - grep "$(cat 1MB.hash) ->" refs-e.out | sed -e '\''s/.* -> //'\'' | sort > refs-s.out && - ipfs refs "$(cat 1MB.hash)" | sort > refs-one.out && - test_cmp refs-s.out refs-one.out -' - -test_done diff --git a/test/sharness/t0600-issues-and-regressions-online.sh b/test/sharness/t0600-issues-and-regressions-online.sh deleted file mode 100755 index 809121640f7..00000000000 --- a/test/sharness/t0600-issues-and-regressions-online.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env bash - -test_description="Tests for various fixed issues and regressions." - -. 
lib/test-lib.sh - -test_init_ipfs --empty-repo=false - -test_launch_ipfs_daemon - -# Tests go here - -test_expect_success "commands command with the flags option works via HTTP API - #2301" ' - curl -X POST "http://$API_ADDR/api/v0/commands?flags" | grep "verbose" -' - -test_expect_success "ipfs refs local over HTTP API returns NDJSON not flat - #2803" ' - echo "Hello World" | ipfs add && - curl -X POST "http://$API_ADDR/api/v0/refs/local" | grep "Ref" | grep "Err" -' - -test_expect_success "args expecting stdin don't crash when not given" ' - curl -X POST "$API_ADDR/api/v0/bootstrap/add" > result -' - -test_expect_success "no panic traces on daemon" ' - test_must_fail grep "nil pointer dereference" daemon_err -' - -test_expect_success "metrics work" ' - curl -X POST "$API_ADDR/debug/metrics/prometheus" > pro_data && - grep "ipfs_bs_cache_boxo_blockstore_cache_total" < pro_data || - test_fsh cat pro_data -' - -test_expect_success "pin add api looks right - #3753" ' - HASH=$(date +"%FT%T.%N%z" | ipfs add -q) && - curl -X POST "http://$API_ADDR/api/v0/pin/add/$HASH" > pinadd_out && - echo "{\"Pins\":[\"$HASH\"]}" > pinadd_exp && - test_cmp pinadd_out pinadd_exp -' - -test_expect_success "pin rm api looks right - #3753" ' - curl -X POST "http://$API_ADDR/api/v0/pin/rm/$HASH" > pinrm_out && - echo "{\"Pins\":[\"$HASH\"]}" > pinrm_exp && - test_cmp pinrm_out pinrm_exp -' - -test_expect_success SOCAT "no daemon crash on improper file argument - #4003 (test needs socat)" ' - FNC=$(echo $API_ADDR | awk -F: '\''{ printf "%s:%s", $1, $2 }'\'') && - printf "POST /api/v0/add?pin=true HTTP/1.1\r\nHost: $API_ADDR\r\nContent-Type: multipart/form-data; boundary=Pyw9xQLtiLPE6XcI\r\nContent-Length: 22\r\n\r\n\r\n--Pyw9xQLtiLPE6XcI\r\n" | socat STDIO tcp-connect:$FNC | grep -m1 "500 Internal Server Error" -' - -test_kill_ipfs_daemon - -test_expect_success "ipfs daemon --offline --mount fails - #2995" ' - test_expect_code 1 ipfs daemon --offline --mount 2>daemon_err && - grep "mount is not currently supported in offline mode" daemon_err || - test_fsh cat daemon_err -' - -test_launch_ipfs_daemon_without_network - -test_expect_success "'ipfs name publish --allow-offline' succeeds when daemon is offline" ' - PEERID=`ipfs key list --ipns-base=base36 -l | grep self | cut -d " " -f1` && - test_check_peerid "${PEERID}" && - ipfs name publish --allow-offline -Q "/ipfs/$HASH_WELCOME_DOCS" >publish_out -' - -test_expect_success "publish --quieter output looks good" ' - echo "${PEERID}" >expected1 && - test_cmp expected1 publish_out -' - -test_expect_success "'ipfs name resolve' succeeds" ' - ipfs name resolve "$PEERID" >output -' - -test_expect_success "resolve output looks good" ' - printf "/ipfs/%s\n" "$HASH_WELCOME_DOCS" >expected2 && - test_cmp expected2 output -' - -test_kill_ipfs_daemon - -test_done - diff --git a/test/sharness/t0800-blake3.sh b/test/sharness/t0800-blake3.sh deleted file mode 100755 index a90a676da46..00000000000 --- a/test/sharness/t0800-blake3.sh +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2020 Claudia Richoux -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test blake3 mhash support" - -. lib/test-lib.sh - -test_init_ipfs - -# the blake3 hash of "foo\n" in UTF8 (which is what comes out of echo when you pipe into `ipfs`) starts with "49dc870df1de7fd60794cebce449f5ccdae575affaa67a24b62acb03e039db92" -# without the newline it's "04e0bb39f30b1a3feb89f536c93be15055482df748674b00d26e5a75777702e9". 
so if you see one of these values unexpectedly, a missing or extra trailing newline is the problem -BLAKE3RAWCID32BYTE="bafkr4icj3sdq34o6p7lapfgoxtset5om3lsxll72uz5cjnrkzmb6aoo3si" -BLAKE3RAWCID64BYTE="bafkr4qcj3sdq34o6p7lapfgoxtset5om3lsxll72uz5cjnrkzmb6aoo3sknmbprpe27pbfrb67tonydgfot5ixuq4skiva76ppbgpjlzc4ua4" -BLAKE3RAWCID128BYTE="bafkr5aabjhoiodpr3z75mb4uz26oispvztnok5np7kthujfwflfqhybz3ojjvqf6f4tl54eweh36nzxamyv2pvc6sdsjjcud7z54ez5fpelsqdtax2k3rvuq3wdl5a4blxv3gvsroa3nakfzzknamhu2apf3vvytyiobabrn2bfnfajq66ikjy5lewsp5jyddsg5l7u3emr2ancimryay" - -### block tests, including for various sizes of hash ### - -test_expect_success "putting a block with a blake3 mhash succeeds (default 32 bytes)" ' - HASH=$(echo "foo" | ipfs block put --mhtype=blake3 --cid-codec=raw | tee actual_out) && - test $BLAKE3RAWCID32BYTE = "$HASH" -' - -test_expect_success "block get output looks right" ' - ipfs block get $BLAKE3RAWCID32BYTE > blk_get_out && - echo "foo" > blk_get_exp && - test_cmp blk_get_exp blk_get_out -' - -test_expect_success "putting a block with a blake3 mhash succeeds: 64 bytes" ' - HASH=$(echo "foo" | ipfs block put --mhtype=blake3 --mhlen=64 --cid-codec=raw | tee actual_out) && - test $BLAKE3RAWCID64BYTE = "$HASH" -' - -test_expect_success "64B block get output looks right" ' - ipfs block get $BLAKE3RAWCID64BYTE > blk_get_out && - echo "foo" > blk_get_exp && - test_cmp blk_get_exp blk_get_out -' - -test_expect_success "putting a block with a blake3 mhash succeeds: 128 bytes" ' - HASH=$(echo "foo" | ipfs block put --mhtype=blake3 --mhlen=128 --cid-codec=raw | tee actual_out) && - test $BLAKE3RAWCID128BYTE = "$HASH" -' - -test_expect_success "128B block get output looks right" ' - ipfs block get $BLAKE3RAWCID128BYTE > blk_get_out && - echo "foo" > blk_get_exp && - test_cmp blk_get_exp blk_get_out -' - -### dag tests ### - -test_expect_success "dag put works with blake3" ' - HASH=$(echo "foo" | ipfs dag put --input-codec=raw --store-codec=raw --hash=blake3 | tee actual_out) && - test $BLAKE3RAWCID32BYTE = "$HASH" -' - -test_expect_success "dag get output looks right" ' - ipfs dag get --output-codec=raw $BLAKE3RAWCID32BYTE > dag_get_out && - echo "foo" > dag_get_exp && - test_cmp dag_get_exp dag_get_out -' - -### add and cat tests ### - -test_expect_success "adding a file with just foo in it to ipfs" ' - echo "foo" > afile && - HASH=$(ipfs add -q --hash=blake3 --raw-leaves afile | tee actual_out) && - test $BLAKE3RAWCID32BYTE = "$HASH" -' - -test_expect_success "catting it" ' - ipfs cat $BLAKE3RAWCID32BYTE > cat_out && - echo "foo" > cat_exp && - test_cmp cat_exp cat_out -' - -test_done diff --git a/test/sharness/x0601-pin-fail-test.sh b/test/sharness/x0601-pin-fail-test.sh deleted file mode 100755 index ffab1062d07..00000000000 --- a/test/sharness/x0601-pin-fail-test.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2016 Jeromy Johnson -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test very large number of pins" - -. lib/test-lib.sh - -test_init_ipfs - -test_launch_ipfs_daemon - -test_expect_success "pre-test setup" ' - printf "" > pins && - ipfs pin ls --type=recursive -q > rec_pins_before -' - - -for i in `seq 9000` -do - test_expect_success "ipfs add (and pin) a file" ' - echo $i | ipfs add -q >> pins - ' -done - -test_expect_success "get pinset afterwards" ' - ipfs pin ls --type=recursive -q | sort > rec_pins_after && - cat pins rec_pins_before | sort | uniq > exp_pins_after && - test_cmp rec_pins_after exp_pins_after -' - -test_kill_ipfs_daemon - -test_done -
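Editor's note (sketch under stated assumptions, not part of x0601): the 9000-pin loop above is deliberately a stress test. The same pinset invariant can be smoke-tested with a much smaller, hypothetical N before paying for the full run, assuming an initialized repo with a running daemon as in the setup above:

    N=100 &&
    ipfs pin ls --type=recursive -q > rec_pins_before &&      # baseline pinset
    for i in $(seq $N); do echo $i | ipfs add -q >> pins; done &&
    ipfs pin ls --type=recursive -q | sort > rec_pins_after &&
    cat pins rec_pins_before | sort | uniq > exp_pins_after &&  # baseline plus new pins
    test_cmp rec_pins_after exp_pins_after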